commit_id (string) | repo (string) | commit_message (string) | diff (string) | label (int64)
---|---|---|---|---
a12dc3690f49704d86f63a96bff81abe62267397
|
389ds/389-ds-base
|
Ticket 47976 - Add fixed CI test case
Description: Add a test case written by Thierry and fix it to match
the current directory structure.
https://fedorahosted.org/389/ticket/47976
Reviewed by: wibrown, mreynolds (Thanks!)
|
commit a12dc3690f49704d86f63a96bff81abe62267397
Author: Simon Pichugin <[email protected]>
Date: Tue Aug 2 17:29:00 2016 +0200
Ticket 47976 - Add fixed CI test case
Description: Add a test case written by Thierry and fix it to match
the current directory structure.
https://fedorahosted.org/389/ticket/47976
Reviewed by: wibrown, mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py
new file mode 100644
index 000000000..df4891d18
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket47976_test.py
@@ -0,0 +1,203 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+PEOPLE_OU='people'
+PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
+GROUPS_OU='groups'
+GROUPS_DN = "ou=%s,%s" % (GROUPS_OU, SUFFIX)
+DEFINITIONS_CN='definitions'
+DEFINITIONS_DN = "cn=%s,%s" % (DEFINITIONS_CN, SUFFIX)
+TEMPLATES_CN='templates'
+TEMPLATES_DN = "cn=%s,%s" % (TEMPLATES_CN, SUFFIX)
+MANAGED_GROUP_TEMPLATES_CN='managed group templates'
+MANAGED_GROUP_TEMPLATES_DN='cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN)
+MANAGED_GROUP_MEP_TMPL_CN='UPG'
+MANAGED_GROUP_MEP_TMPL_DN='cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN)
+MANAGED_GROUP_DEF_CN='managed group definition'
+MANAGED_GROUP_DEF_DN='cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN)
+
+MAX_ACCOUNTS=2
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
[email protected](scope="module")
+def topology(request):
+ global installation1_prefix
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+ # Creating standalone instance ...
+ standalone = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ standalone.delete()
+ standalone.create()
+ standalone.open()
+
+ # Delete each instance in the end
+ def fin():
+ standalone.delete()
+ request.addfinalizer(fin)
+
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47976_init(topology):
+ """Create mep definitions and templates"""
+
+ try:
+ topology.standalone.add_s(Entry((PEOPLE_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': 'people'})))
+ except ldap.ALREADY_EXISTS:
+ pass
+ try:
+ topology.standalone.add_s(Entry((GROUPS_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': GROUPS_OU})))
+ except ldap.ALREADY_EXISTS:
+ pass
+ topology.standalone.add_s(Entry((DEFINITIONS_DN, {
+ 'objectclass': "top nsContainer".split(),
+ 'cn': DEFINITIONS_CN})))
+ topology.standalone.add_s(Entry((TEMPLATES_DN, {
+ 'objectclass': "top nsContainer".split(),
+ 'cn': TEMPLATES_CN})))
+ topology.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'cn': MANAGED_GROUP_DEF_CN,
+ 'originScope': PEOPLE_DN,
+ 'originFilter': '(objectclass=posixAccount)',
+ 'managedBase': GROUPS_DN,
+ 'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN})))
+
+ topology.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, {
+ 'objectclass': "top nsContainer".split(),
+ 'cn': MANAGED_GROUP_TEMPLATES_CN})))
+
+ topology.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, {
+ 'objectclass': "top mepTemplateEntry".split(),
+ 'cn': MANAGED_GROUP_MEP_TMPL_CN,
+ 'mepRDNAttr': 'cn',
+ 'mepStaticAttr': ['objectclass: posixGroup',
+ 'objectclass: extensibleObject'],
+ 'mepMappedAttr': ['cn: $cn|uid: $cn',
+ 'gidNumber: $uidNumber']})))
+
+
+ topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
+ topology.standalone.restart(timeout=10)
+
+
+def test_ticket47976_1(topology):
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', DEFINITIONS_DN)]
+ topology.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
+ topology.standalone.stop(timeout=10)
+ topology.standalone.start(timeout=10)
+ for cpt in range(MAX_ACCOUNTS):
+ name = "user%d" % (cpt)
+ topology.standalone.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
+
+
+def test_ticket47976_2(topology):
+ """It reimports the database with a very large page size
+ so all the entries (user and its private group).
+ """
+
+ log.info('Test complete')
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128*1024))]
+ topology.standalone.modify_s(DN_LDBM, mod)
+
+ # Get the full path and name for our LDIF we will be exporting
+ log.info('Export LDIF file...')
+ ldif_dir = topology.standalone.get_ldif_dir()
+ ldif_file = ldif_dir + "/export.ldif"
+ args = {EXPORT_REPL_INFO: False,
+ TASK_WAIT: True}
+ exportTask = Tasks(topology.standalone)
+ try:
+ exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
+ except ValueError:
+ assert False
+ # import the new ldif file
+ log.info('Import LDIF file...')
+ importTask = Tasks(topology.standalone)
+ args = {TASK_WAIT: True}
+ try:
+ importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
+ os.remove(ldif_file)
+ except ValueError:
+ os.remove(ldif_file)
+ assert False
+
+
+def test_ticket47976_3(topology):
+ """A single delete of a user should hit 47976, because mep post op will
+ delete its related group.
+ """
+
+ log.info('Testing if the delete will hang or not')
+ #log.info("\n\nAttach\n\n debugger")
+ #time.sleep(60)
+ topology.standalone.set_option(ldap.OPT_TIMEOUT, 5)
+ try:
+ for cpt in range(MAX_ACCOUNTS):
+ name = "user%d" % (cpt)
+ topology.standalone.delete_s("uid=%s,%s" %(name, PEOPLE_DN))
+ except ldap.TIMEOUT as e:
+ log.fatal('Timeout... likely it hangs (47976)')
+ assert False
+
+ # check the entry has been deleted
+ for cpt in range(MAX_ACCOUNTS):
+ try:
+ name = "user%d" % (cpt)
+ topology.standalone.getEntry("uid=%s,%s" %(name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
+ assert False
+ except ldap.NO_SUCH_OBJECT:
+ log.info('%s was correctly deleted' % name)
+ pass
+
+ assert cpt == (MAX_ACCOUNTS -1)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
| 0 |
5fe28921810a53dcd31525ba1f675582b6aba0f7
|
389ds/389-ds-base
|
Ticket #48254 - Shell CLI fails with usage errors if an argument containing white spaces is given
Description: In addition to the patch:
Ticket #48254 - CLI db2index fails with usage errors
commit 3507c46c9f1156df11b6cf05eba695d81088b416
apply the same changes to all the shell CLIs that could be given
arguments containing white spaces.
https://fedorahosted.org/389/ticket/48254
Reviewed by [email protected] (Thank you, Mark!!)
|
commit 5fe28921810a53dcd31525ba1f675582b6aba0f7
Author: Noriko Hosoi <[email protected]>
Date: Tue Aug 25 11:48:31 2015 -0700
Ticket #48254 - Shell CLI fails with usage errors if an argument containing white spaces is given
Description: In addition to the patch:
Ticket #48254 - CLI db2index fails with usage errors
commit 3507c46c9f1156df11b6cf05eba695d81088b416
apply the same changes to all the shell CLIs that could be given
arguments containing white spaces.
https://fedorahosted.org/389/ticket/48254
Reviewed by [email protected] (Thank you, Mark!!)
diff --git a/ldap/admin/src/scripts/bak2db.in b/ldap/admin/src/scripts/bak2db.in
index a2e54cc60..ab7c6b3ec 100755
--- a/ldap/admin/src/scripts/bak2db.in
+++ b/ldap/admin/src/scripts/bak2db.in
@@ -44,12 +44,12 @@ do
h) usage
exit 0;;
Z) servid=$OPTARG;;
- n) args=$args" -n $OPTARG";;
+ n) args=$args" -n \"$OPTARG\"";;
q) args=$args" -q";;
- d) args=$args" -d $OPTARG";;
+ d) args=$args" -d \"$OPTARG\"";;
v) args=$args" -v";;
- D) args=$args" -D $OPTARG";;
- i) args=$args" -i $OPTARG";;
+ D) args=$args" -D \"$OPTARG\"";;
+ i) args=$args" -i \"$OPTARG\"";;
a) archivedir=$OPTARG;;
S) args=$args" -S";;
?) usage
@@ -76,4 +76,4 @@ else
archivedir=`pwd`/$archivedir
fi
-@sbindir@/ns-slapd archive2db -D $CONFIG_DIR -a $archivedir $args
+eval @sbindir@/ns-slapd archive2db -D $CONFIG_DIR -a $archivedir $args
diff --git a/ldap/admin/src/scripts/db2bak.in b/ldap/admin/src/scripts/db2bak.in
index 1896c197a..adbe30bf6 100755
--- a/ldap/admin/src/scripts/db2bak.in
+++ b/ldap/admin/src/scripts/db2bak.in
@@ -43,10 +43,10 @@ do
q) args=$args" -q";;
v) args=$args" -v";;
S) args=$args" -S";;
- D) args=$args" -D $OPTARG";;
- i) args=$args" -i $OPTARG";;
+ D) args=$args" -D \"$OPTARG\"";;
+ i) args=$args" -i \"$OPTARG\"";;
a) $bakdir=$OPTARG;;
- d) args=$args" -d $OPTARG";;
+ d) args=$args" -d \"$OPTARG\"";;
Z) servid=$OPTARG;;
?) usage
exit 1;;
@@ -72,4 +72,4 @@ then
fi
echo "Back up directory: $bak_dir"
-@sbindir@/ns-slapd db2archive -D $CONFIG_DIR -a $bak_dir $args
+eval @sbindir@/ns-slapd db2archive -D $CONFIG_DIR -a $bak_dir $args
diff --git a/ldap/admin/src/scripts/db2index.in b/ldap/admin/src/scripts/db2index.in
index 6a0785ecf..c8e90759d 100755
--- a/ldap/admin/src/scripts/db2index.in
+++ b/ldap/admin/src/scripts/db2index.in
@@ -35,15 +35,15 @@ do
h) usage
exit 0;;
Z) servid=$OPTARG;;
- n) args=$args" -n $OPTARG"
+ n) args=$args" -n \"$OPTARG\""
benameopt="set";;
- s) args=$args" -s $OPTARG"
+ s) args=$args" -s \"$OPTARG\""
includeSuffix="set";;
t) args=$args" -t "\"$OPTARG\";;
T) args=$args" -T "\"$OPTARG\";;
- d) args=$args" -d $OPTARG";;
- a) args=$args" -a $OPTARG";;
- x) args=$args" -x $OPTARG";;
+ d) args=$args" -d \"$OPTARG\"";;
+ a) args=$args" -a \"$OPTARG\"";;
+ x) args=$args" -x \"$OPTARG\"";;
v) args=$args" -v";;
S) args=$args" -S";;
D) args=$args" -D $OPTARG";;
diff --git a/ldap/admin/src/scripts/db2ldif.in b/ldap/admin/src/scripts/db2ldif.in
index fcf73a089..e9f7f7e8a 100755
--- a/ldap/admin/src/scripts/db2ldif.in
+++ b/ldap/admin/src/scripts/db2ldif.in
@@ -106,12 +106,12 @@ do
Z) servid=$OPTARG;;
n) benameopt="-n $OPTARG"
required_param="yes";;
- s) includeSuffix="-s $OPTARG"
+ s) includeSuffix="-s \"$OPTARG\""
required_param="yes";;
- x) excludeSuffix="-x $OPTARG";;
- a) outputFile="-a $OPTARG";;
- d) args=$args" -d $OPTARG";;
- D) args=$args" -D $OPTARG";;
+ x) excludeSuffix="-x \"$OPTARG\"";;
+ a) outputFile="-a \"$OPTARG\"";;
+ d) args=$args" -d \"$OPTARG\"";;
+ D) args=$args" -D \"$OPTARG\"";;
N) args=$args" -N";;
E) args=$args" -E";;
S) args=$args" -S";;
@@ -154,7 +154,7 @@ rn=$?
echo "Exported ldif file: $ldif_file"
if [ $rn -eq 1 ]
then
- @sbindir@/ns-slapd db2ldif -D $CONFIG_DIR $benameopt $includeSuffix $excludeSuffix $outputFile $args
+ eval @sbindir@/ns-slapd db2ldif -D $CONFIG_DIR $benameopt $includeSuffix $excludeSuffix $outputFile $args
else
- @sbindir@/ns-slapd db2ldif -D $CONFIG_DIR $benameopt $includeSuffix $excludeSuffix $args -a $ldif_file
+ eval @sbindir@/ns-slapd db2ldif -D $CONFIG_DIR $benameopt $includeSuffix $excludeSuffix $args -a $ldif_file
fi
diff --git a/ldap/admin/src/scripts/dbverify.in b/ldap/admin/src/scripts/dbverify.in
index bbacc17f1..b98e9b2c3 100755
--- a/ldap/admin/src/scripts/dbverify.in
+++ b/ldap/admin/src/scripts/dbverify.in
@@ -33,14 +33,14 @@ do
h) usage
exit 0;;
Z) servid=$OPTARG;;
- n) args=$args" -n $OPTARG";;
- d) args=$args" -d $OPTARG";;
+ n) args=$args" -n \"$OPTARG\"";;
+ d) args=$args" -d \"$OPTARG\"";;
V) args=$args" -V";;
v) args=$args" -v"
display_version="yes";;
f) args=$args" -f";;
- D) args=$args" -D $OPTARG";;
- a) args=$args" -a $OPTARG";;
+ D) args=$args" -D \"$OPTARG\"";;
+ a) args=$args" -a \"$OPTARG\"";;
?) usage
exit 1;;
esac
@@ -57,7 +57,7 @@ fi
. $initfile
-@sbindir@/ns-slapd dbverify -D $CONFIG_DIR $args
+eval @sbindir@/ns-slapd dbverify -D $CONFIG_DIR $args
if [ $display_version = "yes" ]; then
exit 0
fi
diff --git a/ldap/admin/src/scripts/dn2rdn.in b/ldap/admin/src/scripts/dn2rdn.in
index 616969acd..762e63a4c 100755
--- a/ldap/admin/src/scripts/dn2rdn.in
+++ b/ldap/admin/src/scripts/dn2rdn.in
@@ -27,12 +27,12 @@ do
h) usage
exit 0;;
Z) servid=$OPTARG;;
- d) arg=$arg" -d $OPTARG";;
- a) arg=$arg" -a $OPTARG"
+ d) arg=$arg" -d \"$OPTARG\"";;
+ a) arg=$arg" -a \"$OPTARG\""
archive="provided";;
v) arg=$arg" -v";;
f) arg=$arg" -f";;
- D) arg=$arg" -D $OPTARG";;
+ D) arg=$arg" -D \"$OPTARG\"";;
?) usage
exit 1;;
esac
@@ -55,4 +55,4 @@ if [ "$archive" != "provided" ]; then
args=$args"-a $bak_dir"
fi
-@sbindir@/ns-slapd upgradedb -D $CONFIG_DIR -r $args
+eval @sbindir@/ns-slapd upgradedb -D $CONFIG_DIR -r $args
diff --git a/ldap/admin/src/scripts/ldif2db.in b/ldap/admin/src/scripts/ldif2db.in
index a34241afd..3aed4697e 100755
--- a/ldap/admin/src/scripts/ldif2db.in
+++ b/ldap/admin/src/scripts/ldif2db.in
@@ -59,16 +59,16 @@ do
h) usage
exit 0;;
Z) servid=$OPTARG;;
- n) args=$args" -n $OPTARG";;
- i) args=$args" -i $OPTARG";;
- s) args=$args" -s $OPTARG";;
- x) args=$args" -x $OPTARG";;
- c) args=$args" -c $OPTARG";;
- d) args=$args" -d $OPTARG";;
- g) args=$args" -g $OPTARG";;
- G) args=$args" -G $OPTARG";;
- t) args=$args" -t $OPTARG";;
- D) args=$args" -D $OPTARG";;
+ n) args=$args" -n \"$OPTARG\"";;
+ i) args=$args" -i \"$OPTARG\"";;
+ s) args=$args" -s \"$OPTARG\"";;
+ x) args=$args" -x \"$OPTARG\"";;
+ c) args=$args" -c \"$OPTARG\"";;
+ d) args=$args" -d \"$OPTARG\"";;
+ g) args=$args" -g \"$OPTARG\"";;
+ G) args=$args" -G \"$OPTARG\"";;
+ t) args=$args" -t \"$OPTARG\"";;
+ D) args=$args" -D \"$OPTARG\"";;
E) args=$args" -E";;
v) args=$args" -v";;
N) args=$args" -N";;
@@ -104,6 +104,6 @@ if [ $quiet -eq 0 ]; then
echo importing data ...
fi
-@sbindir@/ns-slapd ldif2db -D $CONFIG_DIR $args 2>&1
+eval @sbindir@/ns-slapd ldif2db -D $CONFIG_DIR $args 2>&1
exit $?
diff --git a/ldap/admin/src/scripts/monitor.in b/ldap/admin/src/scripts/monitor.in
index 36a2fc9b0..e9265a126 100755
--- a/ldap/admin/src/scripts/monitor.in
+++ b/ldap/admin/src/scripts/monitor.in
@@ -73,8 +73,8 @@ fi
rm $file
if [ -n "$passwd" ]; then
- dn="-D $rootdn"
- passwd="-w$passwd"
+ dn="-D \"$rootdn\""
+ passwd="-w \"$passwd\""
fi
if [ -n "$ldapiURL" ]
then
@@ -109,9 +109,9 @@ if [ "$security" = "on" ]; then
echo "Using the next most secure protocol(STARTTLS)"
fi
if [ "$openldap" = "yes" ]; then
- ldapsearch -x -LLL -ZZ -h $host -p $port -b "$MDN" -s base $dn $passwd "objectClass=*"
+ eval ldapsearch -x -LLL -ZZ -h $host -p $port -b "$MDN" -s base $dn $passwd "objectClass=*"
else
- ldapsearch -ZZZ -P $certdir -h $host -p $port -b "$MDN" -s base $dn $passwd "objectClass=*"
+ eval ldapsearch -ZZZ -P $certdir -h $host -p $port -b "$MDN" -s base $dn $passwd "objectClass=*"
fi
exit $?
fi
diff --git a/ldap/admin/src/scripts/suffix2instance.in b/ldap/admin/src/scripts/suffix2instance.in
index 7774148e2..d7c666104 100755
--- a/ldap/admin/src/scripts/suffix2instance.in
+++ b/ldap/admin/src/scripts/suffix2instance.in
@@ -24,7 +24,7 @@ while getopts "Z:s:h" flag
do
case $flag in
Z) servid=$OPTARG;;
- s) args=$args" -s $OPTARG";;
+ s) args=$args" -s \"$OPTARG\"";;
h) usage
exit 0;;
?) usage
@@ -55,4 +55,4 @@ then
exit 1
fi
-@sbindir@/ns-slapd suffix2instance -D $CONFIG_DIR $args 2>&1
+eval @sbindir@/ns-slapd suffix2instance -D $CONFIG_DIR $args 2>&1
diff --git a/ldap/admin/src/scripts/upgradedb.in b/ldap/admin/src/scripts/upgradedb.in
index bf600dd63..2b7c79daf 100755
--- a/ldap/admin/src/scripts/upgradedb.in
+++ b/ldap/admin/src/scripts/upgradedb.in
@@ -29,10 +29,10 @@ do
v) args=$args" -v";;
f) args=$args" -f";;
r) args=$args" -r";;
- d) args=$args" -d $OPTARG";;
- a) args=$args" -a $OPTARG"
+ d) args=$args" -d \"$OPTARG\"";;
+ a) args=$args" -a \"$OPTARG\""
archive_provided="yes";;
- D) args=$args" -D $OPTARG";;
+ D) args=$args" -D \"$OPTARG\"";;
h) usage
exit 0;;
esac
@@ -56,4 +56,4 @@ then
fi
echo upgrade index files ...
-@sbindir@/ns-slapd upgradedb -D $CONFIG_DIR $args
+eval @sbindir@/ns-slapd upgradedb -D $CONFIG_DIR $args
diff --git a/ldap/admin/src/scripts/upgradednformat.in b/ldap/admin/src/scripts/upgradednformat.in
index 51585aef8..9de60eaec 100755
--- a/ldap/admin/src/scripts/upgradednformat.in
+++ b/ldap/admin/src/scripts/upgradednformat.in
@@ -36,14 +36,14 @@ do
Z) servid=$OPTARG;;
v) args=$args" -v";;
N) args=$args" -N";;
- d) args=$args" -d $OPTARG";;
- a) args=$args" -a $OPTARG"
+ d) args=$args" -d \"$OPTARG\"";;
+ a) args=$args" -a \"$OPTARG\""
dir="set";;
- n) args=$args" -n $OPTARG"
+ n) args=$args" -n \"$OPTARG\""
be="set";;
h) usage
exit 0;;
- D) args=$args" -D $OPTARG";;
+ D) args=$args" -D \"$OPTARG\"";;
?) usage
exit 1;;
esac
@@ -65,7 +65,7 @@ fi
. $initfile
-@sbindir@/ns-slapd upgradednformat -D $CONFIG_DIR $args
+eval @sbindir@/ns-slapd upgradednformat -D $CONFIG_DIR $args
rc=$?
exit $rc
diff --git a/ldap/admin/src/scripts/vlvindex.in b/ldap/admin/src/scripts/vlvindex.in
index 365e32fc8..a1696bc0f 100755
--- a/ldap/admin/src/scripts/vlvindex.in
+++ b/ldap/admin/src/scripts/vlvindex.in
@@ -29,14 +29,14 @@ do
case $flag in
Z) servid=$OPTARG;;
v) args=$args" -v";;
- s) args=$args" -s $OPTARG";;
- d) args=$args" -d $OPTARG";;
- a) args=$args" -a $OPTARG";;
- T) args=$args" -T $OPTARG";;
+ s) args=$args" -s \"$OPTARG\"";;
+ d) args=$args" -d \"$OPTARG\"";;
+ a) args=$args" -a \"$OPTARG\"";;
+ T) args=$args" -T \"$OPTARG\"";;
S) args=$args" -S";;
- n) args=$args" -n $OPTARG";;
- x) args=$args" -x $OPTARG";;
- D) args=$args" -D $OPTARG";;
+ n) args=$args" -n \"$OPTARG\"";;
+ x) args=$args" -x \"$OPTARG\"";;
+ D) args=$args" -D \"$OPTARG\"";;
h) usage
exit 0;;
?) usage
@@ -61,4 +61,4 @@ then
exit 1
fi
-@sbindir@/ns-slapd db2index -D $CONFIG_DIR $args
+eval @sbindir@/ns-slapd db2index -D $CONFIG_DIR $args
| 0 |
a908c6b57cd77ff2f6e2fe0fe1fa2e0eccba77e0
|
389ds/389-ds-base
|
Ticket #47960 - cookie_change_info returns random negative number if there was no change in a tree
Description: When no changes had been made, the Retro Changelog db
was empty and the search callback sync_handle_cnum_entry in the
Content Synchronization plugin had no chance to be called. If it was
not called, an uninitialized garbage value in Sync_CallBackData was
set to cookie_change_info.
This patch checks whether the search callback sync_handle_cnum_entry
is called or not. If it is not called, cookie_change_info is set to 0.
https://fedorahosted.org/389/ticket/47960
Reviewed by [email protected] and [email protected] (Thank you,
Rich and Thierry!!)
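The fix is a pre-initialized sentinel: scbd.cb_err is set to SYNC_CALLBACK_PREINIT before the internal search, the callback overwrites it with 0 only when it actually runs, and the caller falls back to a change number of 0 if the sentinel is still in place. A minimal Python sketch of that pattern (hypothetical names, not the plugin's C API):

CALLBACK_PREINIT = -1                    # mirrors SYNC_CALLBACK_PREINIT

class CallbackData:
    def __init__(self):
        self.cb_err = CALLBACK_PREINIT   # sentinel: "callback never ran"
        self.changenr = None             # would be uninitialized garbage in the C struct

def handle_cnum_entry(entry, cb_data):
    # Search callback: invoked only when the changelog returns an entry.
    cb_data.changenr = int(entry["changenumber"])
    cb_data.cb_err = 0                   # changenr successfully set

def cookie_change_info(changelog_entries):
    scbd = CallbackData()
    for entry in changelog_entries:      # stand-in for the internal SEQ search
        handle_cnum_entry(entry, scbd)
    if scbd.cb_err == CALLBACK_PREINIT:
        return 0                         # empty changelog: no garbage value leaks out
    return scbd.changenr

print(cookie_change_info([]))                        # 0
print(cookie_change_info([{"changenumber": "42"}]))  # 42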
|
commit a908c6b57cd77ff2f6e2fe0fe1fa2e0eccba77e0
Author: Noriko Hosoi <[email protected]>
Date: Wed Dec 10 17:12:00 2014 -0800
Ticket #47960 - cookie_change_info returns random negative number if there was no change in a tree
Description: When no changes had been made, the Retro Changelog db
was empty and the search callback sync_handle_cnum_entry in the
Content Synchronization plugin had no chance to be called. If it was
not called, an uninitialized garbage value in Sync_CallBackData was
set to cookie_change_info.
This patch checks whether the search callback sync_handle_cnum_entry
is called or not. If it is not called, cookie_change_info is set to 0.
https://fedorahosted.org/389/ticket/47960
Reviewed by [email protected] and [email protected] (Thank you,
Rich and Thierry!!)
diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h
index 9c2d8bea1..0bcec7a72 100644
--- a/ldap/servers/plugins/sync/sync.h
+++ b/ldap/servers/plugins/sync/sync.h
@@ -76,6 +76,8 @@ typedef struct sync_update {
Slapi_Entry *upd_e;
} Sync_UpdateNode;
+#define SYNC_CALLBACK_PREINIT (-1)
+
typedef struct sync_callback {
Slapi_PBlock *orig_pb;
int changenr;
diff --git a/ldap/servers/plugins/sync/sync_util.c b/ldap/servers/plugins/sync/sync_util.c
index de65b9950..af22bcbda 100644
--- a/ldap/servers/plugins/sync/sync_util.c
+++ b/ldap/servers/plugins/sync/sync_util.c
@@ -373,6 +373,7 @@ sync_handle_cnum_entry(Slapi_Entry *e, void *cb_data)
if( NULL != value && NULL != value->bv_val &&
'\0' != value->bv_val[0]) {
cb->changenr = sync_number2int(value->bv_val);
+ cb->cb_err = 0; /* changenr successfully set */
}
}
}
@@ -500,7 +501,7 @@ sync_cookie_get_change_info(Sync_CallBackData *scbd)
slapi_pblock_init(seq_pb);
slapi_seq_internal_set_pb(seq_pb, base, SLAPI_SEQ_LAST, attrname, NULL, NULL, 0, 0,
- plugin_get_default_component_id(), 0);
+ plugin_get_default_component_id(), 0);
rc = slapi_seq_internal_callback_pb (seq_pb, scbd, NULL, sync_handle_cnum_entry, NULL);
slapi_pblock_destroy(seq_pb);
@@ -518,15 +519,20 @@ sync_cookie_create (Slapi_PBlock *pb)
Sync_CallBackData scbd;
int rc;
- Sync_Cookie *sc = (Sync_Cookie *)slapi_ch_malloc(sizeof(Sync_Cookie));
-
+ Sync_Cookie *sc = (Sync_Cookie *)slapi_ch_calloc(1, sizeof(Sync_Cookie));
+ scbd.cb_err = SYNC_CALLBACK_PREINIT;
rc = sync_cookie_get_change_info (&scbd);
if (rc == 0) {
sc->cookie_server_signature = sync_cookie_get_server_info(pb);
sc->cookie_client_signature = sync_cookie_get_client_info(pb);
- sc->cookie_change_info = scbd.changenr;
+ if (scbd.cb_err == SYNC_CALLBACK_PREINIT) {
+ /* changenr is not initialized. */
+ sc->cookie_change_info = 0;
+ } else {
+ sc->cookie_change_info = scbd.changenr;
+ }
} else {
slapi_ch_free ((void **)&sc);
sc = NULL;
| 0 |
69c9f3bf7dd9fe2cadd5eae0ab72ce218b78820e
|
389ds/389-ds-base
|
Ticket #211 - dnaNextValue gets incremented even if the user addition fails
Bug Description: During the pre-op stage we were allocating and assigning
the next dna value to the entry. If the user add or modify
fails in the backend (schema check, etc.), then we just lose
the value from the range.
Fix Description: Now we do the value allocation and assignment in a new
backend_txn_preop function. At this stage most failures
would already have occurred.
However, in the preop we still need to assign a value to
the dnaType, or else we will fail the schema check in the
backend. We set the type to DNA_NEEDS_UPDATE, and in the
be_txn_preop function, we check for this to assign the
real value. We also need to get the next range of
values, if needed, in the preop as well.
Since we are changing the mods in pre_op, in ldbm_modify
we need to grab the mods again after processing the
be_txn_preop functions for index_add_mods(). For the
ldbm operations get copies of the original entries/mods/pb params
and restore them if we hit a DB_LOCK_DEADLOCK.
https://fedorahosted.org/389/ticket/211
Reviewed by: richm and nhosoi (Thanks!)
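The core of the fix is a two-phase assignment: the ordinary pre-op only marks the attribute with the DNA_NEEDS_UPDATE placeholder (and tops up the range if needed), while the backend-transaction pre-op swaps in the real number once schema and ACL checks can no longer reject the operation. A minimal Python sketch of that split, using hypothetical helper names rather than the actual dna.c functions:

DNA_NEEDS_UPDATE = "-2"   # placeholder written by the pre-op
MAGIC_VALUE = "0"         # illustrative magic value that requests generation

class Range:
    def __init__(self, next_value, max_value):
        self.next_value, self.max_value = next_value, max_value

def pre_op(entry, dna_type, rng):
    # Runs before the backend: only mark the attribute, never consume a value.
    if entry.get(dna_type) in (None, MAGIC_VALUE):
        entry[dna_type] = DNA_NEEDS_UPDATE   # keeps the schema check happy
    # (the real plugin also fetches a new range here if the current one is exhausted)

def be_txn_pre_op(entry, dna_type, rng):
    # Runs inside the backend transaction: most failures have already happened.
    if entry.get(dna_type) == DNA_NEEDS_UPDATE:
        if rng.next_value > rng.max_value:
            raise RuntimeError("no more values available")
        entry[dna_type] = str(rng.next_value)   # the value is only consumed now
        rng.next_value += 1

rng = Range(500, 1000)
user = {"uid": "user0", "uidNumber": MAGIC_VALUE}
pre_op(user, "uidNumber", rng)
# ... a schema or ACL failure could still reject the add here without losing 500 ...
be_txn_pre_op(user, "uidNumber", rng)
print(user["uidNumber"], rng.next_value)   # 500 501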
|
commit 69c9f3bf7dd9fe2cadd5eae0ab72ce218b78820e
Author: Mark Reynolds <[email protected]>
Date: Fri Feb 24 20:28:38 2012 -0500
Ticket #211 - dnaNextValue gets incremented even if the user addition fails
Bug Description: During the pre-op stage we were allocating and assigning
the next dna value to the entry. If the user add or modify
fails in the backend (schema check, etc.), then we just lose
the value from the range.
Fix Description: Now we do the value allocation and assignment in a new
backend_txn_preop function. At this stage most failures
would already have occurred.
However, in the preop we still need to assign a value to
the dnaType, or else we will fail the schema check in the
backend. We set the type to DNA_NEEDS_UPDATE, and in the
be_txn_preop function, we check for this to assign the
real value. We also need to get the next range of
values, if needed, in the preop as well.
Since we are changing the mods in pre_op, in ldbm_modify
we need to grab the mods again after processing the
be_txn_preop functions for index_add_mods(). For the
ldbm operations get copies of the original entries/mods/pb params
and restore them if we hit a DB_LOCK_DEADLOCK.
https://fedorahosted.org/389/ticket/211
Reviewed by: richm and nhosoi (Thanks!)
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 1173d50b2..95ec8314b 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -116,6 +116,9 @@
#define DNA_INT_PREOP_DESC "Distributed Numeric Assignment internal preop plugin"
#define DNA_POSTOP_DESC "Distributed Numeric Assignment postop plugin"
#define DNA_EXOP_DESC "Distributed Numeric Assignment range extension extop plugin"
+#define DNA_BE_TXN_PREOP_DESC "Distributed Numeric Assignment backend txn preop plugin"
+
+#define DNA_NEEDS_UPDATE "-2"
#define INTEGER_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.27"
@@ -210,6 +213,7 @@ static int dna_close(Slapi_PBlock * pb);
static int dna_internal_preop_init(Slapi_PBlock *pb);
static int dna_postop_init(Slapi_PBlock * pb);
static int dna_exop_init(Slapi_PBlock * pb);
+static int dna_be_txn_preop_init(Slapi_PBlock *pb);
/**
*
@@ -270,6 +274,9 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype);
static int dna_mod_pre_op(Slapi_PBlock * pb);
static int dna_add_pre_op(Slapi_PBlock * pb);
static int dna_extend_exop(Slapi_PBlock *pb);
+static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype);
+static int dna_be_txn_add_pre_op(Slapi_PBlock *pb);
+static int dna_be_txn_mod_pre_op(Slapi_PBlock *pb);
/**
* debug functions - global, for the debugger
@@ -446,6 +453,24 @@ dna_init(Slapi_PBlock *pb)
status = DNA_FAILURE;
}
+ if (status == DNA_SUCCESS) {
+ plugin_type = "betxnpreoperation";
+
+ /* the config change checking post op */
+ if (slapi_register_plugin(plugin_type, /* op type */
+ 1, /* Enabled */
+ "dna_init", /* this function desc */
+ dna_be_txn_preop_init, /* init func for post op */
+ DNA_BE_TXN_PREOP_DESC, /* plugin desc */
+ NULL, /* ? */
+ plugin_identity /* access control */
+ )) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_init: failed to register be_txn_pre_op plugin\n");
+ status = DNA_FAILURE;
+ }
+ }
+
slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
"<-- dna_init\n");
return status;
@@ -525,6 +550,21 @@ dna_exop_init(Slapi_PBlock * pb)
return status;
}
+static int
+dna_be_txn_preop_init(Slapi_PBlock *pb){
+ int status = DNA_SUCCESS;
+
+ if( slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *) &pdesc) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN, (void *)dna_be_txn_add_pre_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN, (void *)dna_be_txn_mod_pre_op) != 0){
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_init: failed to register be_txn_pre_op plugin\n");
+ status = DNA_FAILURE;
+ }
+
+ return status;
+}
/*
dna_start
@@ -2075,7 +2115,7 @@ static int dna_get_next_value(struct configEntry *config_entry,
if (LDAP_SUCCESS != ret) {
/* check if we overflowed the configured range */
if (setval > config_entry->maxval) {
- /* try for a new range or fail */
+ /* this should not happen, as pre_op should have allocated the next range */
ret = dna_fix_maxval(config_entry);
if (LDAP_SUCCESS != ret) {
slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
@@ -2135,7 +2175,7 @@ static int dna_get_next_value(struct configEntry *config_entry,
}
/* update our cached config */
- dna_notice_allocation(config_entry, nextval, setval, 1);
+ dna_notice_allocation(config_entry, nextval, setval, 0);
}
done:
@@ -2814,21 +2854,31 @@ dna_create_valcheck_filter(struct configEntry *config_entry, PRUint64 value, cha
static int dna_pre_op(Slapi_PBlock * pb, int modtype)
{
- char *dn = 0;
- PRCList *list = 0;
- struct configEntry *config_entry = 0;
- struct slapi_entry *e = 0;
- Slapi_Entry *resulting_e = 0;
- char *value = 0;
+ struct configEntry *config_entry = NULL;
+ struct slapi_entry *e = NULL;
+ Slapi_Entry *test_e = NULL;
+ Slapi_Entry *resulting_e = NULL;
+ Slapi_DN *tmp_dn = NULL;
+ PRCList *list = NULL;
+ struct berval *bv = NULL;
char **types_to_generate = NULL;
char **generated_types = NULL;
- Slapi_Mods *smods = 0;
- Slapi_Mod *smod = 0;
+ char *errstr = NULL;
+ char *dn = NULL;
+ char *value = NULL;
+ char *type = NULL;
+ Slapi_Mod *next_mod = NULL;
+ Slapi_Mods *smods = NULL;
+ Slapi_Mod *smod = NULL;
+ Slapi_Attr *attr = NULL;
LDAPMod **mods;
+ PRUint64 setval = 0;
int free_entry = 0;
- char *errstr = NULL;
- int i = 0;
+ int e_numvals = 0;
+ int numvals = 0;
int ret = 0;
+ int len = 0;
+ int i;
slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
"--> dna_pre_op\n");
@@ -2843,18 +2893,7 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
if (LDAP_CHANGETYPE_ADD == modtype) {
slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
} else {
- /* xxxPAR: Ideally SLAPI_MODIFY_EXISTING_ENTRY should be
- * available but it turns out that is only true if you are
- * a dbm backend pre-op plugin - lucky dbm backend pre-op
- * plugins.
- * I think that is wrong since the entry is useful for filter
- * tests and schema checks and this plugin shouldn't be limited
- * to a single backend type, but I don't want that fight right
- * now so we go get the entry here
- *
- slapi_pblock_get( pb, SLAPI_MODIFY_EXISTING_ENTRY, &e);
- */
- Slapi_DN *tmp_dn = dna_get_sdn(pb);
+ tmp_dn = dna_get_sdn(pb);
if (tmp_dn) {
slapi_search_internal_get_entry(tmp_dn, 0, &e, getPluginID());
free_entry = 1;
@@ -2879,7 +2918,7 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
}
}
- if (0 == e)
+ if (e == NULL)
goto bailmod;
if (dna_dn_is_config(dn)) {
@@ -2887,7 +2926,6 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
* This allows us to reject invalid config changes
* here at the pre-op stage. Applying the config
* needs to be done at the post-op stage. */
- Slapi_Entry *test_e = NULL;
/* For a MOD, we need to check the resulting entry */
if (LDAP_CHANGETYPE_ADD == modtype) {
@@ -2906,31 +2944,365 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
"DNA configuration.");
}
}
-
/* We're done, so just bail. */
goto bailmod;
- }
-
- /* See if the operation is going to be rejected by the ACIs. There's no use in
- * us worrying about the change if it's going to be rejected. */
- if (LDAP_CHANGETYPE_MODIFY == modtype) {
- if (slapi_acl_check_mods(pb, e, slapi_mods_get_ldapmods_byref(smods), NULL) != LDAP_SUCCESS) {
- goto bailmod;
- }
} else {
- if (slapi_access_allowed(pb, e, NULL, NULL, SLAPI_ACL_ADD) != LDAP_SUCCESS) {
+ /* Bail out if the plug-in close function was just called. */
+ if (!g_plugin_started) {
goto bailmod;
}
- }
+ /*
+ * Find the config that matches this entry, Set the types that need to be
+ * generated to DNA_NEEDS_UPDATE. The be_txn_preop will set the values if
+ * the operation hasn't been rejected by that point.
+ *
+ * We also check if we need to get the next range of values, and grab them.
+ * We do this here so we don't have to do it in the be_txn_preop.
+ */
+ dna_read_lock();
- dna_read_lock();
+ if (!PR_CLIST_IS_EMPTY(dna_global_config)) {
+ list = PR_LIST_HEAD(dna_global_config);
+
+ while (list != dna_global_config && LDAP_SUCCESS == ret) {
+ config_entry = (struct configEntry *) list;
+
+ /* Did we already service all of these configured types? */
+ if (dna_list_contains_types(generated_types, config_entry->types)) {
+ goto next;
+ }
+
+ /* is the entry in scope? */
+ if (config_entry->scope) {
+ if (!slapi_dn_issuffix(dn, config_entry->scope))
+ goto next;
+ }
+
+ /* does the entry match the filter? */
+ if (config_entry->slapi_filter) {
+ /* For a MOD operation, we need to check the filter
+ * against the resulting entry. */
+ if (LDAP_CHANGETYPE_ADD == modtype) {
+ test_e = e;
+ } else {
+ test_e = resulting_e;
+ }
+
+ if (LDAP_SUCCESS != slapi_vattr_filter_test(pb, test_e, config_entry->slapi_filter, 0))
+ goto next;
+ }
+
+ if (LDAP_CHANGETYPE_ADD == modtype) {
+ if (dna_is_multitype_range(config_entry)) {
+ /* For a multi-type range, we only generate a value
+ * for types where the magic value is set. We do not
+ * generate a value for missing types. */
+ for (i = 0; config_entry->types && config_entry->types[i]; i++) {
+ value = slapi_entry_attr_get_charptr(e, config_entry->types[i]);
+
+ if (value && !slapi_UTF8CASECMP(config_entry->generate, value)) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(config_entry->types[i]));
+ }
+ slapi_ch_free_string(&value);
+ }
+ } else {
+ /* For a single type range, we generate the value if
+ * the magic value is set or if the type is missing. */
+ value = slapi_entry_attr_get_charptr(e, config_entry->types[0]);
+
+ if ((value && !slapi_UTF8CASECMP(config_entry->generate, value)) || 0 == value) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(config_entry->types[0]));
+ }
+ slapi_ch_free_string(&value);
+ }
+ } else {
+ /* check mods for magic value */
+ next_mod = slapi_mod_new();
+ smod = slapi_mods_get_first_smod(smods, next_mod);
+ while (smod) {
+ type = (char *)slapi_mod_get_type(smod);
+
+ /* See if the type matches any configured type. */
+ if (dna_list_contains_type(config_entry->types, type)) {
+ /* If all values are being deleted, we need to
+ * generate a new value. We don't do this for
+ * multi-type ranges since they require the magic
+ * value to be specified to trigger generation. */
+ if (SLAPI_IS_MOD_DELETE(slapi_mod_get_operation(smod)) &&
+ !dna_is_multitype_range(config_entry)) {
+ numvals = slapi_mod_get_num_values(smod);
+
+ if (numvals == 0) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(type));
+ } else {
+ e_numvals = 0;
+ slapi_entry_attr_find(e, type, &attr);
+ if (attr) {
+ slapi_attr_get_numvalues(attr, &e_numvals);
+ if (numvals >= e_numvals) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(type));
+ }
+ }
+ }
+ } else {
+ /* This is either adding or replacing a value */
+ bv = slapi_mod_get_first_value(smod);
+
+ /* If this type is already in the to be generated
+ * list, a previous mod in this same modify operation
+ * either removed all values or set the magic value.
+ * It's possible that this mod is adding a valid value,
+ * which means we would not want to generate a new value.
+ * It is safe to remove this type from the to be
+ * generated list since it will be re-added here if
+ * necessary. */
+ if (dna_list_contains_type(types_to_generate, type)) {
+ dna_list_remove_type(types_to_generate, type);
+ }
+
+ /* If we have a value, see if it's the magic value. */
+ if (bv) {
+ len = strlen(config_entry->generate);
+ if (len == bv->bv_len) {
+ if (!slapi_UTF8NCASECMP(bv->bv_val,
+ config_entry->generate,
+ len)) {
+ slapi_ch_array_add(&types_to_generate,
+ slapi_ch_strdup(type));
+ }
+ }
+ } else if (!dna_is_multitype_range(config_entry)) {
+ /* This is a replace with no new values, so we need
+ * to generate a new value if this is not a multi-type
+ * range. */
+ slapi_ch_array_add(&types_to_generate,slapi_ch_strdup(type));
+ }
+ }
+ }
+ slapi_mod_done(next_mod);
+ smod = slapi_mods_get_next_smod(smods, next_mod);
+ }
+ slapi_mod_free(&next_mod);
+ }
+
+ /* We need to perform one last check for modify operations. If an
+ * entry within the scope has not triggered generation yet, we need
+ * to see if a value exists for the managed type in the resulting
+ * entry. This will catch a modify operation that brings an entry
+ * into scope for a managed range, but doesn't supply a value for
+ * the managed type. We don't do this for multi-type ranges. */
+ if ((LDAP_CHANGETYPE_MODIFY == modtype) && (!types_to_generate ||
+ (types_to_generate && !types_to_generate[0])) &&
+ !dna_is_multitype_range(config_entry)) {
+ if (slapi_entry_attr_find(resulting_e, config_entry->types[0], &attr) != 0) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(config_entry->types[0]));
+ }
+ }
+
+ if (types_to_generate && types_to_generate[0]) {
+ /* do the mod */
+ if (LDAP_CHANGETYPE_ADD == modtype) {
+ /* add - add to entry */
+ for (i = 0; types_to_generate && types_to_generate[i]; i++) {
+ slapi_entry_attr_set_charptr(e, types_to_generate[i],
+ slapi_ch_strdup(DNA_NEEDS_UPDATE));
+ }
+ } else {
+ /* mod - add to mods */
+ for (i = 0; types_to_generate && types_to_generate[i]; i++) {
+ slapi_mods_add_string(smods, LDAP_MOD_REPLACE, types_to_generate[i],
+ slapi_ch_strdup(DNA_NEEDS_UPDATE));
+ }
+ }
+
+ /* Make sure we don't generate for this
+ * type again by keeping a list of types
+ * we have generated for already.
+ */
+ if (generated_types == NULL) {
+ /* If we don't have a list of generated types yet,
+ * we can just use the types_to_generate list so
+ * we don't have to allocate anything. */
+ generated_types = types_to_generate;
+ types_to_generate = NULL;
+ } else {
+ /* Just reuse the elements out of types_to_generate for the
+ * generated types list to avoid allocating them again. */
+ for (i = 0; types_to_generate && types_to_generate[i]; ++i) {
+ slapi_ch_array_add(&generated_types, types_to_generate[i]);
+ types_to_generate[i] = NULL;
+ }
+ }
+
+ /* free up */
+ slapi_ch_free_string(&value);
+ slapi_ch_array_free(types_to_generate);
+ } else if (types_to_generate) {
+ slapi_ch_free((void **)&types_to_generate);
+ }
+
+ /*
+ * Now grab the next value and see if we need to get the next range
+ */
+ slapi_lock_mutex(config_entry->lock);
+
+ ret = dna_first_free_value(config_entry, &setval);
+ if (LDAP_SUCCESS != ret) {
+ /* check if we overflowed the configured range */
+ if (setval > config_entry->maxval) {
+ /* try for a new range or fail */
+ ret = dna_fix_maxval(config_entry);
+ if (LDAP_SUCCESS != ret) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_pre_op: no more values available!!\n");
+ slapi_unlock_mutex(config_entry->lock);
+ break;
+ }
+
+ /* Make sure dna_first_free_value() doesn't error out */
+ ret = dna_first_free_value(config_entry, &setval);
+ if (LDAP_SUCCESS != ret){
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_pre_op: failed to allocate a new ID\n");
+ slapi_unlock_mutex(config_entry->lock);
+ break;
+ }
+ } else {
+ /* dna_first_free_value() failed for some unknown reason */
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_pre_op: failed to allocate a new ID!!\n");
+ slapi_unlock_mutex(config_entry->lock);
+ break;
+ }
+ }
+
+ slapi_unlock_mutex(config_entry->lock);
+
+ next:
+ list = PR_NEXT_LINK(list);
+ }
+ }
- /* Bail out if the plug-in close function was just called. */
- if (!g_plugin_started) {
dna_unlock();
+ }
+
+ bailmod:
+ if (LDAP_CHANGETYPE_MODIFY == modtype) {
+ /* Put the updated mods back into place. */
+ mods = slapi_mods_get_ldapmods_passout(smods);
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, mods);
+ slapi_mods_free(&smods);
+ }
+
+ bail:
+ slapi_ch_array_free(generated_types);
+
+ if (free_entry && e)
+ slapi_entry_free(e);
+
+ if (resulting_e)
+ slapi_entry_free(resulting_e);
+
+ if (ret) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
+ "dna_pre_op: operation failure [%d]\n", ret);
+ slapi_send_ldap_result(pb, ret, NULL, errstr, 0, NULL);
+ slapi_ch_free((void **)&errstr);
+ ret = DNA_FAILURE;
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
+ "<-- dna_pre_op\n");
+
+ return ret;
+}
+
+static int dna_add_pre_op(Slapi_PBlock * pb)
+{
+ return dna_pre_op(pb, LDAP_CHANGETYPE_ADD);
+}
+
+static int dna_mod_pre_op(Slapi_PBlock * pb)
+{
+ return dna_pre_op(pb, LDAP_CHANGETYPE_MODIFY);
+}
+
+static int dna_be_txn_add_pre_op(Slapi_PBlock *pb)
+{
+ return dna_be_txn_pre_op(pb, LDAP_CHANGETYPE_ADD);
+}
+
+static int dna_be_txn_mod_pre_op(Slapi_PBlock *pb)
+{
+ return dna_be_txn_pre_op(pb, LDAP_CHANGETYPE_MODIFY);
+}
+
+/*
+ * dna_be_txn_pre_op()
+ *
+ * In the preop if we found that we need to update a DNA attribute,
+ * we set the value to "-2" or DNA_NEEDS_UPDATE, because we don't want
+ * to do the value allocation in the preop as the operation could fail -
+ * resulting in lost values from the range. So we need to ensure
+ * that the value will not be lost by performing the value allocation
+ * in the backend txn preop.
+ *
+ * Although the modifications have already been applied in backend,
+ * we still need to add the modification of the real value to the
+ * existing Slapi_Mods, so that the value gets indexed correctly.
+ *
+ * Also, since the modifications have already been applied to the entry
+ * in the backend, we need to manually update the entry with the new value.
+ */
+static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
+{
+ struct configEntry *config_entry = NULL;
+ struct slapi_entry *e = NULL;
+ Slapi_Mods *smods = NULL;
+ Slapi_Mod *smod = NULL;
+ Slapi_Mod *next_mod = NULL;
+ Slapi_Attr *attr = NULL;
+ LDAPMod **mods = NULL;
+ struct berval *bv = NULL;
+ PRCList *list = NULL;
+ char *value = NULL;
+ char **types_to_generate = NULL;
+ char **generated_types = NULL;
+ char *new_value = NULL;
+ char *errstr = NULL;
+ char *dn = NULL;
+ char *type = NULL;
+ int numvals, e_numvals = 0;
+ int i, len, ret = 0;
+
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
+ "--> dna_be_txn_pre_op\n");
+
+ if (!g_plugin_started)
goto bail;
+
+ if (0 == (dn = dna_get_dn(pb)))
+ goto bail;
+
+ if (dna_dn_is_config(dn))
+ goto bail;
+
+ if (LDAP_CHANGETYPE_ADD == modtype) {
+ slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
+ } else {
+ slapi_pblock_get(pb, SLAPI_MODIFY_EXISTING_ENTRY, &e);
+ }
+
+ if (e == NULL){
+ goto bail;
+ } else if (LDAP_CHANGETYPE_MODIFY == modtype){
+ slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+ smods = slapi_mods_new();
+ slapi_mods_init_passin(smods, mods);
}
+ dna_read_lock();
+
if (!PR_CLIST_IS_EMPTY(dna_global_config)) {
list = PR_LIST_HEAD(dna_global_config);
@@ -2950,24 +3322,10 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
/* does the entry match the filter? */
if (config_entry->slapi_filter) {
- Slapi_Entry *test_e = NULL;
-
- /* For a MOD operation, we need to check the filter
- * against the resulting entry. */
- if (LDAP_CHANGETYPE_ADD == modtype) {
- test_e = e;
- } else {
- test_e = resulting_e;
- }
-
- if (LDAP_SUCCESS != slapi_vattr_filter_test(pb,
- test_e,
- config_entry->
- slapi_filter, 0))
+ if(LDAP_SUCCESS != slapi_vattr_filter_test(pb,e,config_entry->slapi_filter, 0))
goto next;
}
-
if (LDAP_CHANGETYPE_ADD == modtype) {
if (dna_is_multitype_range(config_entry)) {
/* For a multi-type range, we only generate a value
@@ -2976,11 +3334,10 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
for (i = 0; config_entry->types && config_entry->types[i]; i++) {
value = slapi_entry_attr_get_charptr(e, config_entry->types[i]);
- if (value && !slapi_UTF8CASECMP(config_entry->generate, value)) {
+ if (value && !slapi_UTF8CASECMP(value, DNA_NEEDS_UPDATE)) {
slapi_ch_array_add(&types_to_generate,
slapi_ch_strdup(config_entry->types[i]));
}
-
slapi_ch_free_string(&value);
}
} else {
@@ -2988,39 +3345,30 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
* the magic value is set or if the type is missing. */
value = slapi_entry_attr_get_charptr(e, config_entry->types[0]);
- if ((value && !slapi_UTF8CASECMP(config_entry->generate, value))
- || 0 == value) {
+ if (0 == value || (value && !slapi_UTF8CASECMP(value, DNA_NEEDS_UPDATE)) ) {
slapi_ch_array_add(&types_to_generate,
slapi_ch_strdup(config_entry->types[0]));
}
-
slapi_ch_free_string(&value);
}
} else {
- /* check mods for magic value */
- Slapi_Mod *next_mod = slapi_mod_new();
+ /* check mods for DNA_NEEDS_UPDATE*/
+ next_mod = slapi_mod_new();
smod = slapi_mods_get_first_smod(smods, next_mod);
while (smod) {
- char *type = (char *)
- slapi_mod_get_type(smod);
+ type = (char *)slapi_mod_get_type(smod);
/* See if the type matches any configured type. */
if (dna_list_contains_type(config_entry->types, type)) {
/* If all values are being deleted, we need to
- * generate a new value. We don't do this for
- * multi-type ranges since they require the magic
- * value to be specified to trigger generation. */
+ * generate a new value. */
if (SLAPI_IS_MOD_DELETE(slapi_mod_get_operation(smod)) &&
- !dna_is_multitype_range(config_entry)) {
- int numvals = slapi_mod_get_num_values(smod);
+ !dna_is_multitype_range(config_entry)) {
+ numvals = slapi_mod_get_num_values(smod);
if (numvals == 0) {
- slapi_ch_array_add(&types_to_generate,
- slapi_ch_strdup(type));
+ slapi_ch_array_add(&types_to_generate,slapi_ch_strdup(type));
} else {
- Slapi_Attr *attr = NULL;
- int e_numvals = 0;
-
slapi_entry_attr_find(e, type, &attr);
if (attr) {
slapi_attr_get_numvalues(attr, &e_numvals);
@@ -3032,48 +3380,29 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
}
} else {
/* This is either adding or replacing a value */
- struct berval *bv = slapi_mod_get_first_value(smod);
-
- /* If this type is already in the to be generated
- * list, a previous mod in this same modify operation
- * either removed all values or set the magic value.
- * It's possible that this mod is adding a valid value,
- * which means we would not want to generate a new value.
- * It is safe to remove this type from the to be
- * generated list since it will be re-added here if
- * necessary. */
- if (dna_list_contains_type(types_to_generate, type)) {
- dna_list_remove_type(types_to_generate, type);
- }
-
- /* If we have a value, see if it's the magic value. */
- if (bv) {
- int len = strlen(config_entry->generate);
- if (len == bv->bv_len) {
- if (!slapi_UTF8NCASECMP(bv->bv_val,
- config_entry->generate,
- len)) {
- slapi_ch_array_add(&types_to_generate,
- slapi_ch_strdup(type));
- }
- }
- } else if (!dna_is_multitype_range(config_entry)) {
- /* This is a replace with no new values, so we need
- * to generate a new value if this is not a multi-type
- * range. */
- slapi_ch_array_add(&types_to_generate,
- slapi_ch_strdup(type));
- }
- }
- }
-
- slapi_mod_done(next_mod);
- smod = slapi_mods_get_next_smod(smods, next_mod);
- }
-
- slapi_mod_free(&next_mod);
- }
-
+ bv = slapi_mod_get_first_value(smod);
+
+ if (dna_list_contains_type(types_to_generate, type)) {
+ dna_list_remove_type(types_to_generate, type);
+ }
+
+ /* If we have a value, see if it's the magic value. */
+ if (bv) {
+ if (!slapi_UTF8CASECMP(bv->bv_val,DNA_NEEDS_UPDATE)) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(type));
+ }
+ } else if (!dna_is_multitype_range(config_entry)) {
+ /* This is a replace with no new values, so we need
+ * to generate a new value if this is not a multi-type range. */
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(type));
+ }
+ }
+ }
+ slapi_mod_done(next_mod);
+ smod = slapi_mods_get_next_smod(smods, next_mod);
+ }
+ slapi_mod_free(&next_mod);
+ }
/* We need to perform one last check for modify operations. If an
* entry within the scope has not triggered generation yet, we need
* to see if a value exists for the managed type in the resulting
@@ -3081,19 +3410,13 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
* into scope for a managed range, but doesn't supply a value for
* the managed type. We don't do this for multi-type ranges. */
if ((LDAP_CHANGETYPE_MODIFY == modtype) && (!types_to_generate ||
- (types_to_generate && !types_to_generate[0])) &&
- !dna_is_multitype_range(config_entry)) {
- Slapi_Attr *attr = NULL;
- if (slapi_entry_attr_find(resulting_e, config_entry->types[0], &attr) != 0) {
- slapi_ch_array_add(&types_to_generate,
- slapi_ch_strdup(config_entry->types[0]));
+ (types_to_generate && !types_to_generate[0])) && !dna_is_multitype_range(config_entry)) {
+ if (slapi_entry_attr_find(e, config_entry->types[0], &attr) != 0) {
+ slapi_ch_array_add(&types_to_generate, slapi_ch_strdup(config_entry->types[0]));
}
}
if (types_to_generate && types_to_generate[0]) {
- char *new_value;
- int len;
-
/* create the value to add */
ret = dna_get_next_value(config_entry, &value);
if (DNA_SUCCESS != ret) {
@@ -3116,25 +3439,23 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
strcat(new_value, value);
} else
strcpy(new_value, value);
-
/* do the mod */
if (LDAP_CHANGETYPE_ADD == modtype) {
/* add - add to entry */
for (i = 0; types_to_generate && types_to_generate[i]; i++) {
- slapi_entry_attr_set_charptr(e,
- types_to_generate[i],
- new_value);
+ slapi_entry_attr_set_charptr(e, types_to_generate[i], new_value);
}
} else {
/* mod - add to mods */
for (i = 0; types_to_generate && types_to_generate[i]; i++) {
- slapi_mods_add_string(smods,
- LDAP_MOD_REPLACE,
- types_to_generate[i], new_value);
+ slapi_mods_add_string(smods, LDAP_MOD_REPLACE, types_to_generate[i], new_value);
+
+ /* we need to directly update the entry in be_txn_preop */
+ slapi_entry_attr_set_charptr(e, types_to_generate[i], new_value);
}
}
-
- /* Make sure we don't generate for this
+ /*
+ * Make sure we don't generate for this
* type again by keeping a list of types
* we have generated for already.
*/
@@ -3166,10 +3487,10 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
list = PR_NEXT_LINK(list);
}
}
-
dna_unlock();
- bailmod:
+bail:
+
if (LDAP_CHANGETYPE_MODIFY == modtype) {
/* Put the updated mods back into place. */
mods = slapi_mods_get_ldapmods_passout(smods);
@@ -3177,37 +3498,20 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype)
slapi_mods_free(&smods);
}
- bail:
slapi_ch_array_free(generated_types);
- if (free_entry && e)
- slapi_entry_free(e);
-
- if (resulting_e)
- slapi_entry_free(resulting_e);
-
if (ret) {
slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
- "dna_pre_op: operation failure [%d]\n", ret);
+ "dna_be_txn_pre_op: operation failure [%d]\n", ret);
slapi_send_ldap_result(pb, ret, NULL, errstr, 0, NULL);
slapi_ch_free((void **)&errstr);
ret = DNA_FAILURE;
}
slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
- "<-- dna_pre_op\n");
-
- return ret;
-}
+ "<-- dna_be_txn_pre_op\n");
-static int dna_add_pre_op(Slapi_PBlock * pb)
-{
- return dna_pre_op(pb, LDAP_CHANGETYPE_ADD);
-}
-
-static int dna_mod_pre_op(Slapi_PBlock * pb)
-{
- return dna_pre_op(pb, LDAP_CHANGETYPE_MODIFY);
+ return ret;
}
static int dna_config_check_post_op(Slapi_PBlock * pb)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index 8cecadaab..5dbd89a7b 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -84,6 +84,7 @@ ldbm_back_add( Slapi_PBlock *pb )
struct backentry *tombstoneentry = NULL;
struct backentry *addingentry = NULL;
struct backentry *parententry = NULL;
+ struct backentry *originalentry = NULL;
ID pid;
int isroot;
char *errbuf= NULL;
@@ -661,6 +662,10 @@ ldbm_back_add( Slapi_PBlock *pb )
}
}
+ if ( (originalentry = backentry_dup(addingentry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
/*
* So, we believe that no code up till here actually added anything
* to persistent store. From now on, we're transacted
@@ -672,6 +677,15 @@ ldbm_back_add( Slapi_PBlock *pb )
/* txn is no longer valid - reset slapi_txn to the parent */
txn.back_txn_txn = NULL;
slapi_pblock_set(pb, SLAPI_TXN, parent_txn);
+
+ backentry_free(&addingentry);
+ slapi_pblock_set( pb, SLAPI_ADD_ENTRY, originalentry->ep_entry );
+ addingentry = originalentry;
+ if ( (originalentry = backentry_dup( addingentry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
/* We're re-trying */
LDAPDebug( LDAP_DEBUG_TRACE, "Add Retrying Transaction\n", 0, 0, 0 );
#ifndef LDBM_NO_BACKOFF_DELAY
@@ -1030,6 +1044,7 @@ common_return:
{
slapi_send_ldap_result( pb, ldap_result_code, ldap_result_matcheddn, ldap_result_message, 0, NULL );
}
+ backentry_free(&originalentry);
slapi_sdn_done(&parentsdn);
slapi_ch_free( (void**)&ldap_result_matcheddn );
slapi_ch_free( (void**)&errbuf );
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 26255061b..77165fcf3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -60,6 +60,7 @@ ldbm_back_delete( Slapi_PBlock *pb )
struct ldbminfo *li = NULL;
struct backentry *e = NULL;
struct backentry *tombstone = NULL;
+ struct backentry *original_entry = NULL;
char *dn = NULL;
back_txn txn;
back_txnid parent_txn;
@@ -453,6 +454,11 @@ ldbm_back_delete( Slapi_PBlock *pb )
}
}
+ if ( (original_entry = backentry_dup( e )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
/*
* So, we believe that no code up till here actually added anything
* to the persistent store. From now on, we're transacted
@@ -462,6 +468,14 @@ ldbm_back_delete( Slapi_PBlock *pb )
for (retry_count = 0; retry_count < RETRY_TIMES; retry_count++) {
if (txn.back_txn_txn && (txn.back_txn_txn != parent_txn)) {
dblayer_txn_abort(li,&txn);
+
+ backentry_free(&e);
+ slapi_pblock_set( pb, SLAPI_DELETE_EXISTING_ENTRY, original_entry->ep_entry );
+ e = original_entry;
+ if ( (original_entry = backentry_dup( e )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
/* We're re-trying */
LDAPDebug( LDAP_DEBUG_TRACE, "Delete Retrying Transaction\n", 0, 0, 0 );
#ifndef LDBM_NO_BACKOFF_DELAY
@@ -1057,6 +1071,7 @@ diskfull_return:
slapi_pblock_set(pb, SLAPI_URP_NAMING_COLLISION_DN, slapi_ch_strdup (dn));
}
done_with_pblock_entry(pb, SLAPI_DELETE_EXISTING_ENTRY);
+ backentry_free(&original_entry);
slapi_ch_free((void**)&errbuf);
slapi_sdn_done(&sdn);
slapi_ch_free_string(&e_uniqueid);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 7d16d0cda..b74e7188d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -193,9 +193,10 @@ ldbm_back_modify( Slapi_PBlock *pb )
backend *be;
ldbm_instance *inst = NULL;
struct ldbminfo *li;
- struct backentry *e = NULL, *ec = NULL;
+ struct backentry *e = NULL, *ec = NULL, *original_entry = NULL;
Slapi_Entry *postentry = NULL;
LDAPMod **mods;
+ LDAPMod **mods_original;
Slapi_Mods smods = {0};
back_txn txn;
back_txnid parent_txn;
@@ -411,6 +412,17 @@ ldbm_back_modify( Slapi_PBlock *pb )
}
}
+ /*
+ * Grab a copy of the mods and the entry in case the be_txn_preop changes
+ * them. If we have a failure, then we need to reset the mods to
+ * their original state.
+ */
+ mods_original = copy_mods(mods);
+ if ( (original_entry = backentry_dup( e )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
txn.back_txn_txn = NULL; /* ready to create the child transaction */
for (retry_count = 0; retry_count < RETRY_TIMES; retry_count++) {
@@ -419,6 +431,22 @@ ldbm_back_modify( Slapi_PBlock *pb )
/* txn is no longer valid - reset slapi_txn to the parent */
txn.back_txn_txn = NULL;
slapi_pblock_set(pb, SLAPI_TXN, parent_txn);
+ /*
+ * Since be_txn_preop functions could have modified the entry/mods,
+ * We need to grab the current mods, free them, and restore the
+ * originals. Same thing for the entry.
+ */
+ slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+ ldap_mods_free(mods, 1);
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, copy_mods(mods_original));
+ backentry_free(&ec);
+ slapi_pblock_set( pb, SLAPI_MODIFY_EXISTING_ENTRY, original_entry->ep_entry );
+ ec = original_entry;
+ if ( (original_entry = backentry_dup( e )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
LDAPDebug( LDAP_DEBUG_TRACE, "Modify Retrying Transaction\n", 0, 0, 0 );
#ifndef LDBM_NO_BACKOFF_DELAY
{
@@ -449,6 +477,9 @@ ldbm_back_modify( Slapi_PBlock *pb )
goto error_return;
}
+ /* the mods might have been changed, so get the latest */
+ slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &mods );
+
/*
* Update the ID to Entry index.
* Note that id2entry_add replaces the entry, so the Entry ID stays the same.
@@ -653,8 +684,12 @@ common_return:
{
slapi_send_ldap_result( pb, ldap_result_code, NULL, ldap_result_message, 0, NULL );
}
-
+
+ /* free our backups */
+ ldap_mods_free(mods_original, 1);
+ backentry_free(&original_entry);
slapi_ch_free_string(&errbuf);
+
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 2a974eab4..796b421c7 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -82,6 +82,9 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
struct backentry *parententry= NULL;
struct backentry *newparententry= NULL;
struct backentry *existingentry= NULL;
+ struct backentry *original_entry = NULL;
+ struct backentry *original_parent = NULL;
+ struct backentry *original_newparent = NULL;
modify_context parent_modify_context = {0};
modify_context newparent_modify_context = {0};
modify_context ruv_c = {0};
@@ -95,6 +98,9 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
Slapi_DN dn_newrdn;
Slapi_DN *dn_newsuperiordn = NULL;
Slapi_DN dn_parentdn;
+ Slapi_DN *orig_dn_newsuperiordn = NULL;
+ Slapi_Entry *target_entry = NULL;
+ Slapi_Entry *original_targetentry = NULL;
int rc;
int isroot;
LDAPMod **mods;
@@ -110,7 +116,10 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
entry_address oldparent_addr;
entry_address *newsuperior_addr;
char ebuf[BUFSIZ];
+ char *original_newrdn = NULL;
CSN *opcsn = NULL;
+ const char *newdn = NULL;
+ char *newrdn = NULL;
/* sdn & parentsdn need to be initialized before "goto *_return" */
slapi_sdn_init(&dn_newdn);
@@ -212,9 +221,6 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
/* <new rdn>,<new superior> */
if(slapi_isbitset_int(rc,SLAPI_RTN_BIT_FETCH_EXISTING_DN_ENTRY))
{
- const char *newdn = NULL;
- char * newrdn = NULL;
-
/* see if an entry with the new name already exists */
done_with_pblock_entry(pb,SLAPI_MODRDN_EXISTING_ENTRY); /* Could be through this multiple times */
slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &newrdn);
@@ -694,6 +700,33 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
}
}
+ /*
+ * make copies of the originals, no need to copy the mods because
+ * we have already copied them
+ */
+ if ( (original_entry = backentry_dup( ec )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+ if ( (original_parent = backentry_dup( parententry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+ if ( (original_newparent = backentry_dup( newparententry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+ slapi_pblock_get(pb, SLAPI_MODRDN_TARGET_ENTRY, &target_entry);
+ if ( (original_targetentry = slapi_entry_dup(target_entry)) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &newrdn);
+ original_newrdn = slapi_ch_strdup(newrdn);
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &dn_newsuperiordn);
+ orig_dn_newsuperiordn = slapi_sdn_dup(dn_newsuperiordn);
+
/*
* So, we believe that no code up till here actually added anything
* to persistent store. From now on, we're transacted
@@ -707,6 +740,48 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
/* txn is no longer valid - reset slapi_txn to the parent */
txn.back_txn_txn = NULL;
slapi_pblock_set(pb, SLAPI_TXN, parent_txn);
+
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &newrdn);
+ slapi_ch_free_string(&newrdn);
+ slapi_pblock_set(pb, SLAPI_MODRDN_NEWRDN, original_newrdn);
+ original_newrdn = slapi_ch_strdup(original_newrdn);
+
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &dn_newsuperiordn);
+ slapi_sdn_free(&dn_newsuperiordn);
+ slapi_pblock_set(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, orig_dn_newsuperiordn);
+ orig_dn_newsuperiordn = slapi_sdn_dup(orig_dn_newsuperiordn);
+
+ backentry_free(&ec);
+ slapi_pblock_set( pb, SLAPI_MODRDN_EXISTING_ENTRY, original_entry->ep_entry );
+ ec = original_entry;
+ if ( (original_entry = backentry_dup( ec )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
+ slapi_pblock_get(pb, SLAPI_MODRDN_TARGET_ENTRY, &target_entry );
+ slapi_entry_free(target_entry);
+ slapi_pblock_set( pb, SLAPI_MODRDN_TARGET_ENTRY, original_targetentry );
+ if ( (original_targetentry = slapi_entry_dup( original_targetentry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
+ backentry_free(&parententry);
+ slapi_pblock_set( pb, SLAPI_MODRDN_PARENT_ENTRY, original_parent->ep_entry );
+ parententry = original_parent;
+ if ( (original_entry = backentry_dup( parententry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+
+ backentry_free(&newparententry);
+ slapi_pblock_set( pb, SLAPI_MODRDN_NEWPARENT_ENTRY, original_newparent->ep_entry );
+ newparententry = original_entry;
+ if ( (original_entry = backentry_dup( newparententry )) == NULL ) {
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
/* We're re-trying */
LDAPDebug( LDAP_DEBUG_TRACE, "Modrdn Retrying Transaction\n", 0, 0, 0 );
}
@@ -1124,6 +1199,13 @@ common_return:
done_with_pblock_entry(pb,SLAPI_MODRDN_PARENT_ENTRY);
done_with_pblock_entry(pb,SLAPI_MODRDN_NEWPARENT_ENTRY);
done_with_pblock_entry(pb,SLAPI_MODRDN_TARGET_ENTRY);
+ slapi_ch_free_string(&original_newrdn);
+ slapi_sdn_free(&orig_dn_newsuperiordn);
+ backentry_free(&original_entry);
+ backentry_free(&original_parent);
+ backentry_free(&original_newparent);
+ slapi_entry_free(original_targetentry);
+
if(dblock_acquired)
{
dblayer_unlock_backend(be);
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index f5b16275e..1a7599bad 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -374,7 +374,7 @@ struct slapi_operation_parameters *operation_parameters_new()
return (slapi_operation_parameters *)slapi_ch_calloc (1, sizeof (slapi_operation_parameters));
}
-static LDAPMod **
+LDAPMod **
copy_mods(LDAPMod **orig_mods)
{
LDAPMod **new_mods = NULL;
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 2ef428807..7d4e94470 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -436,6 +436,7 @@ void operation_set_flag(Slapi_Operation *op,int flag);
void operation_clear_flag(Slapi_Operation *op,int flag);
int operation_is_flag_set(Slapi_Operation *op,int flag);
unsigned long operation_get_type(Slapi_Operation *op);
+LDAPMod **copy_mods(LDAPMod **orig_mods);
/*
* From ldap.h
| 0 |
0d4b820b0156efd5ca0888897724bf8394bb2f3d
|
389ds/389-ds-base
|
Issue 4169 - UI - Fix retrochangelog and schema Typeaheads (#5837)
Description: During PF4 Migration, a few typeaheads got broken.
Fix retroChangelog and schema typeahead selects.
Fix style errors (thanks, Mark!)
Related: https://github.com/389ds/389-ds-base/issues/4169
Reviewed by: @mreynolds389 (Thanks!)
|
commit 0d4b820b0156efd5ca0888897724bf8394bb2f3d
Author: Simon Pichugin <[email protected]>
Date: Thu Jul 13 17:55:09 2023 -0700
Issue 4169 - UI - Fix retrochangelog and schema Typeaheads (#5837)
Description: During PF4 Migration, a few typeaheads got broken.
Fix retroChangelog and schema typeahead selects.
Fix style errors (thanks, Mark!)
Related: https://github.com/389ds/389-ds-base/issues/4169
Reviewed by: @mreynolds389 (Thanks!)
diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
index f1abbdc17..9cd669569 100644
--- a/src/cockpit/389-console/src/css/ds.css
+++ b/src/cockpit/389-console/src/css/ds.css
@@ -425,7 +425,7 @@ textarea {
margin-left: 80px;
}
-@media screen and (max-width: 1300px) {
+@media screen and (max-width >= 1300px) {
.ds-plugin-spinner {
margin-left: 0;
}
@@ -453,7 +453,7 @@ textarea {
top: -16px;
}
-@media screen and (max-width: 770px) {
+@media screen and (max-width >= 770px) {
.ds-plugin-tab-header {
margin-top: 0;
margin-bottom: 0;
diff --git a/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx b/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx
index 22e5022f3..c0027ca89 100644
--- a/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx
@@ -46,6 +46,7 @@ class RetroChangelog extends React.Component {
maxAgeUnit: "w",
trimInterval: 300,
excludeSuffix: [],
+ excludeSuffixOptions: [],
excludeAttrs: [],
// original values
_isReplicated: false,
@@ -152,10 +153,10 @@ class RetroChangelog extends React.Component {
isExcludeSuffixOpen: false
}, () => { this.validate() });
};
- this.onExcludeSuffixCreateOption = newValue => {
- if (!this.state.excludeSuffix.includes(newValue)) {
+ this.handleOnExcludeSuffixCreateOption = newValue => {
+ if (!this.state.excludeSuffixOptions.includes(newValue)) {
this.setState({
- excludeSuffix: [...this.state.excludeSuffix, newValue],
+ excludeSuffixOptions: [...this.state.excludeSuffixOptions, newValue],
isExcludeSuffixOpen: false
}, () => { this.validate() });
}
@@ -388,7 +389,7 @@ class RetroChangelog extends React.Component {
placeholderText="Type a suffix..."
noResultsFoundText="There are no matching entries"
isCreatable
- onCreateOption={this.handleSubtreeScopeCreateOption}
+ onCreateOption={this.handleOnExcludeSuffixCreateOption}
validated={error.excludeSuffix ? ValidatedOptions.error : ValidatedOptions.default}
>
{[""].map((attr, index) => (
diff --git a/src/cockpit/389-console/src/schema.jsx b/src/cockpit/389-console/src/schema.jsx
index 7989c4ad7..cc79f3716 100644
--- a/src/cockpit/389-console/src/schema.jsx
+++ b/src/cockpit/389-console/src/schema.jsx
@@ -223,7 +223,8 @@ export class Schema extends React.Component {
this.handleAliasNameCreateOption = newValue => {
if (!this.state.atAliasOptions.includes(newValue)) {
this.setState({
- atAliasOptions: [...this.state.atAliasOptions, { value: newValue }]
+ atAliasOptions: [...this.state.atAliasOptions, { value: newValue }],
+ isAliasNameOpen: false
});
}
};
@@ -291,7 +292,8 @@ export class Schema extends React.Component {
this.handleRequiredAttrsCreateOption = newValue => {
if (!this.state.ocMustOptions.includes(newValue)) {
this.setState({
- ocMustOptions: [...this.state.ocParentocMustOptionsOptions, { value: newValue }]
+ ocMustOptions: [...this.state.ocMustOptions, { value: newValue }],
+ isRequiredAttrsOpen: false
});
}
};
@@ -329,7 +331,8 @@ export class Schema extends React.Component {
this.handleAllowedAttrsCreateOption = newValue => {
if (!this.state.ocMayOptions.includes(newValue)) {
this.setState({
- ocMayOptions: [...this.state.ocMayOptions, { value: newValue }]
+ ocMayOptions: [...this.state.ocMayOptions, { value: newValue }],
+ isAllowedAttrsOpen: false
});
}
};
| 0 |
758a68db83c653187203d683c9d9a71742314be5
|
389ds/389-ds-base
|
Ticket #321 - krbExtraData is being null modified and replicated on each ssh login
Bug Description: Crash during IPA install
Fix Description: Have to free the mod before moving the unremoved mods
down the list on top of the freed mod.
Reviewed by: mreynolds (Thanks!)
|
commit 758a68db83c653187203d683c9d9a71742314be5
Author: Rich Megginson <[email protected]>
Date: Mon May 21 09:08:04 2012 -0600
Ticket #321 - krbExtraData is being null modified and replicated on each ssh login
Bug Description: Crash during IPA install
Fix Description: Have to free the mod before moving the unremoved mods
down the list on top of the freed mod.
Reviewed by: mreynolds (Thanks!)
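Editor's note: the crash described above comes from the order of operations when stripping one element out of a NULL-terminated mods array. Below is a minimal standalone sketch in C (illustrative names only, not the plugin's actual code) of the pattern the fix restores: free the element while the slot still points at it, then shift the survivors down and re-terminate the array.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
strip_element(char **arr, int j)
{
    int k;

    free(arr[j]);                 /* free while arr[j] still points at the victim */
    for (k = j; arr[k + 1]; k++) {
        arr[k] = arr[k + 1];      /* move the remaining entries down one slot */
    }
    arr[k] = NULL;                /* keep the array NULL-terminated */
}

int
main(void)
{
    char *mods[4] = { strdup("uid"), strdup("krbExtraData"), strdup("cn"), NULL };

    strip_element(mods, 1);       /* drop the fractional attribute */
    for (int i = 0; mods[i]; i++) {
        printf("%s\n", mods[i]);
        free(mods[i]);
    }
    return 0;
}
Freeing after the shift (as the old code did) would free the pointer that had just been moved into slot j and leak the removed one, which is the crash this commit fixes.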
diff --git a/ldap/servers/plugins/replication/repl5_protocol_util.c b/ldap/servers/plugins/replication/repl5_protocol_util.c
index 332740581..30d211a79 100644
--- a/ldap/servers/plugins/replication/repl5_protocol_util.c
+++ b/ldap/servers/plugins/replication/repl5_protocol_util.c
@@ -707,6 +707,11 @@ repl5_strip_fractional_mods(Repl_Agmt *agmt, LDAPMod ** mods)
*/
if (0 == slapi_attr_type_cmp(mods[j]->mod_type, a[i], SLAPI_TYPE_CMP_SUBTYPE))
{
+ /* Adjust value of j, implicit in not incrementing it */
+ /* Free this mod */
+ ber_bvecfree(mods[j]->mod_bvalues);
+ slapi_ch_free((void **)&(mods[j]->mod_type));
+ slapi_ch_free((void **)&mods[j]);
/* Move down all subsequent mods */
for (k = j; mods[k+1] ; k++)
{
@@ -714,11 +719,6 @@ repl5_strip_fractional_mods(Repl_Agmt *agmt, LDAPMod ** mods)
}
/* Zero the end of the array */
mods[k] = NULL;
- /* Adjust value of j, implicit in not incrementing it */
- /* Free this mod */
- ber_bvecfree(mods[j]->mod_bvalues);
- slapi_ch_free((void **)&(mods[j]->mod_type));
- slapi_ch_free((void **)&mods[j]);
} else {
j++;
}
| 0 |
bf54018e8ee4b355c66621ad3bfe5e59d4820170
|
389ds/389-ds-base
|
Ticket #491 - multimaster_extop_cleanruv returns wrong error codes
https://fedorahosted.org/389/ticket/491
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: multimaster_extop_cleanruv must return with a code of
SLAPI_PLUGIN_EXTENDED_SENT_RESULT to tell the server that the result
has been sent - otherwise, in 1.2.10, the server will attempt to send
the result again. In 1.2.11 the result code has been changed to ignore
a subsequent attempt to send a result for the same operation, but the
function should still return the correct codes.
I also cleaned up the error codes and memory management a bit.
Platforms tested: RHEL6 x86_64, Fedora 16
Flag Day: no
Doc impact: no
|
commit bf54018e8ee4b355c66621ad3bfe5e59d4820170
Author: Rich Megginson <[email protected]>
Date: Thu Oct 11 11:57:24 2012 -0600
Ticket #491 - multimaster_extop_cleanruv returns wrong error codes
https://fedorahosted.org/389/ticket/491
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: multimaster_extop_cleanruv must return with a code of
SLAPI_PLUGIN_EXTENDED_SENT_RESULT to tell the server that the result
has been sent - otherwise, in 1.2.10, the server will attempt to send
the result again. In 1.2.11 the result code has been changed to ignore
a subsequent attempt to send a result for the same operation, but the
function should still return the correct codes.
I also cleaned up the error codes and memory management a bit.
Platforms tested: RHEL6 x86_64, Fedora 16
Flag Day: no
Doc impact: no
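Editor's note: a minimal standalone sketch of the "result already sent" contract the description refers to. The constant and helper below are stand-ins (not the real slapi definitions); the point is the control flow: once the handler has sent its own result, it must return the sentinel so the caller does not send a second result for the same operation.
#include <stdio.h>

#define MY_EXTENDED_SENT_RESULT (-1)  /* stand-in for SLAPI_PLUGIN_EXTENDED_SENT_RESULT */
#define MY_SUCCESS            0
#define MY_OPERATIONS_ERROR   1

/* pretend to send an LDAP result to the client */
static void
send_result(int code)
{
    printf("result sent to client: %d\n", code);
}

/* sketch of an extended-operation handler */
static int
my_extop_handler(int payload_ok)
{
    if (!payload_ok) {
        /* nothing was sent yet; let the caller report the failure */
        return MY_OPERATIONS_ERROR;
    }
    send_result(MY_SUCCESS);
    /* a result has already gone out: return the sentinel, not a plain code */
    return MY_EXTENDED_SENT_RESULT;
}

int
main(void)
{
    int rc = my_extop_handler(1);

    if (rc != MY_EXTENDED_SENT_RESULT) {
        /* only in this branch would the caller send its own result */
        send_result(rc);
    }
    return 0;
}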
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index c94ef8f72..5aac69954 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1446,19 +1446,20 @@ free_and_return:
int
multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
{
- multimaster_mtnode_extension *mtnode_ext;
+ multimaster_mtnode_extension *mtnode_ext = NULL;
+ int release_it = 0;
PRThread *thread = NULL;
cleanruv_data *data;
Replica *r;
ReplicaId rid;
- CSN *maxcsn;
- struct berval *extop_payload;
+ CSN *maxcsn = NULL;
+ struct berval *extop_payload = NULL;
char *extop_oid;
char *repl_root;
char *payload = NULL;
char *certify_all;
char *iter;
- int rc = 0;
+ int rc = LDAP_SUCCESS;
slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &extop_oid);
slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &extop_payload);
@@ -1496,6 +1497,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
}
if (mtnode_ext->replica){
object_acquire (mtnode_ext->replica);
+ release_it = 1;
} else {
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Abort cleanAllRUV task: replica is missing from (%s), "
"aborting operation\n",repl_root);
@@ -1519,6 +1521,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
goto out;
}
data->repl_obj = mtnode_ext->replica; /* released in replica_abort_task_thread() */
+ release_it = 0; /* thread owns it now */
data->replica = r;
data->task = NULL;
data->payload = slapi_ch_bvdup(extop_payload);
@@ -1540,17 +1543,20 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
if (thread == NULL) {
- if(mtnode_ext->replica){
- object_release(mtnode_ext->replica);
- }
slapi_log_error( SLAPI_LOG_REPL, repl_plugin_name, "Abort cleanAllRUV task: unable to create abort "
"thread. Aborting task.\n");
+ release_it = 1; /* have to release mtnode_ext->replica now */
slapi_ch_free_string(&data->repl_root);
slapi_ch_free_string(&data->certify);
+ ber_bvfree(data->payload);
+ slapi_ch_free((void **)&data);
rc = LDAP_OPERATIONS_ERROR;
}
out:
+ if (release_it && mtnode_ext && mtnode_ext->replica) {
+ object_release(mtnode_ext->replica);
+ }
slapi_ch_free_string(&payload);
return rc;
@@ -1570,7 +1576,7 @@ out:
int
multimaster_extop_cleanruv(Slapi_PBlock *pb)
{
- multimaster_mtnode_extension *mtnode_ext;
+ multimaster_mtnode_extension *mtnode_ext = NULL;
PRThread *thread = NULL;
Replica *r = NULL;
cleanruv_data *data = NULL;
@@ -1585,7 +1591,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
char *iter;
int release_it = 0;
int rid = 0;
- int rc = 0;
+ int rc = LDAP_OPERATIONS_ERROR;
slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &extop_oid);
slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &extop_payload);
@@ -1593,7 +1599,6 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if (NULL == extop_oid || strcmp(extop_oid, REPL_CLEANRUV_OID) != 0 ||
NULL == extop_payload || NULL == extop_payload->bv_val){
/* something is wrong, error out */
- rc = -1;
goto free_and_return;
}
/*
@@ -1601,7 +1606,6 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
*/
if(decode_cleanruv_payload(extop_payload, &payload)){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: failed to decode payload. Aborting ext op\n");
- rc = -1;
goto free_and_return;
}
rid = atoi(ldap_utf8strtok_r(payload, ":", &iter));
@@ -1614,7 +1618,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
*/
if(is_cleaned_rid(rid)){
csn_free(&maxcsn);
- rc = 1;
+ rc = LDAP_SUCCESS;
goto free_and_return;
}
@@ -1624,25 +1628,21 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if((mtnode_ext = replica_config_get_mtnode_by_dn(repl_root)) == NULL){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: failed to get replication node "
"from (%s), aborting operation\n", repl_root);
- rc = -1;
goto free_and_return;
}
if (mtnode_ext->replica){
object_acquire (mtnode_ext->replica);
release_it = 1;
- }
- if (mtnode_ext->replica == NULL){
+ } else {
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: replica is missing from (%s), "
"aborting operation\n",repl_root);
- rc = LDAP_OPERATIONS_ERROR;
goto free_and_return;
}
r = (Replica*)object_get_data (mtnode_ext->replica);
if(r == NULL){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: replica is NULL, aborting task\n");
- rc = -1;
goto free_and_return;
}
@@ -1657,7 +1657,6 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if (data == NULL) {
slapi_log_error( SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: failed to allocate "
"cleanruv_Data\n");
- rc = -1;
goto free_and_return;
}
data->repl_obj = mtnode_ext->replica;
@@ -1671,9 +1670,15 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
if (thread == NULL) {
- rc = -1;
slapi_log_error( SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: unable to create cleanAllRUV "
"monitoring thread. Aborting task.\n");
+ ber_bvfree(data->payload);
+ data->payload = NULL;
+ slapi_ch_free((void **)&data);
+ } else {
+ release_it = 0; /* thread will release data->repl_obj == mtnode_ext->replica */
+ maxcsn = NULL; /* thread owns it now */
+ rc = LDAP_SUCCESS;
}
} else { /* this is a read-only consumer */
/*
@@ -1713,24 +1718,20 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
/* free everything */
object_release(ruv_obj);
- csn_free(&maxcsn);
- if (mtnode_ext->replica && release_it)
- object_release (mtnode_ext->replica);
/*
* This read-only replica has no easy way to tell when it's safe to release the rid.
* So we won't release it, not until a server restart.
*/
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: You must restart the server if you want to reuse rid(%d).\n", rid);
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: Successfully cleaned rid(%d).\n", rid);
+ rc = LDAP_SUCCESS;
}
free_and_return:
- if(rc && release_it){
- if (mtnode_ext->replica)
- object_release (mtnode_ext->replica);
+ if(release_it && mtnode_ext && mtnode_ext->replica) {
+ object_release (mtnode_ext->replica);
}
- if(rc)
- csn_free(&maxcsn);
+ csn_free(&maxcsn);
slapi_ch_free_string(&payload);
/*
@@ -1754,6 +1755,10 @@ free_and_return:
{
ber_bvfree(resp_bval);
}
+ /* tell extendop code that we have already sent the result */
+ rc = SLAPI_PLUGIN_EXTENDED_SENT_RESULT;
+ } else {
+ rc = LDAP_OPERATIONS_ERROR;
}
return rc;
| 0 |
16e5ce768bc70a3d30dca139ec8fc9330d071168
|
389ds/389-ds-base
|
Ticket 47654 - Fix regression (deadlock/crash)
Bug Description: dblayer_close() is called for shutdowns, and when the backend
is being disabled for certain tasks like db2bak/bak2db. The
original fix assumed this function was only called during shutdowns.
Fix Description: Only free certain resources when the server is actually shutting
down.
https://fedorahosted.org/389/ticket/47654
Reviewed by: rmeggins(Thanks!)
|
commit 16e5ce768bc70a3d30dca139ec8fc9330d071168
Author: Mark Reynolds <[email protected]>
Date: Wed Jul 9 17:39:38 2014 -0400
Ticket 47654 - Fix regression (deadlock/crash)
Bug Description: dblayer_close() is called for shutdowns, and when the backend
is being disabled for certain tasks like db2bak/bak2db. The
original fix assumed this function was only called during shutdowns.
Fix Description: Only free certain resources when the server is actually shutting
down.
https://fedorahosted.org/389/ticket/47654
Reviewed by: rmeggins(Thanks!)
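Editor's note: a minimal sketch (stand-in names, not the server's code) of the guard this fix introduces. Teardown that is shared between "disable the backend temporarily" and real shutdown only releases process-wide state when the process is genuinely shutting down, so the backend can be re-opened after a backup/restore cycle.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct layer_private {
    char *home_directory;      /* process-wide setting: must survive re-enable */
    char *scratch_buffer;      /* per-open state: always safe to free */
};

static int shutting_down = 0;  /* stand-in for a g_get_shutdown()-style flag */

static void
layer_close(struct layer_private *priv)
{
    /* per-open resources go away on every close */
    free(priv->scratch_buffer);
    priv->scratch_buffer = NULL;

    /* settings needed to re-open later are only freed on real shutdown */
    if (shutting_down) {
        free(priv->home_directory);
        priv->home_directory = NULL;
    }
}

int
main(void)
{
    struct layer_private priv = { strdup("/var/lib/dirsrv"), strdup("tmp") };

    layer_close(&priv);                      /* backend disabled for db2bak */
    printf("home still set: %s\n", priv.home_directory);
    priv.scratch_buffer = strdup("tmp");     /* backend re-opened */

    shutting_down = 1;
    layer_close(&priv);                      /* real shutdown */
    printf("home now %s\n", priv.home_directory ? priv.home_directory : "(freed)");
    return 0;
}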
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 0fda6d3ba..4f388457c 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -2896,6 +2896,7 @@ int dblayer_post_close(struct ldbminfo *li, int dbmode)
dblayer_private *priv = 0;
int return_value = 0;
dblayer_private_env *pEnv;
+ int shutdown = g_get_shutdown();
PR_ASSERT(NULL != li);
priv = (dblayer_private*)li->li_dblayer_private;
@@ -2928,14 +2929,17 @@ int dblayer_post_close(struct ldbminfo *li, int dbmode)
charray_free(priv->dblayer_data_directories);
priv->dblayer_data_directories = NULL;
}
- slapi_ch_free_string(&priv->dblayer_dbhome_directory);
- slapi_ch_free_string(&priv->dblayer_home_directory);
+ if(shutdown){
+ slapi_ch_free_string(&priv->dblayer_dbhome_directory);
+ slapi_ch_free_string(&priv->dblayer_home_directory);
+ }
return return_value;
}
/*
- * This function is called when the server is shutting down.
+ * This function is called when the server is shutting down, or when the
+ * backend is being disabled (e.g. backup/restore).
* This is not safe to call while other threads are calling into the open
* databases !!! So: DON'T !
*/
@@ -2945,6 +2949,7 @@ int dblayer_close(struct ldbminfo *li, int dbmode)
ldbm_instance *inst;
Object *inst_obj;
int return_value = 0;
+ int shutdown = g_get_shutdown();
dblayer_pre_close(li);
@@ -2957,7 +2962,9 @@ int dblayer_close(struct ldbminfo *li, int dbmode)
for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
inst = (ldbm_instance *)object_get_data(inst_obj);
- vlv_close(inst);
+ if(shutdown){
+ vlv_close(inst);
+ }
be = inst->inst_be;
if (NULL != be->be_instance_info) {
return_value |= dblayer_instance_close(be);
| 0 |
b43ec69c8656a28573b30abef4b8f3a44a8caedc
|
389ds/389-ds-base
|
Issue 50926 - Remove dual spinner and other UI fixes
Description: Removed unnecessary spinner when loading the backup table.
Removed duplicate ID's from html
Revised Create Instance modal to be easier to read
Fixed typo in posix winsync fixup task help description
Removed unused CSS styles
Fixed plugin modal alignment issues
Fixed typo in Enable Replication modal
relates: https://pagure.io/389-ds-base/issue/50926
Reviewed by: spichugi(Thanks!)
|
commit b43ec69c8656a28573b30abef4b8f3a44a8caedc
Author: Mark Reynolds <[email protected]>
Date: Tue Mar 3 16:08:57 2020 -0500
Issue 50926 - Remove dual spinner and other UI fixes
Description: Removed unnecessary spinner when loading the backup table.
Removed duplicate ID's from html
Revised Create Instance modal to be easier to read
Fixed typo in posix winsync fixup task help description
Removed unused CSS styles
Fixed plugin modal alignment issues
Fixed typo in Enable Replication modal
relates: https://pagure.io/389-ds-base/issue/50926
Reviewed by: spichugi(Thanks!)
diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
index 76bf2a9f1..9248116e7 100644
--- a/src/cockpit/389-console/src/css/ds.css
+++ b/src/cockpit/389-console/src/css/ds.css
@@ -23,16 +23,6 @@
text-align: left !important;
}
-/* Main nav page index.html */
-.ds-content {
- padding: 0;
- padding-top: 115px; /* this pushes the content below fixed nav bar */
- padding-bottom: 50px;
- margin-top: 0;
- margin-right: 10px;
- margin-bottom: 10px;
-}
-
.ds-chart-right {
margin-left: 65px;
}
@@ -70,25 +60,6 @@ td {
max-width: 200px !important;
}
-.ds-hr {
- margin-top: 0.5em;
- margin-bottom: 0.5em;
- border-style: inset;
- border-width: 1px;
- padding: 0px !important;
-}
-
-.ds-hr-logs {
- display: block;
- margin-left: 0;
- margin-top: 0.5em;
- margin-bottom: 0.5em;
- border-style: inset;
- border-width: 1px;
- padding: 0px !important;
- width: 525px;
-}
-
.ds-no-padding {
padding: 0px !important;
}
@@ -117,10 +88,6 @@ td {
opacity: 0.7;
}
-.ds-split {
- width: 400px;
-}
-
.ds-input {
margin-top: 5px !important;
padding-right: 5px !important;
@@ -141,20 +108,6 @@ td {
text-align: right;
}
-.ds-pw-input {
- margin-top: 5px;
- padding-right: 5px;
- padding-left: 5px !important;
- min-width: 65px !important;
- max-width: 65px !important;
-}
-
-.ds-history-input {
- margin-top: !important;
- margin-right: 5px;
- margin-left: 40px;
-}
-
.ds-divider {
width: 35px;
}
@@ -180,27 +133,6 @@ td {
text-align: left;
}
-.ds-dblink-form-input {
- width: 415px !important;
- margin-top: 5px;
- padding-right: 10px;
- height: 37px !important;
-}
-
-.ds-repl-table {
- background-color: white !important;
- padding: 0px !important;
- border: 1px solid #909090;
- table-layout: fixed;
- width: 100%;
- clear: both;
- word-wrap: break-word !important;
- text-align: center;
- margin-top: -10px !important;
- line-height: 1;
-}
-
-
.ds-db-table {
border: 1px solid #d1d1d1;
word-wrap: break-word !important;
@@ -270,12 +202,6 @@ td {
margin-right: 5px !important;
}
-.ds-send-expiring-checkbox {
- margin-top: 12px !important;
- margin-right: 12px !important;
- padding: 5px;
-}
-
.ds-operate-spinner {
margin-left: 10px;
top: 10px;
@@ -298,38 +224,11 @@ td {
margin-bottom: 10px !important;
}
-.ds-config-label-lrg {
- margin-top: 10px;
- width: 160px !important;
- margin-bottom: 10px !important;
-}
-
-.ds-config-sub-label {
- margin-top: 10px;
- width: 225px !important;
- margin-bottom: 10px !important;
- padding-left: 20px;
-}
-
.ds-label-sm {
width: 175px !important;
margin-bottom: 10px !important;
}
-.ds-expire-label {
- margin-top: 7px;
- margin-bottom: 7px;
- width: 285px !important;
-}
-
-.ds-minage-label {
- padding: 10px;
-}
-
-.ds-expired-div {
- padding-left: 30px !important;
-}
-
.ds-modal-row {
margin-left: 20px;
margin-right: 0px !important;
@@ -348,17 +247,6 @@ td {
width: 85px;
}
-.ds-oc-form-list {
- width: 232px !important;
- max-height: 350px !important;
- min-height: 350px !important;
-}
-
-.ds-oc-form-list:focus {
- max-height: 350px !important;
- min-height: 350px !important;
-}
-
p {
line-height: 1;
white-space: normal;
@@ -435,11 +323,6 @@ option {
background-color: #f3f3f3;
}
-.ds-nav-tab a {
- text-align: center;
- min-width: 125px !important;
-}
-
.ds-float-right {
float: right;
}
@@ -459,29 +342,6 @@ option {
Width: 18px !important;
}
-.ds-footer {
- background-color: #f5f5f5 !important;
- margin-left: -25px;
- padding: 10px;
- position: fixed;
- bottom: 0;
- width: 100%;
- height: 50px;
- border-top: 1px solid #e2e2e2 !important;
-}
-
-.ds-nav-bar {
- position: fixed;
- top: 0;
- width: 100%;
- background-color: white;
- z-index: 1;
-}
-
-.ds-nav-item a {
- text-align: left;
-}
-
.ds-config-header {
margin-bottom: 20px;
}
@@ -555,38 +415,6 @@ option {
overflow-y:auto;
}
-/* wizard accordions are narrower */
-.ds-wiz-accordion {
- margin-top: 20px;
- color: #228bc0 !important;
- background-color: white;
- border: 0;
- position: relative;
- overflow: hidden;
- width: 500px;
- text-align: left;
-}
-
-.ds-wiz-accordion:after {
- display: inline-block;
- content: "";
- height: 1px;
- background: #228bc0;
- position: absolute;
- width: 500px;
- top: 50% !important;
- margin-left: 10px;
- text-align: left;
-}
-
-.ds-wiz-accordion:focus {
- outline: none !important;
- border: 0 !important;
- -moz-outline: none !important;
- -webkit-box-shadow: none !important;
- box-shadow: 0 !important;
-}
-
.ds-indent {
margin-left: 15px !important;
margin-right: 15px !important;
@@ -678,10 +506,6 @@ option {
transform: translate(-25%, -35%);
}
-.ds-popup {
- min-width: 350px !important;
-}
-
.ds-input-auto {
width: 100%;
margin-right: 10px;
diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
index 439a85009..c003d1690 100644
--- a/src/cockpit/389-console/src/ds.jsx
+++ b/src/cockpit/389-console/src/ds.jsx
@@ -120,13 +120,6 @@ export class DSInstance extends React.Component {
state: "success"
}
}));
- } else {
- this.setState(prevState => ({
- pageLoadingState: {
- ...prevState.pageLoadingState,
- state: "loading"
- }
- }));
}
}
);
@@ -311,9 +304,6 @@ export class DSInstance extends React.Component {
}
loadBackups() {
- this.setState({
- loadingOperate: true
- });
const cmd = ["dsctl", "-j", this.state.serverId, "backups"];
log_cmd("loadBackupsDSInstance", "Load Backups", cmd);
cockpit.spawn(cmd, { superuser: true, err: "message" }).done(content => {
@@ -324,7 +314,6 @@ export class DSInstance extends React.Component {
}
this.setState({
backupRows: rows,
- loadingOperate: false
});
});
}
@@ -490,7 +479,6 @@ export class DSInstance extends React.Component {
} = this.state;
let mainContent = "";
-
if (pageLoadingState.state === "loading") {
mainContent = (
<div id="loading-instances" className="all-pages ds-center">
@@ -760,7 +748,7 @@ class CreateInstanceModal extends React.Component {
createDBSuffix: "",
createDBName: "",
createTLSCert: true,
- createInitDB: "",
+ createInitDB: "noInit",
loadingCreate: false
};
@@ -1088,19 +1076,19 @@ class CreateInstanceModal extends React.Component {
>
<Icon type="pf" name="close" />
</button>
- <Modal.Title>Create New Server Instance</Modal.Title>
+ <Modal.Title className="ds-center">Create New Server Instance</Modal.Title>
</Modal.Header>
<Modal.Body>
<Form horizontal>
<FormGroup controlId="createServerId">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="The instance name, this is what gets appended to 'slapi-'. The instance name can only contain letters, numbers, and: # % : - _"
>
Instance Name
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
id="createServerId"
type="text"
@@ -1113,12 +1101,12 @@ class CreateInstanceModal extends React.Component {
<FormGroup controlId="createPort">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="The server port number"
>
Port
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
type="number"
min="0"
@@ -1131,12 +1119,12 @@ class CreateInstanceModal extends React.Component {
<FormGroup controlId="createSecurePort">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="The secure port number for TLS connections"
>
Secure Port
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
type="number"
min="0"
@@ -1149,12 +1137,12 @@ class CreateInstanceModal extends React.Component {
<FormGroup controlId="createTLSCert">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="Create a self-signed certificate database"
>
- Create Self-Signed TLS Certificate DB
+ Create Self-Signed TLS Certificate
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<Checkbox
id="createTLSCert"
checked={createTLSCert}
@@ -1165,12 +1153,12 @@ class CreateInstanceModal extends React.Component {
<FormGroup controlId="createDM">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="The DN for the unrestricted user"
>
Directory Manager DN
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
type="text"
id="createDM"
@@ -1182,12 +1170,12 @@ class CreateInstanceModal extends React.Component {
<FormGroup controlId="createDMPassword">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="Directory Manager password."
>
Directory Manager Password
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
id="createDMPassword"
type="password"
@@ -1198,10 +1186,10 @@ class CreateInstanceModal extends React.Component {
</Col>
</FormGroup>
<FormGroup controlId="createDMPasswordConfirm">
- <Col componentClass={ControlLabel} sm={4} title="Confirm password.">
+ <Col componentClass={ControlLabel} sm={5} title="Confirm password.">
Confirm Password
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
id="createDMPasswordConfirm"
type="password"
@@ -1213,15 +1201,15 @@ class CreateInstanceModal extends React.Component {
</FormGroup>
<hr />
<h5 className="ds-center">Optional Database Settings</h5>
- <FormGroup controlId="createDBSuffix">
+ <FormGroup className="ds-margin-top-lg" controlId="createDBSuffix">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distiguished Name (DN)"
>
Database Suffix
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
type="text"
id="createDBSuffix"
@@ -1234,12 +1222,12 @@ class CreateInstanceModal extends React.Component {
<FormGroup controlId="createDBName">
<Col
componentClass={ControlLabel}
- sm={4}
+ sm={5}
title="The name for the backend database, like 'userroot'. The name can be a combination of alphanumeric characters, dashes (-), and underscores (_). No other characters are allowed, and the name must be unique across all backends."
>
Database Name
</Col>
- <Col sm={8}>
+ <Col sm={7}>
<FormControl
type="text"
id="createDBName"
@@ -1254,7 +1242,7 @@ class CreateInstanceModal extends React.Component {
controlId="createInitDBn"
disabled={false}
>
- <Col smOffset={4} sm={8}>
+ <Col smOffset={5} sm={7}>
<Radio
id="createInitDB"
value="noInit"
@@ -1272,7 +1260,7 @@ class CreateInstanceModal extends React.Component {
controlId="createInitDBs"
disabled={false}
>
- <Col smOffset={4} sm={8}>
+ <Col smOffset={5} sm={7}>
<Radio
id="createInitDB"
value="createSuffix"
@@ -1290,7 +1278,7 @@ class CreateInstanceModal extends React.Component {
controlId="createInitDBp"
disabled={false}
>
- <Col smOffset={4} sm={8}>
+ <Col smOffset={5} sm={7}>
<Radio
id="createInitDB"
value="createSample"
diff --git a/src/cockpit/389-console/src/lib/customTableToolbar.jsx b/src/cockpit/389-console/src/lib/customTableToolbar.jsx
index d6b6e2e4b..6605591ef 100644
--- a/src/cockpit/389-console/src/lib/customTableToolbar.jsx
+++ b/src/cockpit/389-console/src/lib/customTableToolbar.jsx
@@ -36,7 +36,6 @@ class CustomTableToolbar extends React.Component {
<div className="ds-float-left">
<FormControl
type="text"
- id="find"
placeholder={placeholder}
value={searchFilterValue}
onChange={handleValueChange}
diff --git a/src/cockpit/389-console/src/lib/database/databaseTables.jsx b/src/cockpit/389-console/src/lib/database/databaseTables.jsx
index beca69004..67bae31c1 100644
--- a/src/cockpit/389-console/src/lib/database/databaseTables.jsx
+++ b/src/cockpit/389-console/src/lib/database/databaseTables.jsx
@@ -930,6 +930,7 @@ class BackupTable extends React.Component {
} else {
backupTable =
<DSTable
+ id="backupTable"
noSearchBar
getColumns={this.getColumns}
rowKey={this.state.rowKey}
diff --git a/src/cockpit/389-console/src/lib/dsTable.jsx b/src/cockpit/389-console/src/lib/dsTable.jsx
index 4b1784023..399f1a321 100644
--- a/src/cockpit/389-console/src/lib/dsTable.jsx
+++ b/src/cockpit/389-console/src/lib/dsTable.jsx
@@ -273,7 +273,6 @@ class DSTable extends React.Component {
onNextPage={this.onNextPage}
onLastPage={this.onLastPage}
onSubmit={this.onSubmit}
- id={this.props.searchField}
/>
</div>
);
@@ -400,7 +399,7 @@ DSTable.propTypes = {
toolBarDisableLoadingSpinner: PropTypes.bool,
toolBarPagination: PropTypes.array,
toolBarPaginationPerPage: PropTypes.number,
- noSearchBar: PropTypes.bool
+ noSearchBar: PropTypes.bool,
};
DSShortTable.propTypes = {
diff --git a/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx b/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx
index 66453f9a2..e31532c63 100644
--- a/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx
@@ -467,12 +467,14 @@ class AttributeUniqueness extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup controlId="configName">
- <Col sm={3}>
- <ControlLabel title='Sets the name of the plug-in configuration record. (cn) You can use any string, but "attribute_name Attribute Uniqueness" is recommended.'>
- Config Name
- </ControlLabel>
+ <Col
+ componentClass={ControlLabel}
+ sm={4}
+ title='Sets the name of the plug-in configuration record. (cn) You can use any string, but "attribute_name Attribute Uniqueness" is recommended.'
+ >
+ Config Name
</Col>
- <Col sm={9}>
+ <Col sm={8}>
<FormControl
type="text"
value={configName}
@@ -488,12 +490,12 @@ class AttributeUniqueness extends React.Component {
>
<Col
componentClass={ControlLabel}
- sm={3}
+ sm={4}
title="Sets the name of the attribute whose values must be unique. This attribute is multi-valued. (uniqueness-attribute-name)"
>
Attribute Names
</Col>
- <Col sm={9}>
+ <Col sm={8}>
<Typeahead
allowNew
multiple
@@ -516,12 +518,12 @@ class AttributeUniqueness extends React.Component {
>
<Col
componentClass={ControlLabel}
- sm={3}
+ sm={4}
title="Sets the DN under which the plug-in checks for uniqueness of the attributes value. This attribute is multi-valued (uniqueness-subtrees)"
>
Subtrees
</Col>
- <Col sm={9}>
+ <Col sm={8}>
<Typeahead
allowNew
multiple
@@ -550,12 +552,12 @@ class AttributeUniqueness extends React.Component {
>
<Col
componentClass={ControlLabel}
- sm={3}
+ sm={4}
title="Verifies that the value of the attribute set in uniqueness-attribute-name is unique in this subtree (uniqueness-top-entry-oc)"
>
Top Entry OC
</Col>
- <Col sm={9}>
+ <Col sm={8}>
<Typeahead
allowNew
onChange={value => {
@@ -577,12 +579,12 @@ class AttributeUniqueness extends React.Component {
>
<Col
componentClass={ControlLabel}
- sm={3}
+ sm={4}
title="Verifies if an attribute is unique, if the entry contains the object class set in this parameter (uniqueness-subtree-entries-oc)"
>
Subtree Entries OC
</Col>
- <Col sm={6}>
+ <Col sm={5}>
<Typeahead
allowNew
onChange={value => {
@@ -614,7 +616,7 @@ class AttributeUniqueness extends React.Component {
>
<Col
componentClass={ControlLabel}
- sm={3}
+ sm={4}
title="Identifies whether or not the config is enabled."
>
Enable config
diff --git a/src/cockpit/389-console/src/lib/plugins/autoMembership.jsx b/src/cockpit/389-console/src/lib/plugins/autoMembership.jsx
index 6de8d3a7c..ab0661c58 100644
--- a/src/cockpit/389-console/src/lib/plugins/autoMembership.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/autoMembership.jsx
@@ -771,8 +771,8 @@ class AutoMembership extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup key="definitionName" controlId="definitionName">
- <Col sm={3}>
- <ControlLabel>Definition Name</ControlLabel>
+ <Col componentClass={ControlLabel} sm={3}>
+ Definition Name
</Col>
<Col sm={9}>
<FormControl
@@ -787,10 +787,8 @@ class AutoMembership extends React.Component {
{Object.entries(modalDefinitionFields).map(
([id, content]) => (
<FormGroup key={id} controlId={id}>
- <Col sm={3}>
- <ControlLabel title={content.help}>
- {content.name}
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title={content.help}>
+ {content.name}
</Col>
<Col sm={9}>
<FormControl
@@ -806,12 +804,8 @@ class AutoMembership extends React.Component {
key="groupingAttrEntry"
controlId="groupingAttrEntry"
>
- <Col sm={3}>
- <ControlLabel
- title={`Specifies the name of the member attribute in the group entry and the attribute in the object entry that supplies the member attribute value, in the format group_member_attr:entry_attr (autoMemberGroupingAttr)`}
- >
- Grouping Attributes
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Specifies the name of the member attribute in the group entry and the attribute in the object entry that supplies the member attribute value, in the format group_member_attr:entry_attr (autoMemberGroupingAttr)">
+ Grouping Attributes
</Col>
<Col sm={4}>
<Typeahead
@@ -840,6 +834,7 @@ class AutoMembership extends React.Component {
</Form>
</Col>
</Row>
+ <hr />
<Row>
<Col sm={12}>
<AutoMembershipRegexTable
@@ -898,8 +893,8 @@ class AutoMembership extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup key="regexName" controlId="regexName">
- <Col sm={3}>
- <ControlLabel>Regex Name</ControlLabel>
+ <Col componentClass={ControlLabel} sm={3}>
+ Regex Name
</Col>
<Col sm={9}>
<FormControl
@@ -912,12 +907,8 @@ class AutoMembership extends React.Component {
</Col>
</FormGroup>
<FormGroup key="regexExclusive" controlId="regexExclusive">
- <Col sm={3}>
- <ControlLabel
- title={`Sets a single regular expression to use to identify entries to exclude (autoMemberExclusiveRegex)`}
- >
- Exclusive Regex
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Sets a single regular expression to use to identify entries to exclude (autoMemberExclusiveRegex)">
+ Exclusive Regex
</Col>
<Col sm={9}>
<Typeahead
@@ -936,12 +927,8 @@ class AutoMembership extends React.Component {
</Col>
</FormGroup>
<FormGroup key="regexInclusive" controlId="regexInclusive">
- <Col sm={3}>
- <ControlLabel
- title={`Sets a single regular expression to use to identify entries to exclude (autoMemberExclusiveRegex)`}
- >
- Inclusive Regex
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Sets a single regular expression to use to identify entries to exclude (autoMemberExclusiveRegex)">
+ Inclusive Regex
</Col>
<Col sm={9}>
<Typeahead
@@ -963,12 +950,8 @@ class AutoMembership extends React.Component {
key="regexTargetGroup"
controlId="regexTargetGroup"
>
- <Col sm={3}>
- <ControlLabel
- title={`Sets which group to add the entry to as a member, if it meets the regular expression conditions (autoMemberTargetGroup)`}
- >
- Target Group
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Sets which group to add the entry to as a member, if it meets the regular expression conditions (autoMemberTargetGroup)">
+ Target Group
</Col>
<Col sm={9}>
<FormControl
diff --git a/src/cockpit/389-console/src/lib/plugins/dna.jsx b/src/cockpit/389-console/src/lib/plugins/dna.jsx
index 453925586..f4b1e0533 100644
--- a/src/cockpit/389-console/src/lib/plugins/dna.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/dna.jsx
@@ -880,8 +880,8 @@ class DNA extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup key="configName" controlId="configName">
- <Col sm={4}>
- <ControlLabel>Config Name</ControlLabel>
+ <Col componentClass={ControlLabel} sm={4}>
+ Config Name
</Col>
<Col sm={8}>
<FormControl
@@ -921,10 +921,8 @@ class DNA extends React.Component {
</Col>
</FormGroup>
<FormGroup key="type" controlId="type">
- <Col sm={4}>
- <ControlLabel title="Sets which attributes have unique numbers being generated for them (dnaType)">
- Type
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={4} title="Sets which attributes have unique numbers being generated for them (dnaType)">
+ Type
</Col>
<Col sm={8}>
<Typeahead
@@ -944,10 +942,8 @@ class DNA extends React.Component {
</FormGroup>
{Object.entries(modalConfigFields).map(([id, content]) => (
<FormGroup key={id} controlId={id}>
- <Col sm={4}>
- <ControlLabel title={content.help}>
- {content.name}
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={4} title={content.help}>
+ {content.name}
</Col>
<Col sm={8}>
<FormControl
diff --git a/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx b/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx
index 30900f73d..accd694d7 100644
--- a/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx
@@ -353,10 +353,8 @@ class LinkedAttributes extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup controlId="configName">
- <Col sm={3}>
- <ControlLabel title="The Linked Attributes configuration name">
- Config Name
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="The Linked Attributes configuration name">
+ Config Name
</Col>
<Col sm={9}>
<FormControl
@@ -368,10 +366,8 @@ class LinkedAttributes extends React.Component {
</Col>
</FormGroup>
<FormGroup controlId="linkType">
- <Col sm={3}>
- <ControlLabel title="Sets the attribute that is managed manually by administrators (linkType)">
- Link Type
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Sets the attribute that is managed manually by administrators (linkType)">
+ Link Type
</Col>
<Col sm={9}>
<Typeahead
@@ -389,10 +385,8 @@ class LinkedAttributes extends React.Component {
</Col>
</FormGroup>
<FormGroup controlId="managedType">
- <Col sm={3}>
- <ControlLabel title="Sets the attribute that is created dynamically by the plugin (managedType)">
- Managed Type
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Sets the attribute that is created dynamically by the plugin (managedType)">
+ Managed Type
</Col>
<Col sm={9}>
<Typeahead
@@ -410,10 +404,8 @@ class LinkedAttributes extends React.Component {
</Col>
</FormGroup>
<FormGroup controlId="linkScope">
- <Col sm={3}>
- <ControlLabel title="Sets the base DN that restricts the plugin to a specific part of the directory tree (linkScope)">
- Link Scope
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Sets the base DN that restricts the plugin to a specific part of the directory tree (linkScope)">
+ Link Scope
</Col>
<Col sm={9}>
<FormControl
diff --git a/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx b/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx
index b403fb2df..679e9196c 100644
--- a/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx
@@ -614,8 +614,8 @@ class ManagedEntries extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup key="configName" controlId="configName">
- <Col sm={3}>
- <ControlLabel>Config Name</ControlLabel>
+ <Col componentClass={ControlLabel} sm={3}>
+ Config Name
</Col>
<Col sm={9}>
<FormControl
@@ -629,10 +629,8 @@ class ManagedEntries extends React.Component {
</FormGroup>
{Object.entries(modalConfigFields).map(([id, content]) => (
<FormGroup key={id} controlId={id}>
- <Col sm={3}>
- <ControlLabel title={content.help}>
- {content.name}
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title={content.help}>
+ {content.name}
</Col>
<Col sm={9}>
<FormControl
@@ -709,10 +707,8 @@ class ManagedEntries extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup controlId="templateDN">
- <Col sm={4}>
- <ControlLabel title="DN of the template entry">
- Template DN
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={4} title="DN of the template entry">
+ Template DN
</Col>
<Col sm={8}>
<FormControl
@@ -761,10 +757,8 @@ class ManagedEntries extends React.Component {
controlId="templateStaticAttr"
disabled={false}
>
- <Col sm={4}>
- <ControlLabel title="Sets an attribute with a defined value that must be added to the automatically-generated entry (mepStaticAttr)">
- Static Attribute
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={4} title="Sets an attribute with a defined value that must be added to the automatically-generated entry (mepStaticAttr)">
+ Static Attribute
</Col>
<Col sm={8}>
<Typeahead
@@ -786,10 +780,8 @@ class ManagedEntries extends React.Component {
controlId="templateMappedAttr"
disabled={false}
>
- <Col sm={4}>
- <ControlLabel title="Sets attributes in the Managed Entries template entry which must exist in the generated entry (mepMappedAttr)">
- Mapped Attributes
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={4} title="Sets attributes in the Managed Entries template entry which must exist in the generated entry (mepMappedAttr)">
+ Mapped Attributes
</Col>
<Col sm={8}>
<Typeahead
diff --git a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
index 9cbceffe7..74a74e6de 100644
--- a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
@@ -815,10 +815,8 @@ class MemberOf extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup controlId="configAutoAddOC" disabled={false}>
- <Col sm={3}>
- <ControlLabel title="If an entry does not have an object class that allows the memberOf attribute then the memberOf plugin will automatically add the object class listed in the memberOfAutoAddOC parameter">
- Auto Add OC
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="If an entry does not have an object class that allows the memberOf attribute then the memberOf plugin will automatically add the object class listed in the memberOfAutoAddOC parameter">
+ Auto Add OC
</Col>
<Col sm={9}>
<Typeahead
@@ -1042,10 +1040,11 @@ class MemberOf extends React.Component {
<Form horizontal>
<FormGroup controlId="memberOfAutoAddOC" disabled={false}>
<Col
+ componentClass={ControlLabel}
sm={3}
title="If an entry does not have an object class that allows the memberOf attribute then the memberOf plugin will automatically add the object class listed in the memberOfAutoAddOC parameter"
>
- <ControlLabel>Auto Add OC</ControlLabel>
+ Auto Add OC
</Col>
<Col sm={8}>
<Typeahead
diff --git a/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx b/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx
index d0c478761..6b2d0673b 100644
--- a/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx
+++ b/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx
@@ -759,8 +759,8 @@ class PassthroughAuthentication extends React.Component {
<Col sm={12}>
<Form horizontal>
<FormGroup key="pamConfigName" controlId="pamConfigName">
- <Col sm={3}>
- <ControlLabel>Config Name</ControlLabel>
+ <Col componentClass={ControlLabel} sm={3}>
+ Config Name
</Col>
<Col sm={9}>
<FormControl
@@ -833,10 +833,8 @@ class PassthroughAuthentication extends React.Component {
controlId="pamIDAttr"
disabled={false}
>
- <Col sm={3}>
- <ControlLabel title="Contains the attribute name which is used to hold the PAM user ID (pamIDAttr)">
- ID Attribute
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title="Contains the attribute name which is used to hold the PAM user ID (pamIDAttr)">
+ ID Attribute
</Col>
<Col sm={9}>
<Typeahead
@@ -904,10 +902,8 @@ class PassthroughAuthentication extends React.Component {
{Object.entries(modalPAMConfigFields).map(
([id, content]) => (
<FormGroup key={id} controlId={id}>
- <Col sm={3}>
- <ControlLabel title={content.help}>
- {content.name}
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={3} title={content.help}>
+ {content.name}
</Col>
<Col sm={9}>
<FormControl
@@ -921,26 +917,30 @@ class PassthroughAuthentication extends React.Component {
)}
<FormGroup key="pamCheckboxes" controlId="pamCheckboxes">
- <Col sm={4}>
- <Checkbox
- id="pamFallback"
- checked={pamFallback}
- onChange={this.handleCheckboxChange}
- title={`Sets whether to fallback to regular LDAP authentication if PAM authentication fails (pamFallback)`}
- >
- Fallback
- </Checkbox>
- </Col>
- <Col sm={4}>
- <Checkbox
- id="pamSecure"
- checked={pamSecure}
- onChange={this.handleCheckboxChange}
- title="Requires secure TLS connection for PAM authentication (pamSecure)"
- >
- Secure
- </Checkbox>
- </Col>
+ <Row>
+ <Col smOffset={1} sm={7}>
+ <Checkbox
+ id="pamFallback"
+ checked={pamFallback}
+ onChange={this.handleCheckboxChange}
+ title={`Sets whether to fallback to regular LDAP authentication if PAM authentication fails (pamFallback)`}
+ >
+ Fallback Enabled
+ </Checkbox>
+ </Col>
+ </Row>
+ <Row className="ds-margin-top">
+ <Col smOffset={1} sm={7}>
+ <Checkbox
+ id="pamSecure"
+ checked={pamSecure}
+ onChange={this.handleCheckboxChange}
+ title="Requires secure TLS connection for PAM authentication (pamSecure)"
+ >
+ Require Secure Connection
+ </Checkbox>
+ </Col>
+ </Row>
</FormGroup>
</Form>
</Col>
@@ -975,7 +975,7 @@ class PassthroughAuthentication extends React.Component {
<Icon type="pf" name="close" />
</button>
<Modal.Title>
- {newPAMConfigEntry ? "Add" : "Edit"}
+ {newPAMConfigEntry ? "Add " : "Edit "}
Passthough Authentication Plugin URL
</Modal.Title>
</Modal.Header>
@@ -1022,10 +1022,8 @@ class PassthroughAuthentication extends React.Component {
</FormGroup>
{Object.entries(modalURLFields).map(([id, content]) => (
<FormGroup key={id} controlId={id}>
- <Col sm={5}>
- <ControlLabel title={content.help}>
- {content.name}
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={5} title={content.help}>
+ {content.name}
</Col>
<Col sm={7}>
<FormControl
@@ -1075,7 +1073,7 @@ class PassthroughAuthentication extends React.Component {
</Col>
</FormGroup>
<FormGroup key="urlStartTLS" controlId="urlStartTLS">
- <Col sm={4}>
+ <Col componentClass={ControlLabel} sm={5}>
<Checkbox
id="urlStartTLS"
checked={urlStartTLS}
@@ -1087,18 +1085,14 @@ class PassthroughAuthentication extends React.Component {
</Col>
</FormGroup>
<FormGroup key="resultURL" controlId="resultURL">
- <Col sm={5}>
- <ControlLabel title="The URL that will be added or modified after you click 'Save'">
- Result URL
- </ControlLabel>
+ <Col componentClass={ControlLabel} sm={5} title="The URL that will be added or modified after you click 'Save'">
+ Result URL
</Col>
<Col sm={7}>
- <ControlLabel>
- {urlConnType}://{urlAuthDS}/{urlSubtree}{" "}
- {urlMaxConns},{urlMaxOps},{urlTimeout},
- {urlLDVer},{urlConnLifeTime},
- {urlStartTLS ? "1" : "0"}
- </ControlLabel>
+ {urlConnType}://{urlAuthDS}/{urlSubtree}{" "}
+ {urlMaxConns},{urlMaxOps},{urlTimeout},
+ {urlLDVer},{urlConnLifeTime},
+ {urlStartTLS ? "1" : "0"}
</Col>
</FormGroup>
</Form>
diff --git a/src/cockpit/389-console/src/lib/replication/replModals.jsx b/src/cockpit/389-console/src/lib/replication/replModals.jsx
index 4c15d7118..0ea6f47fd 100644
--- a/src/cockpit/389-console/src/lib/replication/replModals.jsx
+++ b/src/cockpit/389-console/src/lib/replication/replModals.jsx
@@ -1300,7 +1300,7 @@ export class EnableReplModal extends React.Component {
<p className="ds-margin-top-xxlg">
You can optionally define the authentication information
for this replicated suffix. Either a Manager DN and Password,
- a Bind Group DN, or both, can be provideed. The Manager DN should
+ a Bind Group DN, or both, can be provided. The Manager DN should
be an entry under "cn=config" and if it does not exist it will
be created, while the Bind Group DN is usually an existing
group located in the database suffix. Typically, just the
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
index a155f4914..0b0c08e43 100644
--- a/src/cockpit/389-console/src/monitor.jsx
+++ b/src/cockpit/389-console/src/monitor.jsx
@@ -166,11 +166,6 @@ export class Monitor extends React.Component {
}
loadSuffixTree(fullReset) {
- if (this.state.firstLoad) {
- this.setState({
- firstLoad: false
- });
- }
const cmd = [
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
"backend", "get-tree",
@@ -408,6 +403,11 @@ export class Monitor extends React.Component {
// - SNMP
// - Finally load the "tree"
//
+ if (this.state.firstLoad) {
+ this.setState({
+ firstLoad: false
+ });
+ }
let cmd = [
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
"config", "get", "nsslapd-auditlog", "nsslapd-accesslog", "nsslapd-errorlog", "nsslapd-auditfaillog"
diff --git a/src/cockpit/389-console/src/plugins.jsx b/src/cockpit/389-console/src/plugins.jsx
index ae2494534..53ef51405 100644
--- a/src/cockpit/389-console/src/plugins.jsx
+++ b/src/cockpit/389-console/src/plugins.jsx
@@ -162,8 +162,7 @@ export class Plugins extends React.Component {
var myObject = JSON.parse(content);
this.setState({
rows: myObject.items
- });
- this.toggleLoading();
+ }, this.toggleLoading());
})
.fail(err => {
if (err != 0) {
diff --git a/src/lib389/lib389/cli_conf/plugins/posix_winsync.py b/src/lib389/lib389/cli_conf/plugins/posix_winsync.py
index 05124188e..d4de7ae38 100644
--- a/src/lib389/lib389/cli_conf/plugins/posix_winsync.py
+++ b/src/lib389/lib389/cli_conf/plugins/posix_winsync.py
@@ -41,7 +41,7 @@ def do_fixup(inst, basedn, log, args):
def _add_parser_args(parser):
parser.add_argument('--create-memberof-task', choices=['true', 'false'], type=str.lower,
- help='Sets whether to run the memberOf fix-up task immediately after a sync run in order '
+ help='Sets whether to run the memberUID fix-up task immediately after a sync run in order '
'to update group memberships for synced users (posixWinsyncCreateMemberOfTask)')
parser.add_argument('--lower-case-uid', choices=['true', 'false'], type=str.lower,
help='Sets whether to store (and, if necessary, convert) the UID value in the memberUID '
| 0 |
bc781018e69b74a9cd087953bbbffcdd6ed44dd5
|
389ds/389-ds-base
|
fix compiler warning in acct policy plugin
|
commit bc781018e69b74a9cd087953bbbffcdd6ed44dd5
Author: Rich Megginson <[email protected]>
Date: Fri Feb 10 10:04:13 2012 -0700
fix compiler warning in acct policy plugin
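Editor's note: a small standalone illustration of the warning pattern addressed by the diff below; the setter here only mimics the shape of an API whose value argument is a void pointer, it is not the slapd pblock API. Passing a literal 1 where a pointer is expected triggers the warning; the fix keeps a local int alive for the duration of the call and passes its address.
#include <stdio.h>

static int stored_flag;

/* stand-in for a pblock-style setter: (handle, parameter id, value pointer) */
static void
set_param(void *handle, int param, void *value)
{
    (void)handle;
    (void)param;
    if (value != NULL) {
        stored_flag = *(int *)value;   /* the consumer dereferences the pointer */
    }
}

int
main(void)
{
    int skip_mod_attrs = 1;            /* value doesn't matter, only that it's non-NULL */

    /* set_param(NULL, 42, 1); <- would warn: int passed where void * expected */
    set_param(NULL, 42, &skip_mod_attrs);
    printf("flag stored as %d\n", stored_flag);
    return 0;
}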
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index c0420c2dd..5eb3d06e8 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -102,6 +102,7 @@ acct_record_login( const char *dn, void *txn )
acctPluginCfg *cfg;
void *plugin_id;
Slapi_PBlock *modpb = NULL;
+ int skip_mod_attrs = 1; /* value doesn't matter as long as not NULL */
cfg = get_config();
plugin_id = get_identity();
@@ -126,7 +127,7 @@ acct_record_login( const char *dn, void *txn )
plugin_id, SLAPI_OP_FLAG_NO_ACCESS_CHECK |
SLAPI_OP_FLAG_BYPASS_REFERRALS );
slapi_pblock_set( modpb, SLAPI_TXN, txn );
- slapi_pblock_set( modpb, SLAPI_SKIP_MODIFIED_ATTRS, 1);
+ slapi_pblock_set( modpb, SLAPI_SKIP_MODIFIED_ATTRS, &skip_mod_attrs );
slapi_modify_internal_pb( modpb );
slapi_pblock_get( modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc );
| 0 |
d2c285f02af792e97493fbed1ada287ff15e8317
|
389ds/389-ds-base
|
Issue 1199 - Misleading message in access log for idle timeout (#4385)
Issue 1199 - Misleading message in access log for idle timeout
Description: Update timeout error code in daemon.
Add extra detail to idle and IO timeout error messaging.
Typo in logconv.pl
Relates: #1199
Reviewed by: mreynolds389, droideck, Firstyear (Thanks folks)
|
commit d2c285f02af792e97493fbed1ada287ff15e8317
Author: Jamie Chapman <[email protected]>
Date: Tue Oct 20 15:00:27 2020 +0100
Issue 1199 - Misleading message in access log for idle timeout (#4385)
Issue 1199 - Misleading message in access log for idle timeout
Description: Update timeout error code in daemon.
Add extra detail to idle and IO timeout error messaging.
Typo in logconv.pl
Relates: #1199
Reviewed by: mreynolds389, droideck, Firstyear (Thanks folks)
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index d23bfb3e8..461902c80 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -2371,7 +2371,7 @@ sub parseLineNormal
elsif (m/- B3/){ $hashes->{rc}->{"B3"}++; }
elsif (m/- R1/){ $hashes->{rc}->{"R1"}++; }
elsif (m/- P1/){ $hashes->{rc}->{"P1"}++; }
- elsif (m/- P1/){ $hashes->{rc}->{"P2"}++; }
+ elsif (m/- P2/){ $hashes->{rc}->{"P2"}++; }
elsif (m/- U1/){ $hashes->{rc}->{"U1"}++; }
else { $hashes->{rc}->{"other"}++; }
}
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 74a39cf73..88b7dc3be 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -2226,6 +2226,8 @@ static ps_wakeup_all_fn_ptr ps_wakeup_all_fn = NULL;
void
disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error, int schedule_closure_job)
{
+ char * str_reason = NULL;
+
if ((conn->c_sd != SLAPD_INVALID_SOCKET &&
conn->c_connid == opconnid) &&
!(conn->c_flags & CONN_FLAG_CLOSING)) {
@@ -2250,18 +2252,29 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE
g_decrement_current_conn_count();
/*
- * Print the error captured above.
+ * Provide info on the error captured above.
*/
- if (error && (EPIPE != error)) {
+ switch(reason) {
+ case SLAPD_DISCONNECT_IDLE_TIMEOUT:
+ str_reason = "Idle timeout (nsslapd-idletimeout)";
+ break;
+ case SLAPD_DISCONNECT_IO_TIMEOUT:
+ str_reason = "IO timeout (nsslapd-ioblocktimeout)";
+ break;
+ default:
+ str_reason = "error";
+ break;
+ }
+ if(error) {
slapi_log_access(LDAP_DEBUG_STATS,
- "conn=%" PRIu64 " op=%d fd=%d closed error %d (%s) - %s\n",
- conn->c_connid, opid, conn->c_sd, error,
+ "conn=%" PRIu64 " op=%d fd=%d closed %s %d (%s) - %s\n",
+ conn->c_connid, opid, conn->c_sd, str_reason, error,
slapd_system_strerror(error),
slapd_pr_strerror(reason));
} else {
slapi_log_access(LDAP_DEBUG_STATS,
- "conn=%" PRIu64 " op=%d fd=%d closed - %s\n",
- conn->c_connid, opid, conn->c_sd,
+ "conn=%" PRIu64 " op=%d fd=%d closed %s - %s\n",
+ conn->c_connid, opid, conn->c_sd, str_reason,
slapd_pr_strerror(reason));
}
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 0f4f65588..3deb358f9 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1567,7 +1567,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
NULL == c->c_ops) {
/* idle timeout */
disconnect_server_nomutex(c, c->c_connid, -1,
- SLAPD_DISCONNECT_IDLE_TIMEOUT, EAGAIN);
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
}
}
pthread_mutex_unlock(&(c->c_mutex));
| 0 |
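The daemon fix above is in C; as a language-neutral illustration of the logging change, the sketch below maps a disconnect reason to the descriptive text that now appears in the access log. The numeric constants are placeholders, not the real SLAPD_DISCONNECT_* values.

# Placeholder reason codes (the real values live in the C headers).
SLAPD_DISCONNECT_IDLE_TIMEOUT = 1
SLAPD_DISCONNECT_IO_TIMEOUT = 2

REASON_TEXT = {
    SLAPD_DISCONNECT_IDLE_TIMEOUT: "Idle timeout (nsslapd-idletimeout)",
    SLAPD_DISCONNECT_IO_TIMEOUT: "IO timeout (nsslapd-ioblocktimeout)",
}

def close_message(connid, opid, fd, reason, error=0, strerror=""):
    # Fall back to the generic word "error", as the C switch does.
    str_reason = REASON_TEXT.get(reason, "error")
    if error:
        return f"conn={connid} op={opid} fd={fd} closed {str_reason} {error} ({strerror})"
    return f"conn={connid} op={opid} fd={fd} closed {str_reason}"

print(close_message(42, 3, 64, SLAPD_DISCONNECT_IDLE_TIMEOUT, 110, "Connection timed out"))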
b0f8c322aa926236b85b86afc62e4dfebde4fb0e
|
389ds/389-ds-base
|
Issue 5539 - Make logger's parameter name unified (#5540)
Description: Some of the functions of `lib389.cli_conf.security`
used `log` as the logger's parameter name, while others used `logs`.
This led to regressions like #5539.
Fix Description:
Replace `logs` with `log`.
relates: https://github.com/389ds/389-ds-base/issues/5539
Reviewed by: mreynolds (Thanks)
|
commit b0f8c322aa926236b85b86afc62e4dfebde4fb0e
Author: Stanislav Levin <[email protected]>
Date: Tue Nov 22 17:22:46 2022 +0300
Issue 5539 - Make logger's parameter name unified (#5540)
Description: Some of the functions of `lib389.cli_conf.security`
used `log` as the logger's parameter name, while others used `logs`.
This led to regressions like #5539.
Fix Description:
Replace `logs` with `log`.
relates: https://github.com/389ds/389-ds-base/issues/5539
Reviewed by: mreynolds (Thanks)
diff --git a/src/lib389/lib389/cli_conf/security.py b/src/lib389/lib389/cli_conf/security.py
index 15196a729..d06eee05c 100644
--- a/src/lib389/lib389/cli_conf/security.py
+++ b/src/lib389/lib389/cli_conf/security.py
@@ -76,7 +76,7 @@ RSA_ATTRS_MAP = OrderedDict([
])
-def _security_generic_get(inst, basedn, logs, args, attrs_map):
+def _security_generic_get(inst, basedn, log, args, attrs_map):
result = {}
for attr, props in attrs_map.items():
val = props.cls(inst).get_attr_val_utf8(props.attr)
@@ -89,7 +89,7 @@ def _security_generic_get(inst, basedn, logs, args, attrs_map):
print('\n'.join([f'{attr}: {value or ""}' for attr, value in result.items()]))
-def _security_generic_set(inst, basedn, logs, args, attrs_map):
+def _security_generic_set(inst, basedn, log, args, attrs_map):
for attr, props in attrs_map.items():
arg = getattr(args, attr.replace('-', '_'))
if arg is None:
| 0 |
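The class of regression fixed above is easy to reproduce in isolation: when a CLI dispatcher calls every handler with the same keyword argument name, any handler that spells the parameter differently raises a TypeError. A minimal, self-contained sketch of that failure mode (the function names are hypothetical, not the actual lib389 dispatcher):

import logging

def dispatch(handler, inst, basedn, args):
    # Every handler is expected to accept the parameter name `log`.
    return handler(inst, basedn, log=logging.getLogger("dsconf"), args=args)

def security_get(inst, basedn, log, args):      # consistent name -> works
    log.info("getting security settings")

def security_set(inst, basedn, logs, args):     # inconsistent name -> breaks
    logs.info("setting security settings")

dispatch(security_get, None, "cn=config", None)        # fine
try:
    dispatch(security_set, None, "cn=config", None)    # TypeError: unexpected keyword 'log'
except TypeError as e:
    print("regression:", e)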
6d70cbe8d491de66629f1e67501f355b8512e175
|
389ds/389-ds-base
|
Issue 50712 - Version comparison doesn't work correctly on git builds
Bug Description:
`python3-packaging` is not shipped in RHEL8. But it's bundled with
`setuptools` which is present in all major distributions.
Fix Description:
Use `pkg_resources` module from `setuptools` which provides needed
functionality, change lib389 and rpm dependencies accordingly.
Unfortunately, `pkg_resources.parse_version()` returns different
objects for different strings too, so use `LegacyVersion` directly
from `pkg_resources.extern.packaging.version`.
Fixes: https://pagure.io/389-ds-base/issue/50712
Relates: https://pagure.io/389-ds-base/issue/50706
|
commit 6d70cbe8d491de66629f1e67501f355b8512e175
Author: Viktor Ashirov <[email protected]>
Date: Fri Nov 15 11:55:07 2019 +0100
Issue 50712 - Version comparison doesn't work correctly on git builds
Bug Description:
`python3-packaging` is not shipped in RHEL8. But it's bundled with
`setuptools` which is present in all major distributions.
Fix Description:
Use `pkg_resources` module from `setuptools` which provides needed
functionality, change lib389 and rpm dependencies accordingly.
Unfortunately, `pkg_resources.parse_version()` returns different
objects for different strings too, so use `LegacyVersion` directly
from `pkg_resources.extern.packaging.version`.
Fixes: https://pagure.io/389-ds-base/issue/50712
Relates: https://pagure.io/389-ds-base/issue/50706
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 5a2cfc4ed..6f4a1e1a9 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -130,7 +130,6 @@ BuildRequires: python%{python3_pkgversion}-argcomplete
BuildRequires: python%{python3_pkgversion}-argparse-manpage
BuildRequires: python%{python3_pkgversion}-policycoreutils
BuildRequires: python%{python3_pkgversion}-libselinux
-BuildRequires: python%{python3_pkgversion}-packaging
# For cockpit
BuildRequires: rsync
@@ -303,7 +302,7 @@ Requires: python%{python3_pkgversion}-pyasn1-modules
Requires: python%{python3_pkgversion}-dateutil
Requires: python%{python3_pkgversion}-argcomplete
Requires: python%{python3_pkgversion}-libselinux
-Requires: python%{python3_pkgversion}-packaging
+Requires: python%{python3_pkgversion}-setuptools
%{?python_provide:%python_provide python%{python3_pkgversion}-lib389}
%description -n python%{python3_pkgversion}-lib389
diff --git a/src/lib389/lib389/tests/utils_test.py b/src/lib389/lib389/tests/utils_test.py
index 5378066b6..a696eb5c9 100644
--- a/src/lib389/lib389/tests/utils_test.py
+++ b/src/lib389/lib389/tests/utils_test.py
@@ -145,6 +145,35 @@ def test_get_log_data(data):
assert display_log_data(before) == after
[email protected]('ds_ver, cmp_ver', [
+ ('1.3.1', '1.3.2'),
+ ('1.3.1', '1.3.10'),
+ ('1.3.2', '1.3.10'),
+ ('1.3.9', ('1.3.10', '1.4.2.0')),
+ ('1.4.0.1', ('1.3.9', '1.4.1.0', '1.4.2.1')),
+ ('1.4.1', '1.4.2.0-20191115gitbadc0ffee' ),
+])
+def test_ds_is_older_versions(ds_ver, cmp_ver):
+ if isinstance(cmp_ver, tuple):
+ assert ds_is_related('older', ds_ver, *cmp_ver)
+ else:
+ assert ds_is_related('older', ds_ver, cmp_ver)
+
[email protected]('ds_ver, cmp_ver', [
+ ('1.3.2', '1.3.1'),
+ ('1.3.10', '1.3.1'),
+ ('1.3.10', '1.3.2'),
+ ('1.3.10', ('1.3.9', '1.4.2.0')),
+ ('1.4.2.1', ('1.3.9', '1.4.0.1', '1.4.2.0')),
+ ('1.4.2.0-20191115gitbadc0ffee', '1.4.1' ),
+])
+def test_ds_is_newer_versions(ds_ver, cmp_ver):
+ if isinstance(cmp_ver, tuple):
+ assert ds_is_related('newer', ds_ver, *cmp_ver)
+ else:
+ assert ds_is_related('newer', ds_ver, cmp_ver)
+
+
if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index 3234cdccb..b9eacfdea 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -40,7 +40,7 @@ import shlex
import operator
import subprocess
import math
-from packaging.version import LegacyVersion
+from pkg_resources.extern.packaging.version import LegacyVersion
from socket import getfqdn
from ldapurl import LDAPUrl
from contextlib import closing
@@ -1067,13 +1067,12 @@ def get_ds_version():
return p.version
-def ds_is_related(relation, *ver):
+def ds_is_related(relation, ds_ver, *ver):
"""
Return a result of a comparison between the current version of ns-slapd and a provided version.
"""
ops = {'older': operator.lt,
'newer': operator.ge}
- ds_ver = get_ds_version()
if len(ver) > 1:
for cmp_ver in ver:
if cmp_ver.startswith(ds_ver[:3]):
@@ -1086,14 +1085,16 @@ def ds_is_older(*ver):
"""
Return True if the current version of ns-slapd is older than a provided version
"""
- return ds_is_related('older', *ver)
+ ds_ver = get_ds_version()
+ return ds_is_related('older', ds_ver, *ver)
def ds_is_newer(*ver):
"""
Return True if the current version of ns-slapd is newer than a provided version
"""
- return ds_is_related('newer', *ver)
+ ds_ver = get_ds_version()
+ return ds_is_related('newer', ds_ver, *ver)
def gentime_to_datetime(gentime):
diff --git a/src/lib389/requirements.txt b/src/lib389/requirements.txt
index 5cce1d04b..eb2475f3b 100644
--- a/src/lib389/requirements.txt
+++ b/src/lib389/requirements.txt
@@ -6,4 +6,4 @@ six
argcomplete
argparse-manpage
python-ldap
-packaging
+setuptools
diff --git a/src/lib389/setup.py b/src/lib389/setup.py
index f2e404333..056173936 100644
--- a/src/lib389/setup.py
+++ b/src/lib389/setup.py
@@ -82,7 +82,7 @@ setup(
'argcomplete',
'argparse-manpage',
'python-ldap',
- 'packaging',
+ 'setuptools',
],
cmdclass={
| 0 |
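The reason a real version parser is needed at all: naive string comparison mis-orders versions once a component reaches two digits. A small sketch using parse_version() from pkg_resources; the helper below is a simplified stand-in, not the exact lib389 ds_is_related() logic, and it skips the git-build suffix handling that LegacyVersion covered (whether such suffixes still parse depends on the installed setuptools/packaging release):

import operator
from pkg_resources import parse_version

# Lexicographic comparison gets 1.3.9 vs 1.3.10 wrong; parse_version() does not.
assert "1.3.9" > "1.3.10"                                  # wrong answer
assert parse_version("1.3.9") < parse_version("1.3.10")    # correct

def is_related(relation, ds_ver, *vers):
    # Simplified stand-in: passing ds_ver explicitly (instead of calling
    # get_ds_version() inside) is what makes the new unit tests possible.
    ops = {"older": operator.lt, "newer": operator.ge}
    return all(ops[relation](parse_version(ds_ver), parse_version(v)) for v in vers)

print(is_related("older", "1.3.9", "1.3.10", "1.4.2.0"))    # True
print(is_related("newer", "1.4.2.1", "1.3.9", "1.4.0.1"))   # True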
6a1c2737ee49ba295ffae5a49265dcaa9debf09d
|
389ds/389-ds-base
|
Bug 544321 - remove-ds.pl should not throw error unlabelling port
When removing an instance using remove-ds.pl, a fatal error will
be thrown when trying to remove the SELinux port label if the port
is not labelled. This patch makes this case a non-error since
there is no need to complain about removing a label if it has
already been removed.
|
commit 6a1c2737ee49ba295ffae5a49265dcaa9debf09d
Author: Nathan Kinder <[email protected]>
Date: Mon Oct 11 11:23:28 2010 -0700
Bug 544321 - remove-ds.pl should not throw error unlabelling port
When removing an instance using remove-ds.pl, a fatal error will
be thrown when trying to remove the SELinux port label if the port
is not labelled. This patch makes this case a non-error since
there is no need to complain about removing a label if it has
already been removed.
diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in
index a156f108f..b04e54ccd 100644
--- a/ldap/admin/src/scripts/DSCreate.pm.in
+++ b/ldap/admin/src/scripts/DSCreate.pm.in
@@ -1194,7 +1194,7 @@ sub removeDSInstance {
{
my $semanage_err = `semanage port -d -t ldap_port_t -p tcp $port 2>&1`;
if ($? != 0) {
- if ($semanage_err !~ /defined in policy, cannot be deleted/) {
+ if (($semanage_err !~ /defined in policy, cannot be deleted/) && ($semanage_err !~ /is not defined/)) {
push @errs, [ 'error_removing_port_label', $port, $semanage_err];
debug(1, "Warning: Port $port not removed from selinux policy correctly. Error: $semanage_err\n");
}
@@ -1205,7 +1205,7 @@ sub removeDSInstance {
{
my $semanage_err = `semanage port -d -t ldap_port_t -p tcp $secureport 2>&1`;
if ($? != 0) {
- if ($semanage_err !~ /defined in policy, cannot be deleted/) {
+ if (($semanage_err !~ /defined in policy, cannot be deleted/) && ($semanage_err !~ /is not defined/)) {
push @errs, [ 'error_removing_port_label', $secureport, $semanage_err];
debug(1, "Warning: Port $secureport not removed from selinux policy correctly. Error: $semanage_err\n");
}
| 0 |
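The Perl change above simply widens the list of semanage error messages that are treated as harmless. The same idea, sketched in Python with illustrative (not verbatim) semanage output:

import re

# Errors that only mean "nothing to remove" and should not be reported.
BENIGN_PATTERNS = (
    r"defined in policy, cannot be deleted",
    r"is not defined",
)

def unlabel_failed(semanage_err):
    """Return True only for semanage errors worth surfacing to the admin."""
    return not any(re.search(p, semanage_err) for p in BENIGN_PATTERNS)

print(unlabel_failed("port tcp/389 is not defined"))               # False: ignore
print(unlabel_failed("could not query port record for tcp/389"))   # True: report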
ba8134132fe2d9ed62b990c781b51beff680ada2
|
389ds/389-ds-base
|
Bug 610119 - fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199
https://bugzilla.redhat.com/show_bug.cgi?id=610119
Resolves: bug 610119
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199
Fix description: Catch possible NULL pointer in import_producer(), upgradedn_producer(),
bulk_import_queue(), and dse_conf_backup_core().
|
commit ba8134132fe2d9ed62b990c781b51beff680ada2
Author: Endi S. Dewata <[email protected]>
Date: Thu Jul 1 23:25:26 2010 -0500
Bug 610119 - fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199
https://bugzilla.redhat.com/show_bug.cgi?id=610119
Resolves: bug 610119
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199
Fix description: Catch possible NULL pointer in import_producer(), upgradedn_producer(),
bulk_import_queue(), and dse_conf_backup_core().
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 3f5491c95..8ea647745 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -646,7 +646,7 @@ import_producer(void *param)
}
ep = import_make_backentry(e, id);
- if (!ep)
+ if (!ep || !ep->ep_entry)
goto error;
/* check for include/exclude subtree lists */
@@ -1606,7 +1606,7 @@ upgradedn_producer(void *param)
}
ep = import_make_backentry(e, temp_id);
- if (!ep) {
+ if (!ep || !ep->ep_entry) {
slapi_entry_free(e); e = NULL;
goto error;
}
@@ -2725,7 +2725,7 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
/* make into backentry */
ep = import_make_backentry(entry, id);
- if (!ep) {
+ if (!ep || !ep->ep_entry) {
import_abort_all(job, 1);
PR_Unlock(job->wire_lock);
return -1;
@@ -2965,6 +2965,14 @@ dse_conf_backup_core(struct ldbminfo *li, char *dest_dir, char *file_name, char
}
srch_pb = slapi_pblock_new();
+ if (!srch_pb) {
+ LDAPDebug(LDAP_DEBUG_ANY,
+ "dse_conf_backup(%s): out of memory\n",
+ filter, 0, 0);
+ rval = -1;
+ goto out;
+ }
+
slapi_search_internal_set_pb(srch_pb, li->li_plugin->plg_dn,
LDAP_SCOPE_SUBTREE, filter, NULL, 0, NULL, NULL, li->li_identity, 0);
slapi_search_internal_pb(srch_pb);
@@ -3049,9 +3057,9 @@ dse_conf_backup_core(struct ldbminfo *li, char *dest_dir, char *file_name, char
}
out:
- slapi_free_search_results_internal(srch_pb);
if (srch_pb)
{
+ slapi_free_search_results_internal(srch_pb);
slapi_pblock_destroy(srch_pb);
}
| 0 |
03df340d4aacd6074700e3c5945e785965f2ae51
|
389ds/389-ds-base
|
Ticket 47838 - CI test: add test case for ticket 47838
Description: harden the list of ciphers available by default
Test Case 1 - Check the ciphers availability for "+all"
Test Case 2 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha"
Test Case 3 - Check the ciphers availability for "-all"
Test Case 4 - Check no nssSSL3Chiphers (default setting)
Test Case 5 - Check default nssSSL3Chiphers (default setting)
Test Case 6 - Check nssSSL3Chiphers: +all,-rsa_rc4_128_md5
Test Case 7 - Check nssSSL3Chiphers: -all,+rsa_rc4_128_md5
Test Case 8 - Check no nssSSL3Chiphers (default setting) with no errorlog-level
Test Case 9 - Check nssSSL3Chiphers: long list using the NSS Cipher Suite name
|
commit 03df340d4aacd6074700e3c5945e785965f2ae51
Author: Noriko Hosoi <[email protected]>
Date: Thu Aug 7 15:05:56 2014 -0700
Ticket 47838 - CI test: add test case for ticket 47838
Description: harden the list of ciphers available by default
Test Case 1 - Check the ciphers availability for "+all"
Test Case 2 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha"
Test Case 3 - Check the ciphers availability for "-all"
Test Case 4 - Check no nssSSL3Chiphers (default setting)
Test Case 5 - Check default nssSSL3Chiphers (default setting)
Test Case 6 - Check nssSSL3Chiphers: +all,-rsa_rc4_128_md5
Test Case 7 - Check nssSSL3Chiphers: -all,+rsa_rc4_128_md5
Test Case 8 - Check no nssSSL3Chiphers (default setting) with no errorlog-level
Test Case 9 - Check nssSSL3Chiphers: long list using the NSS Cipher Suite name
diff --git a/dirsrvtests/tickets/ticket47838_test.py b/dirsrvtests/tickets/ticket47838_test.py
new file mode 100644
index 000000000..dedd61d93
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47838_test.py
@@ -0,0 +1,552 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import socket
+import pytest
+import shutil
+from lib389 import DirSrv, Entry, tools
+from lib389 import DirSrvTools
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+CONFIG_DN = 'cn=config'
+ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN
+RSA = 'RSA'
+RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN)
+LDAPSPORT = '10636'
+SERVERCERT = 'Server-Cert'
+plus_all_ecount = 0
+plus_all_dcount = 0
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
[email protected](scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ try:
+ standalone.start(timeout=10)
+ except ldap.SERVER_DOWN:
+ pass
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+def _header(topology, label):
+ topology.standalone.log.info("\n\n###############################################")
+ topology.standalone.log.info("#######")
+ topology.standalone.log.info("####### %s" % label)
+ topology.standalone.log.info("#######")
+ topology.standalone.log.info("###############################################")
+
+def test_ticket47838_init(topology):
+ """
+ Generate self signed cert and import it to the DS cert db.
+ Enable SSL
+ """
+ _header(topology, 'Testing Ticket 47838 - harden the list of ciphers available by default')
+
+ conf_dir = topology.standalone.confdir
+
+ log.info("\n######################### Checking existing certs ######################\n")
+ os.system('certutil -L -d %s -n "CA certificate"' % conf_dir)
+ os.system('certutil -L -d %s -n "%s"' % (conf_dir, SERVERCERT))
+
+ log.info("\n######################### Create a password file ######################\n")
+ pwdfile = '%s/pwdfile.txt' % (conf_dir)
+ opasswd = os.popen("(ps -ef ; w ) | sha1sum | awk '{print $1}'", "r")
+ passwd = opasswd.readline()
+ pwdfd = open(pwdfile, "w")
+ pwdfd.write(passwd)
+ pwdfd.close()
+
+ log.info("\n######################### Create a noise file ######################\n")
+ noisefile = '%s/noise.txt' % (conf_dir)
+ noise = os.popen("(w ; ps -ef ; date ) | sha1sum | awk '{print $1}'", "r")
+ noisewdfd = open(noisefile, "w")
+ noisewdfd.write(noise.readline())
+ noisewdfd.close()
+
+ log.info("\n######################### Create key3.db and cert8.db database ######################\n")
+ os.system("ls %s" % pwdfile)
+ os.system("cat %s" % pwdfile)
+ os.system('certutil -N -d %s -f %s' % (conf_dir, pwdfile))
+
+ log.info("\n######################### Creating encryption key for CA ######################\n")
+ os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile))
+
+ log.info("\n######################### Creating self-signed CA certificate ######################\n")
+ os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile))
+
+ log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n")
+ cafile = '%s/cacert.asc' % conf_dir
+ catxt = os.popen('certutil -L -d %s -n "CA certificate" -a' % conf_dir)
+ cafd = open(cafile, "w")
+ while True:
+ line = catxt.readline()
+ if (line == ''):
+ break
+ cafd.write(line)
+ cafd.close()
+
+ log.info("\n######################### Generate the server certificate ######################\n")
+ ohostname = os.popen('hostname --fqdn', "r")
+ myhostname = ohostname.readline()
+ os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
+
+ log.info("\n######################### create the pin file ######################\n")
+ pinfile = '%s/pin.txt' % (conf_dir)
+ pintxt = 'Internal (Software) Token:%s' % passwd
+ pinfd = open(pinfile, "w")
+ pinfd.write(pintxt)
+ pinfd.close()
+
+ log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n")
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'on'),
+ (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
+ (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
+
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
+ (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
+ (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)])
+
+ topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
+ 'cn': RSA,
+ 'nsSSLPersonalitySSL': SERVERCERT,
+ 'nsSSLToken': 'internal (software)',
+ 'nsSSLActivation': 'on'})))
+
+def test_ticket47838_run_0(topology):
+ """
+ Check nsSSL3Ciphers: +all
+ All ciphers are enabled except null.
+ """
+ _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.restart(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ assert ecount >= 60
+ assert dcount <= 7
+ global plus_all_ecount
+ global plus_all_dcount
+ plus_all_ecount = ecount
+ plus_all_dcount = dcount
+ weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ wcount = int(weak.readline().rstrip())
+ log.info("Weak ciphers: %d" % wcount)
+ assert wcount <= 29
+
+def test_ticket47838_run_1(topology):
+ """
+ Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
+ rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled.
+ """
+ _header(topology, 'Test Case 2 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha"')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_0' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == 2
+ assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
+
+def test_ticket47838_run_2(topology):
+ """
+ Check nsSSL3Ciphers: -all
+ All ciphers are disabled.
+ """
+ _header(topology, 'Test Case 3 - Check the ciphers availability for "-all"')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_1' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == 0
+ assert dcount == (plus_all_ecount + plus_all_dcount)
+
+def test_ticket47838_run_3(topology):
+ """
+ Check no nsSSL3Ciphers
+ Default ciphers are enabled.
+ """
+ _header(topology, 'Test Case 4 - Check no nssSSL3Chiphers (default setting)')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_2' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == 12
+ assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
+ weak = os.popen('egrep "SSL alert:" %s | egrep enabled | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ wcount = int(weak.readline().rstrip())
+ log.info("Weak ciphers in the default setting: %d" % wcount)
+ assert wcount == 0
+
+def test_ticket47838_run_4(topology):
+ """
+ Check nsSSL3Ciphers: default
+ Default ciphers are enabled.
+ """
+ _header(topology, 'Test Case 5 - Check default nssSSL3Chiphers (default setting)')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_3' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == 12
+ assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
+ weak = os.popen('egrep "SSL alert:" %s | egrep enabled | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ wcount = int(weak.readline().rstrip())
+ log.info("Weak ciphers in the default setting: %d" % wcount)
+ assert wcount == 0
+
+def test_ticket47838_run_5(topology):
+ """
+ Check nssSSL3Chiphers: +all,-rsa_rc4_128_md5
+ All ciphers are disabled.
+ """
+ _header(topology, 'Test Case 6 - Check nssSSL3Chiphers: +all,-rsa_rc4_128_md5')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-rsa_rc4_128_md5')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_4' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == (plus_all_ecount - 1)
+ assert dcount == (plus_all_dcount + 1)
+
+def test_ticket47838_run_6(topology):
+ """
+ Check nssSSL3Chiphers: -all,+rsa_rc4_128_md5
+ All ciphers are disabled.
+ """
+ _header(topology, 'Test Case 7 - Check nssSSL3Chiphers: -all,+rsa_rc4_128_md5')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_5' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == 1
+ assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
+
+def test_ticket47838_run_7(topology):
+ """
+ Check no nsSSL3Ciphers
+ Default ciphers are enabled.
+ """
+ _header(topology, 'Test Case 8 - Check no nssSSL3Chiphers (default setting) with no errorlog-level')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None)])
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_6' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ assert ecount == 12
+ assert dcount == 0
+ weak = os.popen('egrep "SSL alert:" %s | egrep enabled | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ wcount = int(weak.readline().rstrip())
+ log.info("Weak ciphers in the default setting: %d" % wcount)
+ assert wcount == 0
+
+def test_ticket47838_run_8(topology):
+ """
+ Check nssSSL3Chiphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
+ +TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ +TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,
+ +TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,
+ +TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,
+ -SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,
+ -SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,
+ -SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5
+ """
+ _header(topology, 'Test Case 9 - Check nssSSL3Chiphers: long list using the NSS Cipher Suite name')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
+ '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_7' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ enabled = os.popen('egrep "SSL alert:" %s | egrep enabled | wc -l' % topology.standalone.errlog)
+ disabled = os.popen('egrep "SSL alert:" %s | egrep disabled | wc -l' % topology.standalone.errlog)
+ ecount = int(enabled.readline().rstrip())
+ dcount = int(disabled.readline().rstrip())
+
+ log.info("Enabled ciphers: %d" % ecount)
+ log.info("Disabled ciphers: %d" % dcount)
+ global plus_all_ecount
+ global plus_all_dcount
+ assert ecount == 9
+ assert dcount == 0
+ weak = os.popen('egrep "SSL alert:" %s | egrep enabled | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ wcount = int(weak.readline().rstrip())
+ log.info("Weak ciphers in the default setting: %d" % wcount)
+
+def test_ticket47838_run_9(topology):
+ """
+ NOTE: Currently, this test case is commented out since if the server fails to start,
+ it repeatedly restarted.
+ Check nssSSL3Chiphers: all <== invalid value
+ All ciphers are disabled.
+ """
+ _header(topology, 'Test Case 10 - Check nssSSL3Chiphers: all, which is invalid')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'all')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_7' % (topology.standalone.errlog, topology.standalone.errlog))
+ os.system('touch %s' % (topology.standalone.errlog))
+ topology.standalone.start(timeout=120)
+
+ errmsg = os.popen('egrep "SSL alert:" %s | egrep "invalid ciphers"' % topology.standalone.errlog)
+ if errmsg != "":
+ log.info("Expected error message:")
+ log.info("%s" % errmsg)
+ else:
+ log.info("Expected error message was not found")
+ assert False
+
+ topology.standalone.log.info("ticket47838 was successfully verified.");
+
+def test_ticket47838_final(topology):
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.stop(timeout=10)
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47838_init(topo)
+
+ test_ticket47838_run_0(topo)
+ test_ticket47838_run_1(topo)
+ test_ticket47838_run_2(topo)
+ test_ticket47838_run_3(topo)
+ test_ticket47838_run_4(topo)
+ test_ticket47838_run_5(topo)
+ test_ticket47838_run_6(topo)
+ test_ticket47838_run_7(topo)
+ test_ticket47838_run_8(topo)
+ # test_ticket47838_run_9(topo)
+
+ test_ticket47838_final(topo)
+
+if __name__ == '__main__':
+ run_isolated()
| 0 |
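The test above derives its cipher counts by shelling out to egrep and wc for every check. The same counts can be computed in-process; a sketch under the assumption that the error-log line format is exactly what the test greps for ("SSL alert:" plus "enabled"/"disabled"/"WEAK CIPHER"):

def count_cipher_lines(errlog_path):
    # Mirrors the egrep|wc pipelines: enabled/disabled counts among
    # "SSL alert:" lines, plus the weak count as used in the first test case.
    ecount = dcount = wcount = 0
    with open(errlog_path) as errlog:
        for line in errlog:
            if "SSL alert:" not in line:
                continue
            if "enabled" in line:
                ecount += 1
            elif "disabled" in line:
                dcount += 1
            if "WEAK CIPHER" in line:
                wcount += 1
    return ecount, dcount, wcount

# Typical use inside one of the test cases above:
#   ecount, dcount, wcount = count_cipher_lines(topology.standalone.errlog)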
19c62472381cf72bc7bd20694bbd672edde7f94f
|
389ds/389-ds-base
|
bug 614511 - fix coverity null reference - revert macro aci $dn logic
instead of changing the logic to catch matched_val == NULL, make the code
work like it does in earlier releases, but add an explicit check for
matched_val == NULL to avoid using it.
|
commit 19c62472381cf72bc7bd20694bbd672edde7f94f
Author: Rich Megginson <[email protected]>
Date: Wed Sep 1 15:08:35 2010 -0600
bug 614511 - fix coverity null reference - revert macro aci $dn logic
instead of changing the logic to catch matched_val == NULL, make the code
work like it does in earlier releases, but add an explicit check for
matched_val == NULL to avoid using it.
diff --git a/ldap/servers/plugins/acl/acllas.c b/ldap/servers/plugins/acl/acllas.c
index c510eb1e2..a41487e32 100644
--- a/ldap/servers/plugins/acl/acllas.c
+++ b/ldap/servers/plugins/acl/acllas.c
@@ -4178,8 +4178,8 @@ acllas_replace_dn_macro( char *rule, char *matched_val, lasInfo *lasinfo) {
has_macro_levels = 1;
}
- if ( !has_macro_dn ) { /* No $(dn) */
-
+ if ( (!has_macro_dn && !has_macro_levels) || !matched_val ) { /* No ($dn) and no [$dn] ... */
+ /* ... or no value to replace */
/*
* No $dn thing, just return a list with two elements, rule and NULL.
* charray_add will create the list and null terminate it.
@@ -4194,9 +4194,11 @@ acllas_replace_dn_macro( char *rule, char *matched_val, lasInfo *lasinfo) {
*
* First, replace all occurrencers of ($dn) with the matched_val
*/
- patched_rule =
+ if ( has_macro_dn) {
+ patched_rule =
acl_replace_str(rule, ACL_RULE_MACRO_DN_KEY, matched_val);
-
+ }
+
/* If there are no [$dn] we're done */
if ( !has_macro_levels ) {
| 0 |
68e766891d1067b2b86bd48bfa16d0837fa5a737
|
389ds/389-ds-base
|
Ticket 47817 - The error result text message should be obtained just prior to sending result
Bug Description: We do not get pblock result message after the betxn postop plugins
are called. If a betxn plugin modified the ldap result message, then
when the backend returns the results it does an invalid read on the
old pointer of the old result message.
Fix Description: Get the result message after we call the betxn postop plugins.
https://fedorahosted.org/389/ticket/47817
jenkins: passed
Reviewed by: nhosoi(Thanks!)
|
commit 68e766891d1067b2b86bd48bfa16d0837fa5a737
Author: Mark Reynolds <[email protected]>
Date: Tue Jun 17 17:58:54 2014 -0400
Ticket 47817 - The error result text message should be obtained just prior to sending result
Bug Description: We do not get pblock result message after the betxn postop plugins
are called. If a betxn plugin modified the ldap result message, then
when the backend returns the results it does an invalid read on the
old pointer of the old result message.
Fix Description: Get the result message after we call the betxn postop plugins.
https://fedorahosted.org/389/ticket/47817
jenkins: passed
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index 181c17378..63fccb1ff 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -94,8 +94,8 @@ ldbm_back_add( Slapi_PBlock *pb )
char *msg;
int managedsait;
int ldap_result_code = LDAP_SUCCESS;
- char *ldap_result_message= NULL;
- char *ldap_result_matcheddn= NULL;
+ char *ldap_result_message = NULL;
+ char *ldap_result_matcheddn = NULL;
int retry_count = 0;
int disk_full = 0;
modify_context parent_modify_c = {0};
@@ -1227,6 +1227,8 @@ diskfull_return:
/* tell frontend not to free this entry */
slapi_pblock_set(pb, SLAPI_ADD_ENTRY, NULL);
}
+
+ slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message);
}
/* Release SERIAL LOCK */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 915ea0889..39fe70613 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -75,7 +75,7 @@ ldbm_back_delete( Slapi_PBlock *pb )
modify_context ruv_c = {0};
int rc = 0;
int ldap_result_code= LDAP_SUCCESS;
- char *ldap_result_message= NULL;
+ char *ldap_result_message = NULL;
Slapi_DN *sdnp = NULL;
char *e_uniqueid = NULL;
Slapi_DN nscpEntrySDN;
@@ -1331,6 +1331,7 @@ error_return:
if (!opreturn) {
slapi_pblock_set( pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval );
}
+ slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message);
}
/* Release SERIAL LOCK */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index d2dc983e6..8791b45e3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -901,6 +901,7 @@ error_return:
LDAPDebug1Arg( LDAP_DEBUG_TRACE, "SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN plugin "
"returned error code %d\n", retval );
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
+ slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message);
slapi_pblock_get(pb, SLAPI_PLUGIN_OPRETURN, &opreturn);
if (!opreturn) {
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 472c2f7dd..09a1fea0b 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -1430,6 +1430,7 @@ error_return:
if (!opreturn) {
slapi_pblock_set( pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval );
}
+ slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message);
}
/* Release SERIAL LOCK */
| 0 |
9974af9d528ba99b8ce8552411eaae2b0cb862c3
|
389ds/389-ds-base
|
Ticket 47855 - Add function to clear tmp directory
Reviewed by: ?
|
commit 9974af9d528ba99b8ce8552411eaae2b0cb862c3
Author: Mark Reynolds <[email protected]>
Date: Mon Jul 14 12:40:06 2014 -0400
Ticket 47855 - Add function to clear tmp directory
Reviewed by: ?
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 702fdd573..5eef20660 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -1928,3 +1928,22 @@ class DirSrv(SimpleLDAPObject):
return None
+ def clearTmpDir(self, filename):
+ """
+ @param filename - the name of the test script calling this function
+ @return - nothing
+
+ Clear the contents of the "tmp" dir, but leave the README file in place.
+ """
+ if os.path.exists(filename):
+ script_name = os.path.basename(filename)
+ if script_name:
+ dir_path = os.path.abspath(filename).replace('tickets/' + script_name, 'tmp/')
+ if dir_path:
+ filelist = [ tmpfile for tmpfile in os.listdir(dir_path) if tmpfile != 'README' ]
+ for tmpfile in filelist:
+ os.remove(os.path.abspath(dir_path + tmpfile))
+ return
+
+ log.fatal('Failed to clear tmp directory (%s)' % filename)
+
| 0 |
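A short usage note for the helper above: it is meant to be called with the test script's own path, from which it derives the sibling tmp/ directory. The sketch below reproduces only the path derivation (the example path is hypothetical):

import os

def tmp_dir_for(test_file):
    # Mirrors the string replacement in clearTmpDir(): swap "tickets/<script>"
    # for "tmp/" in the absolute path of the calling test module.
    script_name = os.path.basename(test_file)
    return os.path.abspath(test_file).replace('tickets/' + script_name, 'tmp/')

print(tmp_dir_for('/src/dirsrvtests/tickets/ticket47838_test.py'))
# -> /src/dirsrvtests/tmp/

# In a test module the call is simply:
#   topology.standalone.clearTmpDir(__file__)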
bea97a2d7b6437419cc5649645f0c58b6966a46e
|
389ds/389-ds-base
|
Fix attrcrypt usage of nsSymmetricKey
The current attrcrypt is failing because it attempts to store the encryption
symkey in the nsSymmetricKey attribute. This attribute is not defined in the
schema, so it defaults to DirectoryString syntax. Storing the value then fails
syntax validation because the binary values in the key do not conform to
DirectoryString. The code was poorly designed to handle and report errors of
this nature. The real fix is to add nsSymmetricKey as a BINARY syntax
attribute. I also cleaned up the error detection and reporting for this case.
Reviewed by: nkinder (Thanks!)
|
commit bea97a2d7b6437419cc5649645f0c58b6966a46e
Author: Rich Megginson <[email protected]>
Date: Tue Jul 14 12:50:36 2009 -0600
Fix attrcrypt usage of nsSymmetricKey
The current attrcrypt is failing because it attempts to store the encryption
symkey in the nsSymmetricKey attribute. This attribute is not defined in the
schema, so it defaults to DirectoryString syntax. Storing the value then fails
syntax validation because the binary values in the key do not conform to
DirectoryString. The code was poorly designed to handle and report errors of
this nature. The real fix is to add nsSymmetricKey as a BINARY syntax
attribute. I also cleaned up the error detection and reporting for this case.
Reviewed by: nkinder (Thanks!)
diff --git a/ldap/schema/50ns-directory.ldif b/ldap/schema/50ns-directory.ldif
index 00cced97a..943938dae 100644
--- a/ldap/schema/50ns-directory.ldif
+++ b/ldap/schema/50ns-directory.ldif
@@ -110,6 +110,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.54 NAME 'replicaUseSSL' DESC 'Netscape d
attributeTypes: ( 2.16.840.1.113730.3.1.57 NAME 'replicaRoot' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.58 NAME 'replicaBindDn' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.69 NAME 'subtreeACI' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server 1.0' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2084 NAME 'nsSymmetricKey' DESC 'A symmetric key - currently used by attribute encryption' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'attribute encryption' )
objectClasses: ( 2.16.840.1.113730.3.2.23 NAME 'netscapeDirectoryServer' DESC 'Netscape defined objectclass' SUP top MUST ( objectclass ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( nsDirectoryServer-oid NAME 'nsDirectoryServer' DESC 'Netscape defined objectclass' SUP top MUST ( objectclass $ nsServerID ) MAY ( serverHostName $ nsServerPort $ nsSecureServerPort $ nsBindPassword $ nsBindDN $ nsBaseDN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.8 NAME 'ntUser' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntUserPriv $ ntUserHomeDir $ ntUserComment $ ntUserFlags $ ntUserScriptPath $ ntUserAuthFlags $ ntUserUsrComment $ ntUserParms $ ntUserWorkstations $ ntUserLastLogon $ ntUserLastLogoff $ ntUserAcctExpires $ ntUserMaxStorage $ ntUserUnitsPerWeek $ ntUserLogonHours $ ntUserBadPwCount $ ntUserNumLogons $ ntUserLogonServer $ ntUserCountryCode $ ntUserCodePage $ ntUserUniqueId $ ntUserPrimaryGroupId $ ntUserProfile $ ntUserHomeDirDrive $ ntUserPasswordExpired $ ntUserCreateNewAccount $ ntUserDeleteAccount $ ntUniqueId) X-ORIGIN 'Netscape NT Synchronization' )
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 4eeb93c1d..42642b24d 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -1837,7 +1837,12 @@ int dblayer_instance_start(backend *be, int mode)
return 0;
}
- attrcrypt_init(inst);
+ if (attrcrypt_init(inst)) {
+ LDAPDebug(LDAP_DEBUG_ANY,
+ "Error: unable to initialize attrcrypt system for %s\n",
+ inst->inst_name, 0, 0);
+ return -1;
+ }
/* Get the name of the directory that holds index files
* for this instance. */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c
index a37c0bad0..763bd2c94 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c
@@ -209,9 +209,14 @@ attrcrypt_keymgmt_store_key(ldbm_instance *li, attrcrypt_cipher_state *acs, SECK
slapi_value_free(&key_value);
/* Store the entry */
slapi_add_entry_internal_set_pb(pb, e, NULL, li->inst_li->li_identity, 0);
- if ((rc = slapi_add_internal_pb(pb)) != LDAP_SUCCESS) {
- LDAPDebug(LDAP_DEBUG_ANY, "attrcrypt_keymgmt_store_key: failed to add config key entries to the DSE: %d\n", rc, 0, 0);
- }
+ rc = slapi_add_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (rc != LDAP_SUCCESS) {
+ char *resulttext = NULL;
+ slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &resulttext);
+ LDAPDebug(LDAP_DEBUG_ANY, "attrcrypt_keymgmt_store_key: failed to add config key entries to the DSE: %d: %s: %s\n", rc, ldap_err2string(rc), resulttext ? resulttext : "unknown");
+ ret = -1;
+ }
if (entry_string) {
slapi_ch_free((void**)&entry_string);
}
@@ -542,7 +547,7 @@ attrcrypt_get_acs(backend *be, attrcrypt_private *priv)
#if defined(DEBUG_ATTRCRYPT)
static void log_bytes(char* format_string, unsigned char *bytes, size_t length)
{
- size_t max_length = 20;
+ size_t max_length = 40;
size_t truncated_length = (length > max_length) ? max_length : length;
size_t x = 0;
char *print_buffer = NULL;
@@ -586,7 +591,7 @@ attrcrypt_crypto_op(attrcrypt_private *priv, backend *be, struct attrinfo *ai, c
if (encrypt) {
LDAPDebug(LDAP_DEBUG_ANY,"attrcrypt_crypto_op encrypt '%s' (%d)\n", in_data, in_size, 0);
} else {
- log_bytes("attrcrypt_crypto_op decrypt '%s' (%d)\n", in_data, in_size);
+ log_bytes("attrcrypt_crypto_op decrypt '%s' (%d)\n", (unsigned char *)in_data, in_size);
}
#endif
/* Allocate the output buffer */
@@ -623,15 +628,35 @@ attrcrypt_crypto_op(attrcrypt_private *priv, backend *be, struct attrinfo *ai, c
goto error;
} else {
#if defined(DEBUG_ATTRCRYPT)
+ int recurse = 1;
if (encrypt) {
log_bytes("slapd_pk11_DigestFinal '%s' (%d)\n", output_buffer, output_buffer_size1 + output_buffer_size2);
} else {
LDAPDebug(LDAP_DEBUG_ANY,"slapd_pk11_DigestFinal '%s', %u\n", output_buffer, output_buffer_size2, 0);
}
+ if (*out_size == -1) {
+ recurse = 0;
+ }
#endif
*out_size = output_buffer_size1 + output_buffer_size2;
*out_data = (char *)output_buffer;
ret = 0; /* success */
+#if defined(DEBUG_ATTRCRYPT)
+ if (recurse) {
+ char *redo_data = NULL;
+ size_t redo_size = -1;
+ int redo_ret;
+
+ LDAPDebug(LDAP_DEBUG_ANY,"------> check result of crypto op\n", 0, 0, 0);
+ redo_ret = attrcrypt_crypto_op(priv, be, ai, *out_data, *out_size, &redo_data, &redo_size, !encrypt);
+ slapi_log_error(SLAPI_LOG_FATAL, "DEBUG_ATTRCRYPT",
+ "orig length %ld redone length %ld\n", in_size, redo_size);
+ log_bytes("DEBUG_ATTRCRYPT orig bytes '%s' (%d)\n", (unsigned char *)in_data, in_size);
+ log_bytes("DEBUG_ATTRCRYPT redo bytes '%s' (%d)\n", (unsigned char *)redo_data, redo_size);
+
+ LDAPDebug(LDAP_DEBUG_ANY,"<------ check result of crypto op\n", 0, 0, 0);
+ }
+#endif
}
error:
if (sec_context) {
| 0 |
1d217feed941b97a9fac67a011f6e1b9dceeb266
|
389ds/389-ds-base
|
Ticket 48769 - Fix white space in extendedop.c
Bug Description: The addition of the plugin type added white space differences
Fix Description: This changes extendop.c from hard tabs to soft tabs.
https://fedorahosted.org/389/ticket/48769
Author: wibrown
Review by: nhosoi (Thanks!)
|
commit 1d217feed941b97a9fac67a011f6e1b9dceeb266
Author: William Brown <[email protected]>
Date: Fri Mar 18 14:14:39 2016 +1000
Ticket 48769 - Fix white space in extendedop.c
Bug Description: The addition of the plugin type added white space differences
Fix Description: This changes extendop.c from hard tabs to soft tabs.
https://fedorahosted.org/389/ticket/48769
Author: wibrown
Review by: nhosoi (Thanks!)
diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c
index 840a8981a..50506a5fa 100644
--- a/ldap/servers/slapd/extendop.c
+++ b/ldap/servers/slapd/extendop.c
@@ -78,37 +78,37 @@ static void extop_handle_import_start(Slapi_PBlock *pb, char *extoid,
}
slapi_pblock_set(pb, SLAPI_BACKEND, be);
- slapi_pblock_set( pb, SLAPI_REQUESTOR_ISROOT, &pb->pb_op->o_isroot );
-
- {
- /* Access Control Check to see if the client is
- * allowed to use task import
- */
- char *dummyAttr = "dummy#attr";
- char *dummyAttrs[2] = { NULL, NULL };
- int rc = 0;
- char dn[128];
- Slapi_Entry *feature;
-
- /* slapi_str2entry modify its dn parameter so we must copy
- * this string each time we call it !
- */
- /* This dn is no need to be normalized. */
- PR_snprintf(dn, sizeof(dn), "dn: oid=%s,cn=features,cn=config",
- EXTOP_BULK_IMPORT_START_OID);
-
- dummyAttrs[0] = dummyAttr;
- feature = slapi_str2entry(dn, 0);
- rc = plugin_call_acl_plugin (pb, feature, dummyAttrs, NULL,
- SLAPI_ACL_WRITE, ACLPLUGIN_ACCESS_DEFAULT, NULL);
- slapi_entry_free(feature);
- if (rc != LDAP_SUCCESS)
- {
- /* Client isn't allowed to do this. */
- send_ldap_result(pb, rc, NULL, NULL, 0, NULL);
- goto out;
- }
- }
+ slapi_pblock_set( pb, SLAPI_REQUESTOR_ISROOT, &pb->pb_op->o_isroot );
+
+ {
+ /* Access Control Check to see if the client is
+ * allowed to use task import
+ */
+ char *dummyAttr = "dummy#attr";
+ char *dummyAttrs[2] = { NULL, NULL };
+ int rc = 0;
+ char dn[128];
+ Slapi_Entry *feature;
+
+ /* slapi_str2entry modify its dn parameter so we must copy
+ * this string each time we call it !
+ */
+ /* This dn is no need to be normalized. */
+ PR_snprintf(dn, sizeof(dn), "dn: oid=%s,cn=features,cn=config",
+ EXTOP_BULK_IMPORT_START_OID);
+
+ dummyAttrs[0] = dummyAttr;
+ feature = slapi_str2entry(dn, 0);
+ rc = plugin_call_acl_plugin (pb, feature, dummyAttrs, NULL,
+ SLAPI_ACL_WRITE, ACLPLUGIN_ACCESS_DEFAULT, NULL);
+ slapi_entry_free(feature);
+ if (rc != LDAP_SUCCESS)
+ {
+ /* Client isn't allowed to do this. */
+ send_ldap_result(pb, rc, NULL, NULL, 0, NULL);
+ goto out;
+ }
+ }
if (be->be_wire_import == NULL) {
/* not supported by this backend */
@@ -204,135 +204,135 @@ static void extop_handle_import_done(Slapi_PBlock *pb, char *extoid,
void
do_extended( Slapi_PBlock *pb )
{
- char *extoid = NULL, *errmsg;
- struct berval extval = {0};
- int lderr, rc;
- ber_len_t len;
- ber_tag_t tag;
- const char *name;
-
- LDAPDebug( LDAP_DEBUG_TRACE, "do_extended\n", 0, 0, 0 );
-
- /*
- * Parse the extended request. It looks like this:
- *
- * ExtendedRequest := [APPLICATION 23] SEQUENCE {
- * requestName [0] LDAPOID,
- * requestValue [1] OCTET STRING OPTIONAL
- * }
- */
-
- if ( ber_scanf( pb->pb_op->o_ber, "{a", &extoid )
- == LBER_ERROR ) {
- LDAPDebug( LDAP_DEBUG_ANY,
- "ber_scanf failed (op=extended; params=OID)\n",
- 0, 0, 0 );
- op_shared_log_error_access (pb, "EXT", "???", "decoding error: fail to get extension OID");
- send_ldap_result( pb, LDAP_PROTOCOL_ERROR, NULL, "decoding error", 0,
- NULL );
- goto free_and_return;
- }
- tag = ber_peek_tag(pb->pb_op->o_ber, &len);
-
- if (tag == LDAP_TAG_EXOP_REQ_VALUE) {
- if ( ber_scanf( pb->pb_op->o_ber, "o}", &extval ) == LBER_ERROR ) {
- op_shared_log_error_access (pb, "EXT", "???", "decoding error: fail to get extension value");
- send_ldap_result( pb, LDAP_PROTOCOL_ERROR, NULL, "decoding error", 0,
- NULL );
- goto free_and_return;
- }
- } else {
- if ( ber_scanf( pb->pb_op->o_ber, "}") == LBER_ERROR ) {
- op_shared_log_error_access (pb, "EXT", "???", "decoding error");
- send_ldap_result( pb, LDAP_PROTOCOL_ERROR, NULL, "decoding error", 0,
- NULL );
- goto free_and_return;
- }
- }
- if ( NULL == ( name = extended_op_oid2string( extoid ))) {
- LDAPDebug( LDAP_DEBUG_ARGS, "do_extended: oid (%s)\n", extoid, 0, 0 );
-
- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d EXT oid=\"%s\"\n",
- pb->pb_conn->c_connid, pb->pb_op->o_opid, extoid );
- } else {
- LDAPDebug( LDAP_DEBUG_ARGS, "do_extended: oid (%s-%s)\n",
- extoid, name, 0 );
-
- slapi_log_access( LDAP_DEBUG_STATS,
- "conn=%" NSPRIu64 " op=%d EXT oid=\"%s\" name=\"%s\"\n",
- pb->pb_conn->c_connid, pb->pb_op->o_opid, extoid, name );
- }
-
- /* during a bulk import, only BULK_IMPORT_DONE is allowed!
- * (and this is the only time it's allowed)
- */
- if (pb->pb_conn->c_flags & CONN_FLAG_IMPORT) {
- if (strcmp(extoid, EXTOP_BULK_IMPORT_DONE_OID) != 0) {
- send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL);
- goto free_and_return;
- }
- extop_handle_import_done(pb, extoid, &extval);
- goto free_and_return;
- }
-
- if (strcmp(extoid, EXTOP_BULK_IMPORT_START_OID) == 0) {
- extop_handle_import_start(pb, extoid, &extval);
- goto free_and_return;
- }
-
- if (strcmp(extoid, START_TLS_OID) != 0) {
- int minssf = config_get_minssf();
-
- /* If anonymous access is disabled and we haven't
- * authenticated yet, only allow startTLS. */
- if ((config_get_anon_access_switch() != SLAPD_ANON_ACCESS_ON) && ((pb->pb_op->o_authtype == NULL) ||
- (strcasecmp(pb->pb_op->o_authtype, SLAPD_AUTH_NONE) == 0))) {
- send_ldap_result( pb, LDAP_INAPPROPRIATE_AUTH, NULL,
- "Anonymous access is not allowed.", 0, NULL );
- goto free_and_return;
- }
-
- /* If the minssf is not met, only allow startTLS. */
- if ((pb->pb_conn->c_sasl_ssf < minssf) && (pb->pb_conn->c_ssl_ssf < minssf) &&
- (pb->pb_conn->c_local_ssf < minssf)) {
- send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, NULL,
- "Minimum SSF not met.", 0, NULL );
- goto free_and_return;
- }
- }
-
- /* If a password change is required, only allow the password
- * modify extended operation */
- if (!pb->pb_conn->c_isreplication_session &&
+ char *extoid = NULL, *errmsg;
+ struct berval extval = {0};
+ int lderr, rc;
+ ber_len_t len;
+ ber_tag_t tag;
+ const char *name;
+
+ LDAPDebug( LDAP_DEBUG_TRACE, "do_extended\n", 0, 0, 0 );
+
+ /*
+ * Parse the extended request. It looks like this:
+ *
+ * ExtendedRequest := [APPLICATION 23] SEQUENCE {
+ * requestName [0] LDAPOID,
+ * requestValue [1] OCTET STRING OPTIONAL
+ * }
+ */
+
+ if ( ber_scanf( pb->pb_op->o_ber, "{a", &extoid )
+ == LBER_ERROR ) {
+ LDAPDebug( LDAP_DEBUG_ANY,
+ "ber_scanf failed (op=extended; params=OID)\n",
+ 0, 0, 0 );
+ op_shared_log_error_access (pb, "EXT", "???", "decoding error: fail to get extension OID");
+ send_ldap_result( pb, LDAP_PROTOCOL_ERROR, NULL, "decoding error", 0,
+ NULL );
+ goto free_and_return;
+ }
+ tag = ber_peek_tag(pb->pb_op->o_ber, &len);
+
+ if (tag == LDAP_TAG_EXOP_REQ_VALUE) {
+ if ( ber_scanf( pb->pb_op->o_ber, "o}", &extval ) == LBER_ERROR ) {
+ op_shared_log_error_access (pb, "EXT", "???", "decoding error: fail to get extension value");
+ send_ldap_result( pb, LDAP_PROTOCOL_ERROR, NULL, "decoding error", 0,
+ NULL );
+ goto free_and_return;
+ }
+ } else {
+ if ( ber_scanf( pb->pb_op->o_ber, "}") == LBER_ERROR ) {
+ op_shared_log_error_access (pb, "EXT", "???", "decoding error");
+ send_ldap_result( pb, LDAP_PROTOCOL_ERROR, NULL, "decoding error", 0,
+ NULL );
+ goto free_and_return;
+ }
+ }
+ if ( NULL == ( name = extended_op_oid2string( extoid ))) {
+ LDAPDebug( LDAP_DEBUG_ARGS, "do_extended: oid (%s)\n", extoid, 0, 0 );
+
+ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d EXT oid=\"%s\"\n",
+ pb->pb_conn->c_connid, pb->pb_op->o_opid, extoid );
+ } else {
+ LDAPDebug( LDAP_DEBUG_ARGS, "do_extended: oid (%s-%s)\n",
+ extoid, name, 0 );
+
+ slapi_log_access( LDAP_DEBUG_STATS,
+ "conn=%" NSPRIu64 " op=%d EXT oid=\"%s\" name=\"%s\"\n",
+ pb->pb_conn->c_connid, pb->pb_op->o_opid, extoid, name );
+ }
+
+ /* during a bulk import, only BULK_IMPORT_DONE is allowed!
+ * (and this is the only time it's allowed)
+ */
+ if (pb->pb_conn->c_flags & CONN_FLAG_IMPORT) {
+ if (strcmp(extoid, EXTOP_BULK_IMPORT_DONE_OID) != 0) {
+ send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL);
+ goto free_and_return;
+ }
+ extop_handle_import_done(pb, extoid, &extval);
+ goto free_and_return;
+ }
+
+ if (strcmp(extoid, EXTOP_BULK_IMPORT_START_OID) == 0) {
+ extop_handle_import_start(pb, extoid, &extval);
+ goto free_and_return;
+ }
+
+ if (strcmp(extoid, START_TLS_OID) != 0) {
+ int minssf = config_get_minssf();
+
+ /* If anonymous access is disabled and we haven't
+ * authenticated yet, only allow startTLS. */
+ if ((config_get_anon_access_switch() != SLAPD_ANON_ACCESS_ON) && ((pb->pb_op->o_authtype == NULL) ||
+ (strcasecmp(pb->pb_op->o_authtype, SLAPD_AUTH_NONE) == 0))) {
+ send_ldap_result( pb, LDAP_INAPPROPRIATE_AUTH, NULL,
+ "Anonymous access is not allowed.", 0, NULL );
+ goto free_and_return;
+ }
+
+ /* If the minssf is not met, only allow startTLS. */
+ if ((pb->pb_conn->c_sasl_ssf < minssf) && (pb->pb_conn->c_ssl_ssf < minssf) &&
+ (pb->pb_conn->c_local_ssf < minssf)) {
+ send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, NULL,
+ "Minimum SSF not met.", 0, NULL );
+ goto free_and_return;
+ }
+ }
+
+ /* If a password change is required, only allow the password
+ * modify extended operation */
+ if (!pb->pb_conn->c_isreplication_session &&
pb->pb_conn->c_needpw && (strcmp(extoid, EXTOP_PASSWD_OID) != 0))
- {
- char *dn = NULL;
- slapi_pblock_get(pb, SLAPI_CONN_DN, &dn);
+ {
+ char *dn = NULL;
+ slapi_pblock_get(pb, SLAPI_CONN_DN, &dn);
- (void)slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0);
- op_shared_log_error_access (pb, "EXT", dn ? dn : "", "need new password");
- send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL );
+ (void)slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0);
+ op_shared_log_error_access (pb, "EXT", dn ? dn : "", "need new password");
+ send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL );
- slapi_ch_free_string(&dn);
- goto free_and_return;
- }
+ slapi_ch_free_string(&dn);
+ goto free_and_return;
+ }
- /* decode the optional controls - put them in the pblock */
- if ( (lderr = get_ldapmessage_controls( pb, pb->pb_op->o_ber, NULL )) != 0 )
- {
- char *dn = NULL;
- slapi_pblock_get(pb, SLAPI_CONN_DN, &dn);
+ /* decode the optional controls - put them in the pblock */
+ if ( (lderr = get_ldapmessage_controls( pb, pb->pb_op->o_ber, NULL )) != 0 )
+ {
+ char *dn = NULL;
+ slapi_pblock_get(pb, SLAPI_CONN_DN, &dn);
- op_shared_log_error_access (pb, "EXT", dn ? dn : "", "failed to decode LDAP controls");
- send_ldap_result( pb, lderr, NULL, NULL, 0, NULL );
+ op_shared_log_error_access (pb, "EXT", dn ? dn : "", "failed to decode LDAP controls");
+ send_ldap_result( pb, lderr, NULL, NULL, 0, NULL );
- slapi_ch_free_string(&dn);
- goto free_and_return;
- }
+ slapi_ch_free_string(&dn);
+ goto free_and_return;
+ }
- slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_OID, extoid );
- slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_VALUE, &extval );
- slapi_pblock_set( pb, SLAPI_REQUESTOR_ISROOT, &pb->pb_op->o_isroot);
+ slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_OID, extoid );
+ slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_VALUE, &extval );
+ slapi_pblock_set( pb, SLAPI_REQUESTOR_ISROOT, &pb->pb_op->o_isroot);
/* wibrown 201603 I want to rewrite this to get plugin p, and use that
* rather than all these plugin_call_, that loop over the plugin lists
@@ -340,10 +340,10 @@ do_extended( Slapi_PBlock *pb )
* then we just hand *p into the call functions.
* much more efficient! :)
*/
-
+
slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c calling plugins ... \n");
- rc = plugin_call_exop_plugins( pb, extoid, SLAPI_PLUGIN_EXTENDEDOP);
+ rc = plugin_call_exop_plugins( pb, extoid, SLAPI_PLUGIN_EXTENDEDOP);
slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c called exop, got %d \n", rc);
@@ -391,37 +391,37 @@ do_extended( Slapi_PBlock *pb )
} /* if be */
}
- if ( SLAPI_PLUGIN_EXTENDED_SENT_RESULT != rc ) {
- if ( SLAPI_PLUGIN_EXTENDED_NOT_HANDLED == rc ) {
- lderr = LDAP_PROTOCOL_ERROR; /* no plugin handled the op */
- errmsg = "unsupported extended operation";
- } else {
- errmsg = NULL;
- lderr = rc;
- }
- send_ldap_result( pb, lderr, NULL, errmsg, 0, NULL );
- }
+ if ( SLAPI_PLUGIN_EXTENDED_SENT_RESULT != rc ) {
+ if ( SLAPI_PLUGIN_EXTENDED_NOT_HANDLED == rc ) {
+ lderr = LDAP_PROTOCOL_ERROR; /* no plugin handled the op */
+ errmsg = "unsupported extended operation";
+ } else {
+ errmsg = NULL;
+ lderr = rc;
+ }
+ send_ldap_result( pb, lderr, NULL, errmsg, 0, NULL );
+ }
free_and_return:
- if (extoid)
- slapi_ch_free((void **)&extoid);
- if (extval.bv_val)
- slapi_ch_free((void **)&extval.bv_val);
- return;
+ if (extoid)
+ slapi_ch_free((void **)&extoid);
+ if (extval.bv_val)
+ slapi_ch_free((void **)&extval.bv_val);
+ return;
}
static const char *
extended_op_oid2string( const char *oid )
{
- const char *rval = NULL;
-
- if ( 0 == strcmp(oid, EXTOP_BULK_IMPORT_START_OID)) {
- rval = "Bulk Import Start";
- } else if ( 0 == strcmp(oid, EXTOP_BULK_IMPORT_DONE_OID)) {
- rval = "Bulk Import End";
- } else {
- rval = plugin_extended_op_oid2string( oid );
- }
+ const char *rval = NULL;
+
+ if ( 0 == strcmp(oid, EXTOP_BULK_IMPORT_START_OID)) {
+ rval = "Bulk Import Start";
+ } else if ( 0 == strcmp(oid, EXTOP_BULK_IMPORT_DONE_OID)) {
+ rval = "Bulk Import End";
+ } else {
+ rval = plugin_extended_op_oid2string( oid );
+ }
- return( rval );
+ return( rval );
}
| 0 |
52930da0bb8abe94a56ff6dca5ea57347d3461a9
|
389ds/389-ds-base
|
Issue 50823 - dsctl doesn't work with 'slapd-' in the instance name
Bug Description:
DirSrv.list drops all occurrences of 'slapd-' within a serverid
rendering names containing it damaged.
Fix Description:
Remove only the first occurrence of 'slapd-' in the serverid, which is
the prefix that is expected to be removed.
Fixes https://pagure.io/389-ds-base/issue/50823
Author: Matus Honek <[email protected]>
Review by: Mark, William (thanks!)
|
commit 52930da0bb8abe94a56ff6dca5ea57347d3461a9
Author: Matus Honek <[email protected]>
Date: Wed Jan 29 14:06:04 2020 +0000
Issue 50823 - dsctl doesn't work with 'slapd-' in the instance name
Bug Description:
DirSrv.list drops all occurrences of 'slapd-' within a serverid
rendering names containing it damaged.
Fix Description:
Remove only the first occurrence of 'slapd-' in the serverid, which is
the prefix that is expected to be removed.
Fixes https://pagure.io/389-ds-base/issue/50823
Author: Matus Honek <[email protected]>
Review by: Mark, William (thanks!)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 7ab8674c9..c77c5a5a0 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -711,7 +711,7 @@ class DirSrv(SimpleLDAPObject, object):
if serverid is None and hasattr(self, 'serverid'):
serverid = self.serverid
elif serverid is not None:
- serverid = serverid.replace('slapd-', '')
+ serverid = serverid.replace('slapd-', '', 1)
if self.serverid is None:
# Need to set the Paths in case it does exist
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 6ee71b5d0..77fc4bef6 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -219,7 +219,7 @@ class SetupDs(object):
insts = inst.list(serverid=serverid)
if len(insts) != 1:
- log.error("No such instance to remove {}".format(serverid))
+ self.log.error("No such instance to remove {}".format(serverid))
return
inst.allocate(insts[0])
remove_ds_instance(inst, force=True)
| 0 |
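An illustrative Python snippet for the 52930da0 fix above, showing why the count argument to str.replace matters when an instance name itself contains 'slapd-'; the instance name used here is a made-up example:

serverid = "slapd-slapd-in-name"   # hypothetical instance directory name

# Old behaviour: every occurrence of the prefix is stripped, damaging the name.
print(serverid.replace("slapd-", ""))      # -> "in-name"

# Fixed behaviour: only the leading prefix is removed.
print(serverid.replace("slapd-", "", 1))   # -> "slapd-in-name"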
c64859d1362cef6db8ddca0cefa6085a7dcef74b
|
389ds/389-ds-base
|
Issue 6432 - Crash during bind when acct policy plugin does not have "alwaysrecordlogin" set
Description:
If alwaysrecordlogin is off then we dereference NULL ptr cfg->login_history_attr
when trying to write the history/time value. Instead we should skip
over this code if it is not set.
Relates: https://github.com/389ds/389-ds-base/issues/6432
Reviewed by: tbordaz(Thanks!)
|
commit c64859d1362cef6db8ddca0cefa6085a7dcef74b
Author: Mark Reynolds <[email protected]>
Date: Wed Dec 4 15:39:58 2024 -0500
Issue 6432 - Crash during bind when acct policy plugin does not have "alwaysrecordlogin" set
Description:
If alwaysrecordlogin is off then we dereference NULL ptr cfg->login_history_attr
when trying to write the history/time value. Instead we should skip
over this code if it is not set.
Relates: https://github.com/389ds/389-ds-base/issues/6432
Reviewed by: tbordaz(Thanks!)
diff --git a/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py b/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py
index b7ffd3d78..e1c53a830 100644
--- a/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py
+++ b/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py
@@ -21,10 +21,11 @@ from lib389._constants import (
PASSWORD,
PLUGIN_ACCT_POLICY,
)
-from lib389.idm.user import (UserAccount, UserAccounts)
+from lib389.idm.user import (UserAccount)
from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig)
+from lib389.cos import (CosTemplate, CosPointerDefinition)
from lib389.idm.domain import Domain
-from datetime import datetime, timedelta
+
log = logging.getLogger(__name__)
@@ -36,6 +37,7 @@ NEW_PASSWORD = 'password123'
USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)'
ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)"
+
def test_inactivty_and_expiration(topo):
"""Test account expiration works when we are checking all state attributes
@@ -122,9 +124,79 @@ def test_inactivty_and_expiration(topo):
test_user.bind(NEW_PASSWORD)
+def test_alwaysrecordlogin_off(topo):
+ """Test the server does not crash when alwaysrecordlogin is "off"
+
+ :id: 49eb0993-ee59-48a9-8324-fb965b202ba9
+ :setup: Standalone Instance
+ :steps:
+ 1. Create test user
+ 2. Configure account policy, COS, and restart
+ 3. Bind as test user
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ LOCAL_POLICY = 'cn=Account Inactivation Policy,dc=example,dc=com'
+ TEMPL_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com'
+ DEFIN_COS = 'cn=DefnCoS,ou=people,dc=example,dc=com'
+ TEST_USER_NAME = 'crash'
+ TEST_USER_DN = f'uid={TEST_USER_NAME},ou=people,{DEFAULT_SUFFIX}'
+
+ inst = topo.standalone
+
+ # Create the test user
+ test_user = UserAccount(inst, TEST_USER_DN)
+ test_user.create(properties={
+ 'uid': TEST_USER_NAME,
+ 'cn': TEST_USER_NAME,
+ 'sn': TEST_USER_NAME,
+ 'userPassword': PASSWORD,
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/crash',
+ })
+
+ # Configure account policy plugin
+ plugin = AccountPolicyPlugin(inst)
+ plugin.enable()
+ plugin.set('nsslapd-pluginarg0', ACCP_CONF)
+ accp = AccountPolicyConfig(inst, dn=ACCP_CONF)
+ accp.set('alwaysrecordlogin', 'no')
+ accp.set('stateattrname', 'lastLoginTime')
+ accp.set('altstateattrname', 'passwordexpirationtime')
+ accp.set('specattrname', 'acctPolicySubentry')
+ accp.set('limitattrname', 'accountInactivityLimit')
+ accp.set('accountInactivityLimit', '123456')
+ accp.set('checkAllStateAttrs', 'on')
+ inst.restart()
+ # Local policy
+ laccp = AccountPolicyConfig(inst, dn=LOCAL_POLICY)
+ laccp.create(properties={
+ 'cn': 'Account Inactivation Policy',
+ 'accountInactivityLimit': '12312321'
+ })
+ # COS
+ cos_template = CosTemplate(inst, dn=TEMPL_COS)
+ cos_template.create(properties={'cn': 'TempltCoS',
+ 'acctPolicySubentry': LOCAL_POLICY})
+ cos_def = CosPointerDefinition(inst, dn=DEFIN_COS)
+ cos_def.create(properties={
+ 'cn': 'DefnCoS',
+ 'cosTemplateDn': TEMPL_COS,
+ 'cosAttribute': 'acctPolicySubentry default operational-default'})
+ inst.restart()
+
+ # Bind as test user to make sure the server does not crash
+ conn = test_user.bind(PASSWORD)
+ test_user = UserAccount(conn, TEST_USER_DN)
+ test_user.bind(PASSWORD)
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main(["-s", CURRENT_FILE])
-
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index ba9705f74..220d0f6b2 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -372,7 +372,9 @@ acct_record_login(const char *dn)
"acct_record_login - Recorded %s=%s on \"%s\"\n", cfg->always_record_login_attr, timestr, dn);
/* update login history */
- acct_update_login_history(dn, timestr);
+ if (cfg->login_history_attr) {
+ acct_update_login_history(dn, timestr);
+ }
}
done:
| 0 |
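A small Python sketch of the guard added in c64859d1 above: only update the login-history attribute when the policy actually defines one. The config class and entry dict are illustrative stand-ins, not the plugin's real structures:

from dataclasses import dataclass
from typing import Optional

@dataclass
class AcctPolicyConfig:
    always_record_login_attr: str = "lastLoginTime"
    login_history_attr: Optional[str] = None   # unset when alwaysrecordlogin is off

def record_login(entry: dict, cfg: AcctPolicyConfig, timestr: str) -> None:
    entry[cfg.always_record_login_attr] = timestr
    # The crash came from using the history attribute unconditionally;
    # skip the update when it is not configured.
    if cfg.login_history_attr:
        entry.setdefault(cfg.login_history_attr, []).append(timestr)

entry = {}
record_login(entry, AcctPolicyConfig(), "20241204153958Z")
print(entry)   # only lastLoginTime is written; no history attribute is touched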
2af101b69ece052498158fd50804d843d75ef1a0
|
389ds/389-ds-base
|
Bug 697027 - 6 - minor memory leaks found by Valgrind + TET
https://bugzilla.redhat.com/show_bug.cgi?id=697027
[Case 6]
Description: Moving "csn_free(&attributedeletioncsn)" to the
free_and_return section to fix the leak.
|
commit 2af101b69ece052498158fd50804d843d75ef1a0
Author: Noriko Hosoi <[email protected]>
Date: Fri Apr 15 11:41:41 2011 -0700
Bug 697027 - 6 - minor memory leaks found by Valgrind + TET
https://bugzilla.redhat.com/show_bug.cgi?id=697027
[Case 6]
Description: Moving "csn_free(&attributedeletioncsn)" to the
free_and_return section to fix the leak.
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index a4a67a6fd..7a015ff31 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -735,6 +735,7 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
int check_for_duplicate_values =
( 0 != ( flags & SLAPI_STR2ENTRY_REMOVEDUPVALS ));
Slapi_Value *value = 0;
+ CSN *attributedeletioncsn= NULL;
CSN *maxcsn= NULL;
char *normdn = NULL;
int strict = 0;
@@ -755,13 +756,14 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
}
while ( (s = ldif_getline( &next )) != NULL )
{
- CSN *attributedeletioncsn= NULL;
CSNSet *valuecsnset= NULL;
int value_state= VALUE_NOTFOUND;
int attr_state= VALUE_NOTFOUND;
int freeval = 0;
struct berval bv_null = {0, NULL};
+ csn_free(&attributedeletioncsn);
+
if ( *s == '\n' || *s == '\0' ) {
break;
}
@@ -779,7 +781,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
/*
* Extract the attribute and value CSNs from the attribute type.
*/
- csn_free(&attributedeletioncsn);
csnset_free(&valuecsnset);
value_state= VALUE_NOTFOUND;
attr_state= VALUE_NOTFOUND;
@@ -792,7 +793,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
/* ignore deleted values and attributes */
/* the memory below was not allocated by the slapi_ch_ functions */
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
continue;
}
/* Ignore CSNs */
@@ -848,7 +848,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
escape_string( valuecharptr, ebuf2 ), 0 );
/* the memory below was not allocated by the slapi_ch_ functions */
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
continue;
}
normdn = slapi_create_dn_string("%s", valuecharptr);
@@ -863,7 +862,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
slapi_entry_set_dn(e, normdn);
/* the memory below was not allocated by the slapi_ch_ functions */
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
continue;
}
@@ -873,7 +871,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
}
/* the memory below was not allocated by the slapi_ch_ functions */
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
continue;
}
@@ -881,7 +878,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
if ( (flags & SLAPI_STR2ENTRY_NO_ENTRYDN) &&
strcasecmp( type, "entrydn" ) == 0 ) {
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
continue;
}
@@ -898,7 +894,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
}
/* the memory below was not allocated by the slapi_ch_ functions */
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
continue;
}
@@ -1028,7 +1023,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
type, valuecharptr);
slapi_entry_free( e ); e = NULL;
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
- csn_free(&attributedeletioncsn);
goto free_and_return;
}
}
@@ -1143,7 +1137,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
/* Failure adding to value tree */
LDAPDebug( LDAP_DEBUG_ANY, "str2entry_dupcheck: unexpected failure %d constructing value tree\n", rc, 0, 0 );
slapi_entry_free( e ); e = NULL;
- csn_free(&attributedeletioncsn);
goto free_and_return;
}
@@ -1232,6 +1225,7 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
if(sa->sa_attributedeletioncsn!=NULL)
{
attr_set_deletion_csn(*a,sa->sa_attributedeletioncsn);
+ csn_free(&sa->sa_attributedeletioncsn);
}
}
}
@@ -1284,6 +1278,7 @@ free_and_return:
}
slapi_ch_free((void **) &dyn_attrs );
if (value) slapi_value_free(&value);
+ csn_free(&attributedeletioncsn);
csn_free(&maxcsn);
LDAPDebug( LDAP_DEBUG_TRACE, "<= str2entry_dupcheck 0x%x \"%s\"\n",
| 0 |
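The 2af101b6 fix above replaces many per-"continue" csn_free calls with one release at the top of each loop iteration plus one in the shared free_and_return section. Python manages memory differently, so this is only a control-flow analogue of that consolidation, with io.StringIO standing in for the per-line resource:

import io

def process_lines(lines):
    resource = None                        # analogue of attributedeletioncsn
    try:
        for line in lines:
            # One release point at the top of each iteration replaces the
            # many per-"continue" releases that were easy to miss (the leak).
            if resource is not None:
                resource.close()
                resource = None
            if line.startswith("#"):
                continue                   # early exits need no extra cleanup now
            resource = io.StringIO(line)   # acquire something for this line
            print(resource.read().upper())
    finally:
        # Shared cleanup, the analogue of the free_and_return section.
        if resource is not None:
            resource.close()

process_lines(["# comment", "dn: cn=test", "cn: test"])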
c38c2a860937cda0cdbe8f05acaa8083feb8caf1
|
389ds/389-ds-base
|
610281 - fix coverity Defect Type: Control flow issues
https://bugzilla.redhat.com/show_bug.cgi?id=610281
11813 DEADCODE Triaged Unassigned Bug Minor Fix Required
send_dirsync_search() ds/ldap/servers/plugins/replication/windows_connection.c
Comment:
op_string points to a static string:
731 op_string = "search";
We don't need to check op_string in slapi_log_error.
|
commit c38c2a860937cda0cdbe8f05acaa8083feb8caf1
Author: Noriko Hosoi <[email protected]>
Date: Fri Jul 2 16:35:16 2010 -0700
610281 - fix coverity Defect Type: Control flow issues
https://bugzilla.redhat.com/show_bug.cgi?id=610281
11813 DEADCODE Triaged Unassigned Bug Minor Fix Required
send_dirsync_search() ds/ldap/servers/plugins/replication/windows_connection.c
Comment:
op_string points to a static string:
731 op_string = "search";
We don't need to check op_string in slapi_log_error.
diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c
index 2e24bfed9..7adf3be33 100644
--- a/ldap/servers/plugins/replication/windows_connection.c
+++ b/ldap/servers/plugins/replication/windows_connection.c
@@ -758,7 +758,7 @@ send_dirsync_search(Repl_Connection *conn)
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
"%s: Failed to get %s operation: LDAP error %d (%s)\n",
agmt_get_long_name(conn->agmt),
- op_string ? op_string : "NULL", rc, ldap_err2string(rc));
+ op_string, rc, ldap_err2string(rc));
conn->last_ldap_error = rc;
if (IS_DISCONNECT_ERROR(rc))
{
| 0 |
708df4b77abaf8260f717f3d8e3a0bd6b47bd36c
|
389ds/389-ds-base
|
fix compiler warnings
|
commit 708df4b77abaf8260f717f3d8e3a0bd6b47bd36c
Author: Ludwig Krispenz <[email protected]>
Date: Thu Jul 18 15:32:05 2013 +0200
fix compiler warnings
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index ee7c18f28..4006d539f 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -746,12 +746,11 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
char *type;
struct berval bvtype;
str2entry_attr *sa;
- int i, j;
+ int i;
char *next=NULL;
char *valuecharptr=NULL;
struct berval bvvalue;
int rc;
- int fast_dup_check = 0;
entry_attrs *ea = NULL;
int tree_attr_checking = 0;
int big_entry_attr_presence_check = 0;
@@ -991,7 +990,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
if ( prev_attr==NULL )
{
/* Haven't seen this type yet */
- fast_dup_check = 1;
if ( nattrs == maxattrs )
{
/* Out of space - reallocate */
@@ -1022,15 +1020,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
{
/* Get the comparison function for later use */
attr_get_value_cmp_fn( &attrs[nattrs].sa_attr, &(attrs[nattrs].sa_comparefn));
- /*
- * If the compare function wasn't available,
- * we have to revert to AVL-tree-based dup checking,
- * which uses index keys for comparisons
- */
- if (NULL == attrs[nattrs].sa_comparefn)
- {
- fast_dup_check = 0;
- }
/*
* If we are maintaining the attribute tree,
* then add the new attribute to the tree.
@@ -1043,16 +1032,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
prev_attr = &attrs[nattrs];
nattrs++;
} else { /* prev_attr != NULL */
- if ( check_for_duplicate_values ) {
- /*
- * If the compare function wasn't available,
- * we have to revert to AVL-tree-based dup checking,
- * which uses index keys for comparisons
- */
- if (NULL == prev_attr->sa_comparefn) {
- fast_dup_check = 0;
- }
- }
}
sa = prev_attr; /* For readability */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index d171f994c..d548baf4e 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -4752,7 +4752,7 @@ void slapi_valueset_add_value(Slapi_ValueSet *vs, const Slapi_Value *addval);
* \see slapi_valueset_first_value()
* \see slapi_valueset_next_value()
*/
-void slapi_valueset_add_value_ext(Slapi_ValueSet *vs, Slapi_Value *addval, unsigned long flags);
+void slapi_valueset_add_value_ext(Slapi_ValueSet *vs, const Slapi_Value *addval, unsigned long flags);
int slapi_valueset_add_attr_value_ext(const Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value *addval, unsigned long flags);
/**
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index 76d03e17d..416a838f8 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -870,7 +870,7 @@ slapi_valueset_add_value(Slapi_ValueSet *vs, const Slapi_Value *addval)
}
void
-slapi_valueset_add_value_ext(Slapi_ValueSet *vs, Slapi_Value *addval, unsigned long flags)
+slapi_valueset_add_value_ext(Slapi_ValueSet *vs, const Slapi_Value *addval, unsigned long flags)
{
Slapi_Value *oneval[2];
oneval[0]= (Slapi_Value*)addval;
@@ -928,13 +928,13 @@ static int
valueset_value_cmp( const Slapi_Attr *a, const Slapi_Value *v1, const Slapi_Value *v2 )
{
- if ( a == NULL || slapi_attr_is_dn_syntax_attr(a)) {
+ if ( a == NULL || slapi_attr_is_dn_syntax_attr((Slapi_Attr *)a)) {
/* if no attr is provided just do a utf8compare */
/* for all the values the first step of normalization is done,
* case folding still needs to be done
*/
/* would this be enough ?: return (strcasecmp(v1->bv.bv_val, v2->bv.bv_val)); */
- return (slapi_utf8casecmp(v1->bv.bv_val, v2->bv.bv_val));
+ return (slapi_utf8casecmp((unsigned char*)v1->bv.bv_val, (unsigned char*)v2->bv.bv_val));
} else {
/* slapi_value_compare doesn't work, it only returns 0 or -1
return (slapi_value_compare(a, v1, v2));
| 0 |
407bdaa00d1da9f5ff53d66a2e012b17ad658907
|
389ds/389-ds-base
|
Issue 5772 - ONE LEVEL search fails to return sub-suffixes (#6219)
Problem: ONE LEVEL scoped search fails to return sub-suffix entries
Reason: When such a search is done, a one level search is done on the main suffix and base searches are done on any matching sub-suffix. But the main suffix is processed first (to ensure that parent entries are returned before child entries when searching a subtree) and ldbm_back_search changes the filter to (&(parentid=xxx)old_filter), so the filter test rejects the entries on the sub-suffixes.
Solution: Reverse the backend list when doing a one level search so that the sub-suffixes are processed first,
and restore the base dn for the main suffix.
Alternative rejected: reset the filter when discovering a sub-suffix. Not so easy because the filter is altered by the rewriters,
and systematic duplication is a useless overhead if there are no matching sub-suffixes (which is the usual case).
Issue: #5772
Reviewed by: @tbordaz, @droideck (Thanks!)
|
commit 407bdaa00d1da9f5ff53d66a2e012b17ad658907
Author: progier389 <[email protected]>
Date: Thu Jun 13 15:17:36 2024 +0200
Issue 5772 - ONE LEVEL search fails to return sub-suffixes (#6219)
Problem: ONE LEVEL scoped search fails to return sub-suffix entries
Reason: When such a search is done, a one level search is done on the main suffix and base searches are done on any matching sub-suffix. But the main suffix is processed first (to ensure that parent entries are returned before child entries when searching a subtree) and ldbm_back_search changes the filter to (&(parentid=xxx)old_filter), so the filter test rejects the entries on the sub-suffixes.
Solution: Reverse the backend list when doing a one level search so that the sub-suffixes are processed first,
and restore the base dn for the main suffix.
Alternative rejected: reset the filter when discovering a sub-suffix. Not so easy because the filter is altered by the rewriters,
and systematic duplication is a useless overhead if there are no matching sub-suffixes (which is the usual case).
Issue: #5772
Reviewed by: @tbordaz, @droideck (Thanks!)
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
index 5ccf8ff87..6d157aefa 100644
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
@@ -91,7 +91,6 @@ EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,
@pytest.mark.skipif(not has_orphan_attribute, reason = "compatibility attribute not yet implemented in this version")
def test_sub_suffixes(topo, orphan_param):
""" check the entries found on suffix/sub-suffix
- used int
:id: 5b4421c2-d851-11ec-a760-482ae39447e5
:feature: mapping-tree
@@ -121,8 +120,41 @@ def test_sub_suffixes(topo, orphan_param):
log.info(f'Verifying domain component entries count for search under {suffix} ...')
entries = topo.standalone.search_s(suffix, ldap.SCOPE_SUBTREE, "(dc=*)")
assert len(entries) == expected
- log.info('Found {expected} domain component entries as expected while searching {suffix}')
+ log.info(f'Found {expected} domain component entries as expected while searching {suffix}')
log.info('Test PASSED')
+def test_one_level_search_on_sub_suffixes(topo):
+ """ Perform one level scoped search accross suffix and sub-suffix
+
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
+ :feature: mapping-tree
+ :setup: Standalone instance with 3 additional backends:
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
+ :steps:
+ 1. Perform a ONE LEVEL search on dc=parent
+ 2. Check that all expected entries have been returned
+ 3. Check that only the expected entries have been returned
+ :expectedresults:
+ 1. Success
+ 2. each expected dn should be in the result set
+ 3. Number of returned entries should be the same as the number of expected entries
+ """
+ expected_dns = ( 'dc=child1,dc=parent',
+ 'dc=child2,dc=parent',
+ 'ou=accounting,dc=parent',
+ 'ou=product development,dc=parent',
+ 'ou=product testing,dc=parent',
+ 'ou=human resources,dc=parent',
+ 'ou=payroll,dc=parent',
+ 'ou=people,dc=parent',
+ 'ou=groups,dc=parent', )
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
+ attrlist=("dc","ou"), escapehatch='i am sure')
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
+ dns = [ entry.dn for entry in entries ]
+ for dn in expected_dns:
+ assert dn in dns
+ assert len(entries) == len(expected_dns)
+
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index 4de4aa66e..d2c7e3082 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -240,6 +240,36 @@ slapi_filter_test_ext(
}
+static const char *
+filter_type_as_string(int filter_type)
+{
+ switch (filter_type) {
+ case LDAP_FILTER_AND:
+ return "&";
+ case LDAP_FILTER_OR:
+ return "|";
+ case LDAP_FILTER_NOT:
+ return "!";
+ case LDAP_FILTER_EQUALITY:
+ return "=";
+ case LDAP_FILTER_SUBSTRINGS:
+ return "*";
+ case LDAP_FILTER_GE:
+ return ">=";
+ case LDAP_FILTER_LE:
+ return "<=";
+ case LDAP_FILTER_PRESENT:
+ return "=*";
+ case LDAP_FILTER_APPROX:
+ return "~";
+ case LDAP_FILTER_EXT:
+ return "EXT";
+ default:
+ return "?";
+ }
+}
+
+
int
test_ava_filter(
Slapi_PBlock *pb,
@@ -253,7 +283,13 @@ test_ava_filter(
{
int rc;
- slapi_log_err(SLAPI_LOG_FILTER, "test_ava_filter", "=>\n");
+ if (slapi_is_loglevel_set(SLAPI_LOG_FILTER)) {
+ char *val = slapi_berval_get_string_copy(&ava->ava_value);
+ char buf[BUFSIZ];
+ slapi_log_err(SLAPI_LOG_FILTER, "test_ava_filter", "=> AVA: %s%s%s\n",
+ ava->ava_type, filter_type_as_string(ftype), escape_string(val, buf));
+ slapi_ch_free_string(&val);
+ }
*access_check_done = 0;
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index f77043afa..540597f45 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -219,6 +219,7 @@ cache_return_target_entry(Slapi_PBlock *pb, Slapi_Backend *be, Slapi_Operation *
operation_set_target_entry_id(operation, 0);
}
}
+
/*
* Returns: 0 - if the operation is successful
* < 0 - if operation fails.
@@ -481,6 +482,20 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
while (be_list[index] && be_list[index + 1]) {
index++;
}
+ if (scope == LDAP_SCOPE_ONELEVEL) {
+ /*
+ * ONE LEVEL searches may ends up on multiple backends
+ * with a ONE LEVEL search on a suffix and a BASE search on its
+ * subsuffixes. Because LDAP_SCOPE_ONELEVEL rewrite the filter
+ * the backends should be reversed so that the BASE search(es)
+ * are done first (with the original filter).
+ */
+ for (int idx = 0; idx <= index/2; idx++) {
+ be = be_list[index-idx];
+ be_list[index-idx] = be_list[idx];
+ be_list[idx] = be;
+ }
+ }
be = be_list[index];
} else {
be = NULL;
@@ -779,7 +794,6 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
(slapi_sdn_get_ndn_len(basesdn) == 0)) {
int tmp_scope = LDAP_SCOPE_BASE;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
-
if (free_sdn) {
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
slapi_sdn_free(&sdn);
@@ -790,6 +804,12 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
} else if (slapi_sdn_issuffix(basesdn, be_suffix)) {
int tmp_scope = LDAP_SCOPE_ONELEVEL;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
+ if (free_sdn) {
+ slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
+ slapi_sdn_free(&sdn);
+ sdn = slapi_sdn_dup(basesdn);
+ slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, (void *)sdn);
+ }
} else {
slapi_sdn_done(&monitorsdn);
goto next_be;
| 0 |
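An illustrative Python version of the in-place swap loop added in 407bdaa0 above, which reverses the selected backend list so the sub-suffix BASE searches run before the main-suffix ONE LEVEL search that rewrites the filter; the suffix names are toy stand-ins, not real slapd backend structures:

# Backends selected for a ONE LEVEL search under dc=parent (toy names).
be_list = ["dc=parent", "dc=child1,dc=parent", "dc=child2,dc=parent"]

index = len(be_list) - 1
# Same in-place swap as the C hunk: for (idx = 0; idx <= index/2; idx++) ...
for idx in range(index // 2 + 1):
    be_list[idx], be_list[index - idx] = be_list[index - idx], be_list[idx]

# Order is now reversed, so the sub-suffix backends are handled before
# the main suffix whose backend rewrites the filter.
print(be_list)   # ['dc=child2,dc=parent', 'dc=child1,dc=parent', 'dc=parent']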
5e285f630c74ad24a1b1e902621fe8a2b54be544
|
389ds/389-ds-base
|
Issue 50378 - ACI's with IPv4 and IPv6 bind rules do not work for IPv6 clients
Description:
Add a new test case for #50378 instead of the older one that was testing
an unsupported corner case (ip=*).
Relates: https://pagure.io/389-ds-base/issue/50378
Reviewed by: mreynolds (Thanks!)
|
commit 5e285f630c74ad24a1b1e902621fe8a2b54be544
Author: Viktor Ashirov <[email protected]>
Date: Fri Jun 21 16:41:34 2019 +0200
Issue 50378 - ACI's with IPv4 and IPv6 bind rules do not work for IPv6 clients
Description:
Add a new test case for #50378 instead of the older one that was testing
an unsupported corner case (ip=*).
Relates: https://pagure.io/389-ds-base/issue/50378
Reviewed by: mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py
index c8c19127b..6a494a4b6 100644
--- a/dirsrvtests/tests/suites/acl/keywords_test.py
+++ b/dirsrvtests/tests/suites/acl/keywords_test.py
@@ -430,30 +430,33 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user):
with pytest.raises(ldap.INSUFFICIENT_ACCESS):
org.replace("seeAlso", "cn=1")
-
-def test_user_can_access_the_data_when_connecting_from_any_machine_2(topo, add_user, aci_of_user):
[email protected]
[email protected]
[email protected]("ip_addr", ['127.0.0.1', "[::1]"])
+def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, ip_addr):
"""
- User can access the data when connecting from any machine as per the ACI.
+ User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses
:id:461e761e-7ac5-11e8-9ae4-8c16451d917b
:setup: Standalone Server
:steps:
- 1. Add test entry
- 2. Add ACI
- 3. User should follow ACI role
+ 1. Add ACI that has both IPv4 and IPv6
+ 2. Connect from one of the IPs allowed in ACI
+ 3. Modify an attribute
:expectedresults:
- 1. Entry should be added
- 2. Operation should succeed
- 3. Operation should succeed
+ 1. ACI should be added
+ 2. Conection should be successful
+ 3. Operation should be successful
"""
- # Add ACI
+ # Add ACI that contains both IPv4 and IPv6
Domain(topo.standalone, DEFAULT_SUFFIX).\
- add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr=*)'
+ add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr=*) '
f'(version 3.0; aci "IP aci"; allow(all) '
- f'userdn = "ldap:///{FULLIP_KEY}" and ip = "*" ;)')
+ f'userdn = "ldap:///{FULLIP_KEY}" and (ip = "127.0.0.1" or ip = "::1");)')
# Create a new connection for this test.
- conn = UserAccount(topo.standalone, FULLIP_KEY).bind(PW_DM)
+ conn = UserAccount(topo.standalone, FULLIP_KEY).bind(PW_DM, uri=f'ldap://{ip_addr}:{topo.standalone.port}')
+
# Perform Operation
OrganizationalUnit(conn, IP_OU_KEY).replace("seeAlso", "cn=1")
| 0 |
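A hedged sketch of the parametrization idea in 5e285f63 above, limited to the standard library plus pytest; it only shows how one test body is driven with both loopback literals and how an LDAP URI is formed for each. The real bind through lib389 is not reproduced, and the port is an assumption:

import ipaddress
import pytest

@pytest.mark.parametrize("ip_addr", ["127.0.0.1", "[::1]"])
def test_uri_covers_both_address_families(ip_addr):
    port = 63389                                  # hypothetical instance port
    uri = f"ldap://{ip_addr}:{port}"              # same shape as the test's bind URI
    # IPv6 literals are bracketed in LDAP URIs; strip the brackets to classify.
    family = ipaddress.ip_address(ip_addr.strip("[]")).version
    assert family in (4, 6)
    assert uri.startswith("ldap://")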
299169ecbd2a7d4ef805465234112ce6ecaa7e77
|
389ds/389-ds-base
|
Ticket 49024 - Fix CI test failures and defaults.inf
Description: Fix error_log path in the defaults.inf file.
Ticket 1347760 - set access_log path properly.
pwdPolicy_warning_test - replace a month with 30 days,
because 'next month' causes long test execution times at the end of
a month, when the next month has fewer days than the current one.
https://fedorahosted.org/389/ticket/49024
Reviewed by: nhosoi, wbrown (Thanks!)
|
commit 299169ecbd2a7d4ef805465234112ce6ecaa7e77
Author: Simon Pichugin <[email protected]>
Date: Tue Nov 1 09:38:19 2016 +0100
Ticket 49024 - Fix CI test failures and defaults.inf
Description: Fix error_log path in the defaults.inf file.
Ticket 1347760 - set access_log path properly.
pwdPolicy_warning_test - replace a month with 30 days,
because 'next month' causes long test execution times at the end of
a month, when the next month has fewer days than the current one.
https://fedorahosted.org/389/ticket/49024
Reviewed by: nhosoi, wbrown (Thanks!)
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
index 4647e243c..f3e57f412 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
@@ -442,7 +442,7 @@ def test_with_different_password_states(topology, global_policy, add_user):
try:
log.info("Expiring user's password by moving the"\
" system date past the valid period")
- subprocess.check_call(['/usr/bin/date', '-s', 'next month'])
+ subprocess.check_call(['/usr/bin/date', '-s', '+30 day'])
log.info('Wait for the server to pick up new date')
time.sleep(5)
@@ -455,7 +455,7 @@ def test_with_different_password_states(topology, global_policy, add_user):
log.info("Bind Failed, error: {:s}".format(str(ex)))
log.info("Resetting the system date")
- subprocess.check_call(['/usr/bin/date', '-s', 'last month'])
+ subprocess.check_call(['/usr/bin/date', '-s', '-30 day'])
log.info('Wait for the server to pick up new date')
time.sleep(5)
diff --git a/dirsrvtests/tests/tickets/ticket1347760_test.py b/dirsrvtests/tests/tickets/ticket1347760_test.py
index a142ada0f..48643cc2d 100644
--- a/dirsrvtests/tests/tickets/ticket1347760_test.py
+++ b/dirsrvtests/tests/tickets/ticket1347760_test.py
@@ -13,6 +13,7 @@ import logging
import pytest
from subprocess import Popen
from lib389 import DirSrv, Entry
+from lib389.paths import Paths
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
@@ -38,11 +39,6 @@ BOGUSSUFFIX = 'uid=bogus,ou=people,dc=bogus'
GROUPOU = 'ou=groups,%s' % DEFAULT_SUFFIX
BOGUSOU = 'ou=OU,%s' % DEFAULT_SUFFIX
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
class TopologyStandalone(object):
def __init__(self, standalone):
@@ -224,6 +220,11 @@ def test_ticket1347760(topology):
log.info('Deleting aci in %s.' % DEFAULT_SUFFIX)
topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])
+ log.info('While binding as DM, acquire an access log path')
+ ds_paths = Paths(serverid=topology.standalone.serverid,
+ instance=topology.standalone)
+ file_path = ds_paths.access_log
+
log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.')
log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW))
try:
@@ -232,7 +233,6 @@ def test_ticket1347760(topology):
log.info('Desc ' + e.message['desc'])
assert False
- file_path = os.path.join(topology.standalone.prefix, 'var/log/dirsrv/slapd-%s/access' % topology.standalone.serverid)
file_obj = open(file_path, "r")
log.info('Access log path: %s' % file_path)
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
index c0469b349..7729c06d7 100644
--- a/ldap/admin/src/defaults.inf.in
+++ b/ldap/admin/src/defaults.inf.in
@@ -48,7 +48,7 @@ lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name}
log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name}
access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access
audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit
-error_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/error
+error_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/errors
inst_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}
db_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/db
backup_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/bak
| 0 |
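A standard-library Python illustration of why 299169ec above swaps 'next month' for a fixed 30-day offset: a plus-30-days delta is always well defined, while "same day next month" can fail or overshoot at the end of a long month. The dates are arbitrary examples:

from datetime import date, timedelta

start = date(2016, 1, 31)                  # end of a 31-day month

plus_30_days = start + timedelta(days=30)  # deterministic: 2016-03-01
print(plus_30_days)

# A naive "next month" (same day number, month + 1) does not even exist here:
try:
    naive_next_month = start.replace(month=start.month + 1)
except ValueError as err:
    print("no such date:", err)            # day is out of range for month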
04b61372fa58bdec73302006372dc6190fb82b73
|
389ds/389-ds-base
|
Ticket 47398 - memberOf on a user is converted to lowercase
Bug Description:
In order to compare the groups that an entry is a member of, all
entry DNs are normalized. When we finally decide to add (in a target entry)
the group DN as a memberOf value, we store the lowercase normalized value.
Fix Description:
We keep the process of comparing normalized values, but at the
time we do the actual MOD on the target entry, we use the group entry
DN (normalized but not lowercased) rather than the strictly normalized one.
Changes in memberof_get_groups_callback are for ADD,DEL,MOD ops
Change in memberof_modop_one_replace_r is for MODRDN of leaf entry
Change in memberof_replace_dn_from_groups is for MODRDN on group entry
When retrieving the groups that an entry is a member of, the lowercase normalized DNs
of the groups are kept in the callback_data (group_norm_vals),
because knowing whether a new group is among the groups already evaluated requires
the lowercase normalized value.
https://fedorahosted.org/389/ticket/47398
Reviewed by: Noriko Hosoi (Thanks Noriko for the review and the comments. I changed the
fix description slightly to take your comment into account).
Platforms tested: F17 acceptance and unit tests.
Verify that the number of calls to slapi_dn_normalization_ext is identical
with/without the fix. (does not break https://fedorahosted.org/389/ticket/412)
Flag Day: no
Doc impact: no
|
commit 04b61372fa58bdec73302006372dc6190fb82b73
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Tue Oct 8 10:29:42 2013 +0200
Ticket 47398 - memberOf on a user is converted to lowercase
Bug Description:
In order to compare the groups that an entry is a member of, all
entry DNs are normalized. When we finally decide to add (in a target entry)
the group DN as a memberOf value, we store the lowercase normalized value.
Fix Description:
We keep the process of comparing normalized values, but at the
time we do the actual MOD on the target entry, we use the group entry
DN (normalized but not lowercased) rather than the strictly normalized one.
Changes in memberof_get_groups_callback are for ADD,DEL,MOD ops
Change in memberof_modop_one_replace_r is for MODRDN of leaf entry
Change in memberof_replace_dn_from_groups is for MODRDN on group entry
When retrieving the groups that an entry is a member of, the lowercase normalized DNs
of the groups are kept in the callback_data (group_norm_vals),
because knowing whether a new group is among the groups already evaluated requires
the lowercase normalized value.
https://fedorahosted.org/389/ticket/47398
Reviewed by: Noriko Hosoi (Thanks Noriko for the review and the comments. I changed the
fix description slightly to take your comment into account).
Platforms tested: F17 acceptance and unit tests.
Verify that the number of calls to slapi_dn_normalization_ext is identical
with/without the fix. (does not break https://fedorahosted.org/389/ticket/412)
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 5aa22fd3d..443b9ae6e 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -91,6 +91,7 @@ typedef struct _memberof_get_groups_data
MemberOfConfig *config;
Slapi_Value *memberdn_val;
Slapi_ValueSet **groupvals;
+ Slapi_ValueSet **group_norm_vals;
} memberof_get_groups_data;
/*** function prototypes ***/
@@ -726,8 +727,8 @@ memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config,
* using the same grouping attribute. */
for (i = 0; config->groupattrs && config->groupattrs[i]; i++)
{
- replace_dn_data data = {(char *)slapi_sdn_get_ndn(pre_sdn),
- (char *)slapi_sdn_get_ndn(post_sdn),
+ replace_dn_data data = {(char *)slapi_sdn_get_dn(pre_sdn),
+ (char *)slapi_sdn_get_dn(post_sdn),
config->groupattrs[i]};
groupattrs[0] = config->groupattrs[i];
@@ -1361,7 +1362,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
if(LDAP_MOD_REPLACE == mod_op)
{
- replace_val[0] = (char *)slapi_sdn_get_ndn(replace_with_sdn);
+ replace_val[0] = (char *)slapi_sdn_get_dn(replace_with_sdn);
replace_val[1] = 0;
replace_mod.mod_op = LDAP_MOD_ADD;
@@ -1688,15 +1689,17 @@ Slapi_ValueSet *
memberof_get_groups(MemberOfConfig *config, Slapi_DN *member_sdn)
{
Slapi_ValueSet *groupvals = slapi_valueset_new();
+ Slapi_ValueSet *group_norm_vals = slapi_valueset_new();
Slapi_Value *memberdn_val =
slapi_value_new_string(slapi_sdn_get_ndn(member_sdn));
slapi_value_set_flags(memberdn_val, SLAPI_ATTR_FLAG_NORMALIZED_CIS);
- memberof_get_groups_data data = {config, memberdn_val, &groupvals};
+ memberof_get_groups_data data = {config, memberdn_val, &groupvals, &group_norm_vals};
memberof_get_groups_r(config, member_sdn, &data);
slapi_value_free(&memberdn_val);
+ slapi_valueset_free(group_norm_vals);
return groupvals;
}
@@ -1718,9 +1721,12 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn,
int memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
{
Slapi_DN *group_sdn = slapi_entry_get_sdn(e);
- char *group_dn = slapi_entry_get_ndn(e);
- Slapi_Value *group_dn_val = 0;
+ char *group_ndn = slapi_entry_get_ndn(e);
+ char *group_dn = slapi_entry_get_dn(e);
+ Slapi_Value *group_ndn_val = 0;
+ Slapi_Value *group_dn_val = 0;
Slapi_ValueSet *groupvals = *((memberof_get_groups_data*)callback_data)->groupvals;
+ Slapi_ValueSet *group_norm_vals = *((memberof_get_groups_data*)callback_data)->group_norm_vals;
int rc = 0;
if(slapi_is_shutting_down()){
@@ -1737,21 +1743,21 @@ int memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
}
/* get the DN of the group */
- group_dn_val = slapi_value_new_string(group_dn);
+ group_ndn_val = slapi_value_new_string(group_ndn);
/* group_dn is case-normalized */
- slapi_value_set_flags(group_dn_val, SLAPI_ATTR_FLAG_NORMALIZED_CIS);
+ slapi_value_set_flags(group_ndn_val, SLAPI_ATTR_FLAG_NORMALIZED_CIS);
/* check if e is the same as our original member entry */
if (0 == memberof_compare(((memberof_get_groups_data*)callback_data)->config,
- &((memberof_get_groups_data*)callback_data)->memberdn_val, &group_dn_val))
+ &((memberof_get_groups_data*)callback_data)->memberdn_val, &group_ndn_val))
{
/* A recursive group caused us to find our original
* entry we passed to memberof_get_groups(). We just
* skip processing this entry. */
slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_get_groups_callback: group recursion"
- " detected in %s\n" ,group_dn);
- slapi_value_free(&group_dn_val);
+ " detected in %s\n" ,group_ndn);
+ slapi_value_free(&group_ndn_val);
goto bail;
}
@@ -1760,8 +1766,8 @@ int memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
* in config. We only need this attribute for it's syntax so the comparison can be
* performed. Since all of the grouping attributes are validated to use the Dinstinguished
* Name syntax, we can safely just use the first group_slapiattr. */
- if (groupvals && slapi_valueset_find(
- ((memberof_get_groups_data*)callback_data)->config->group_slapiattrs[0], groupvals, group_dn_val))
+ if (group_norm_vals && slapi_valueset_find(
+ ((memberof_get_groups_data*)callback_data)->config->group_slapiattrs[0], group_norm_vals, group_ndn_val))
{
/* we either hit a recursive grouping, or an entry is
* a member of a group through multiple paths. Either
@@ -1769,15 +1775,17 @@ int memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
* already gone through this part of the grouping hierarchy. */
slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_get_groups_callback: possible group recursion"
- " detected in %s\n" ,group_dn);
- slapi_value_free(&group_dn_val);
+ " detected in %s\n" ,group_ndn);
+ slapi_value_free(&group_ndn_val);
goto bail;
}
/* Push group_dn_val into the valueset. This memory is now owned
* by the valueset. */
+ group_dn_val = slapi_value_new_string(group_dn);
slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);
-
+ slapi_valueset_add_value_ext(group_norm_vals, group_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
+
/* now recurse to find parent groups of e */
memberof_get_groups_r(((memberof_get_groups_data*)callback_data)->config,
group_sdn, callback_data);
| 0 |
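An illustrative Python sketch of the two-track bookkeeping 04b61372 above introduces: compare and deduplicate on the lowercase normalized DN, but store and return the DN in its original case. A plain dict stands in for the Slapi value sets:

def collect_groups(candidate_dns):
    by_norm = {}                              # normalized DN -> original-case DN
    for dn in candidate_dns:
        norm = dn.lower()                     # stand-in for DN normalization
        if norm in by_norm:                   # recursion / duplicate-path check
            continue
        by_norm[norm] = dn                    # keep the value as the admin wrote it
    return list(by_norm.values())

groups = collect_groups([
    "cn=Admins,ou=Groups,dc=example,dc=com",
    "cn=admins,ou=groups,dc=example,dc=com",  # same group, different case
    "cn=Editors,ou=Groups,dc=example,dc=com",
])
print(groups)   # original-case DNs, one entry per distinct group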
5c89dd8f9c8eb77c967574412d049d55565bb364
|
389ds/389-ds-base
|
Ticket 49432 - filter optimise crash
Bug Description: In a certain condition, when we removed the
equality candidate while optimising a filter with a nested
AND, the merge process would segfault
Fix Description: Fix the merge subfilter process to be cleaner
and work in all conditions. Merge the set of filter tests to
cmocka in addition to the python tests to help catch this earlier
https://pagure.io/389-ds-base/issue/49432
Author: wibrown
Review by: mreynolds (Thanks!)
|
commit 5c89dd8f9c8eb77c967574412d049d55565bb364
Author: William Brown <[email protected]>
Date: Mon Oct 30 14:19:56 2017 +1000
Ticket 49432 - filter optimise crash
Bug Description: In a certain condition, when we removed the
equality candidate while optimising a filter with a nested
AND, the merge process would segfault
Fix Description: Fix the merge subfilter process to be cleaner
and work in all conditions. Merge the set of filter tests to
cmocka in addition to the python tests to help catch this earlier
https://pagure.io/389-ds-base/issue/49432
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index 8e24957c7..c7f7b8a5d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1994,6 +1994,7 @@ TESTS = test_slapd \
test_slapd_SOURCES = test/main.c \
test/libslapd/test.c \
test/libslapd/counters/atomic.c \
+ test/libslapd/filter/optimise.c \
test/libslapd/pblock/analytics.c \
test/libslapd/pblock/v3_compat.c \
test/libslapd/operation/v3_compat.c \
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index 5e75871f5..883093964 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -1555,22 +1555,28 @@ filter_prioritise_element(Slapi_Filter **list, Slapi_Filter **head, Slapi_Filter
static void
filter_merge_subfilter(Slapi_Filter **list, Slapi_Filter **f_prev, Slapi_Filter **f_cur, Slapi_Filter **f_next) {
- /* Cut our current AND/OR out */
- if (*f_prev != NULL) {
- (*f_prev)->f_next = (*f_cur)->f_next;
- } else if (*list == *f_cur) {
- *list = (*f_cur)->f_next;
- }
- (*f_next) = (*f_cur)->f_next;
- /* Look ahead to the end of our list, without the f_cur. */
- Slapi_Filter *f_cur_tail = *list;
+ /* First, graft in the new item between f_cur and f_cur -> f_next */
+ Slapi_Filter *remainder = (*f_cur)->f_next;
+ (*f_cur)->f_next = (*f_cur)->f_list;
+ /* Go to the end of the newly grafted list, and put in our remainder. */
+ Slapi_Filter *f_cur_tail = *f_cur;
while (f_cur_tail->f_next != NULL) {
f_cur_tail = f_cur_tail->f_next;
}
- /* Now append our descendant into the tail */
- f_cur_tail->f_next = (*f_cur)->f_list;
- /* Finally free the remainder */
+ f_cur_tail->f_next = remainder;
+
+ /* Now indicate to the caller what the next element is. */
+ *f_next = (*f_cur)->f_next;
+
+ /* Now that we have grafted our list in, cut out f_cur */
+ if (*f_prev != NULL) {
+ (*f_prev)->f_next = *f_next;
+ } else if (*list == *f_cur) {
+ *list = *f_next;
+ }
+
+ /* Finally free the f_cur (and/or) */
slapi_filter_free(*f_cur, 0);
}
diff --git a/test/libslapd/filter/optimise.c b/test/libslapd/filter/optimise.c
new file mode 100644
index 000000000..bcf4ccd79
--- /dev/null
+++ b/test/libslapd/filter/optimise.c
@@ -0,0 +1,83 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2017 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#include "../../test_slapd.h"
+
+/* To access filter optimise */
+#include <slapi-private.h>
+
+void
+test_libslapd_filter_optimise(void **state __attribute__((unused)))
+{
+ char *test_filters[] = {
+ "(&(uid=uid1)(sn=last1)(givenname=first1))",
+ "(&(uid=uid1)(&(sn=last1)(givenname=first1)))",
+ "(&(uid=uid1)(&(&(sn=last1))(&(givenname=first1))))",
+ "(&(uid=*)(sn=last3)(givenname=*))",
+ "(&(uid=*)(&(sn=last3)(givenname=*)))",
+ "(&(uid=uid5)(&(&(sn=*))(&(givenname=*))))",
+ "(&(objectclass=*)(uid=*)(sn=last*))",
+ "(&(objectclass=*)(uid=*)(sn=last1))",
+
+ "(|(uid=uid1)(sn=last1)(givenname=first1))",
+ "(|(uid=uid1)(|(sn=last1)(givenname=first1)))",
+ "(|(uid=uid1)(|(|(sn=last1))(|(givenname=first1))))",
+ "(|(objectclass=*)(sn=last1)(|(givenname=first1)))",
+ "(|(&(objectclass=*)(sn=last1))(|(givenname=first1)))",
+ "(|(&(objectclass=*)(sn=last))(|(givenname=first1)))",
+
+ "(&(uid=uid1)(!(cn=NULL)))",
+ "(&(!(cn=NULL))(uid=uid1))",
+ "(&(uid=*)(&(!(uid=1))(!(givenname=first1))))",
+
+ "(&(|(uid=uid1)(uid=NULL))(sn=last1))",
+ "(&(|(uid=uid1)(uid=NULL))(!(sn=NULL)))",
+ "(&(|(uid=uid1)(sn=last2))(givenname=first1))",
+ "(|(&(uid=uid1)(!(uid=NULL)))(sn=last2))",
+ "(|(&(uid=uid1)(uid=NULL))(sn=last2))",
+ "(&(uid=uid5)(sn=*)(cn=*)(givenname=*)(uid=u*)(sn=la*)(cn=full*)(givenname=f*)(uid>=u)(!(givenname=NULL)))",
+ "(|(&(objectclass=*)(sn=last))(&(givenname=first1)))",
+
+ "(&(uid=uid1)(sn=last1)(givenname=NULL))",
+ "(&(uid=uid1)(&(sn=last1)(givenname=NULL)))",
+ "(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL))))",
+ "(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL)(sn=*)))(|(sn=NULL)))",
+ "(&(uid=uid1)(&(&(sn=last*))(&(givenname=first*)))(&(sn=NULL)))",
+
+ "(|(uid=NULL)(sn=NULL)(givenname=NULL))",
+ "(|(uid=NULL)(|(sn=NULL)(givenname=NULL)))",
+ "(|(uid=NULL)(|(|(sn=NULL))(|(givenname=NULL))))",
+
+ "(uid>=uid3)",
+ "(&(uid=*)(uid>=uid3))",
+ "(|(uid>=uid3)(uid<=uid5))",
+ "(&(uid>=uid3)(uid<=uid5))",
+ "(|(&(uid>=uid3)(uid<=uid5))(uid=*))",
+
+ "(|(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)"
+ "(uid=*))",
+ NULL
+ };
+
+ for (size_t i = 0; test_filters[i] != NULL; i++) {
+ char *filter_str = slapi_ch_strdup(test_filters[i]);
+
+ struct slapi_filter *filter = slapi_str2filter(filter_str);
+ slapi_filter_optimise(filter);
+ slapi_filter_free(filter, 1);
+ slapi_ch_free_string(&filter_str);
+ }
+}
+
diff --git a/test/libslapd/test.c b/test/libslapd/test.c
index ffa650d5e..02c6c03a4 100644
--- a/test/libslapd/test.c
+++ b/test/libslapd/test.c
@@ -28,6 +28,7 @@ run_libslapd_tests(void)
cmocka_unit_test(test_libslapd_operation_v3c_target_spec),
cmocka_unit_test(test_libslapd_counters_atomic_usage),
cmocka_unit_test(test_libslapd_counters_atomic_overflow),
+ cmocka_unit_test(test_libslapd_filter_optimise),
cmocka_unit_test(test_libslapd_pal_meminfo),
cmocka_unit_test(test_libslapd_util_cachesane),
};
diff --git a/test/test_slapd.h b/test/test_slapd.h
index ad4f73f86..efccaea78 100644
--- a/test/test_slapd.h
+++ b/test/test_slapd.h
@@ -26,6 +26,9 @@ int run_plugin_tests(void);
/* libslapd */
void test_libslapd_hello(void **state);
+/* libslapd-filter-optimise */
+void test_libslapd_filter_optimise(void **state);
+
/* libslapd-pblock-analytics */
void test_libslapd_pblock_analytics(void **state);
| 0 |
ae05276447e919c49e2ce81e3435f949471685b8
|
389ds/389-ds-base
|
146804 - Added extra validity check for purge_csn - [email protected]
|
commit ae05276447e919c49e2ce81e3435f949471685b8
Author: Nathan Kinder <[email protected]>
Date: Tue Feb 1 22:57:01 2005 +0000
146804 - Added extra validity check for purge_csn - [email protected]
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 08293b054..2c2fd14d0 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -2812,7 +2812,8 @@ _replica_get_purge_csn_nolock(const Replica *r)
purge_csn = csn_dup (csns[i-1]);
/* set purge_csn to the most recent maxcsn - purge_delay */
- csn_set_time(purge_csn, csn_get_time(purge_csn) - r->repl_purge_delay);
+ if ((csn_get_time(purge_csn) - r->repl_purge_delay) > 0)
+ csn_set_time(purge_csn, csn_get_time(purge_csn) - r->repl_purge_delay);
}
if (csns)
| 0 |
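The extra check in the hunk above only rewinds the purge CSN when subtracting the purge delay still leaves a positive timestamp; otherwise the CSN's time is left untouched. A tiny sketch of the same guard on plain time_t values (names are illustrative, not the replication plugin's internals):

    #include <stdio.h>
    #include <time.h>

    /* Rewind 'when' by 'delay' seconds, but never past zero. */
    static time_t
    apply_purge_delay(time_t when, time_t delay)
    {
        if ((when - delay) > 0) {
            return when - delay;
        }
        return when; /* too early to purge anything: keep the original time */
    }

    int
    main(void)
    {
        printf("%ld\n", (long)apply_purge_delay(1000, 600)); /* 400 */
        printf("%ld\n", (long)apply_purge_delay(100, 600));  /* 100 */
        return 0;
    }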
e227c5be7d359138bb54414b8f910e774c05083f
|
389ds/389-ds-base
|
Issue 2526 - revert API change in slapi_be_getsuffix()
Description: The public slapi API was changed in the previous commit and
this broke several other projects using 389 DS. This patch
restores the API, but it ignores the unused value:
const Slapi_DN *slapi_be_getsuffix(Slapi_Backend *be, int unused);
Relates: https://github.com/389ds/389-ds-base/issues/2526
Reviewed by: mreynolds
|
commit e227c5be7d359138bb54414b8f910e774c05083f
Author: Mark Reynolds <[email protected]>
Date: Sun Oct 25 12:46:13 2020 -0400
Issue 2526 - revert API change in slapi_be_getsuffix()
Description: The public slapi API was changed in the previous commit and
this broke several other projects using 389 DS. This patch
restores the API, but it ignores the unused value:
const Slapi_DN *slapi_be_getsuffix(Slapi_Backend *be, int unused);
Relates: https://github.com/389ds/389-ds-base/issues/2526
Reviewed by: mreynolds
diff --git a/ldap/servers/plugins/acl/acllist.c b/ldap/servers/plugins/acl/acllist.c
index 37ce7b1ea..e80c567c3 100644
--- a/ldap/servers/plugins/acl/acllist.c
+++ b/ldap/servers/plugins/acl/acllist.c
@@ -129,7 +129,7 @@ acl_be_state_change_fnc(void *handle __attribute__((unused)), char *be_name, int
* Just get the first suffix--if there are multiple XXX ?
*/
- if ((sdn = slapi_be_getsuffix(be)) == NULL) {
+ if ((sdn = slapi_be_getsuffix(be, 0)) == NULL) {
slapi_log_err(SLAPI_LOG_ACL, plugin_name,
"acl_be_state_change_fnc - Failed to retrieve backend--NOT activating it's acis\n");
return;
@@ -156,7 +156,7 @@ acl_be_state_change_fnc(void *handle __attribute__((unused)), char *be_name, int
* In fact there can onlt be one sufffix here.
*/
- if ((sdn = slapi_be_getsuffix(be)) == NULL) {
+ if ((sdn = slapi_be_getsuffix(be, 0)) == NULL) {
slapi_log_err(SLAPI_LOG_ACL, plugin_name,
"acl_be_state_change_fnc - Failed to retrieve backend--NOT activating it's acis\n");
return;
diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c
index 65b6083ef..dfcc430a6 100644
--- a/ldap/servers/plugins/chainingdb/cb_instance.c
+++ b/ldap/servers/plugins/chainingdb/cb_instance.c
@@ -1674,7 +1674,7 @@ cb_instance_search_config_callback(Slapi_PBlock *pb __attribute__((unused)),
slapi_rwlock_rdlock(inst->rwl_config_lock);
if (inst->inst_be) {
- if ((aSuffix = slapi_be_getsuffix(inst->inst_be))) {
+ if ((aSuffix = slapi_be_getsuffix(inst->inst_be, 0))) {
val.bv_val = (char *)slapi_sdn_get_dn(aSuffix);
val.bv_len = strlen(val.bv_val);
if (val.bv_len) {
diff --git a/ldap/servers/plugins/chainingdb/cb_test.c b/ldap/servers/plugins/chainingdb/cb_test.c
index 1e7298d48..02a1937a1 100644
--- a/ldap/servers/plugins/chainingdb/cb_test.c
+++ b/ldap/servers/plugins/chainingdb/cb_test.c
@@ -42,7 +42,7 @@ cb_back_test(Slapi_PBlock *pb)
printf("Begin test instance %s.\n", inst->inst_name);
- aSuffix = slapi_be_getsuffix(be);
+ aSuffix = slapi_be_getsuffix(be, 0);
/* Remove leading white spaces */
for (aSuffixString = slapi_sdn_get_dn(aSuffix);
*aSuffixString == ' '; aSuffixString++) {
diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c
index 006cdae73..dfd5dd92c 100644
--- a/ldap/servers/plugins/chainingdb/cb_utils.c
+++ b/ldap/servers/plugins/chainingdb/cb_utils.c
@@ -342,7 +342,7 @@ cb_be_state_change(void *handle __attribute__((unused)), char *be_name, int old_
}
/* get the suffix for the local backend */
- tmpsdn = slapi_be_getsuffix(the_be);
+ tmpsdn = slapi_be_getsuffix(the_be, 0);
if (!tmpsdn) {
return;
} else {
@@ -355,7 +355,7 @@ cb_be_state_change(void *handle __attribute__((unused)), char *be_name, int old_
/* only look at chaining backends */
if (slapi_be_is_flag_set(chainbe, SLAPI_BE_FLAG_REMOTE_DATA)) {
/* get the suffix */
- const Slapi_DN *tmpcbsuf = slapi_be_getsuffix(chainbe);
+ const Slapi_DN *tmpcbsuf = slapi_be_getsuffix(chainbe, 0);
if (tmpcbsuf) {
/* make a copy - to be safe */
Slapi_DN *cbsuffix = slapi_sdn_dup(tmpcbsuf);
@@ -367,9 +367,9 @@ cb_be_state_change(void *handle __attribute__((unused)), char *be_name, int old_
if (cbinst) {
/* the backend is disabled if the state is not ON */
cbinst->associated_be_is_disabled = (new_be_state != SLAPI_BE_STATE_ON);
- slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, "cb_be_state_change - Set the "
- "state of chainbe for %s to %d\n",
- slapi_sdn_get_dn(cbsuffix), (new_be_state != SLAPI_BE_STATE_ON));
+ slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM,
+ "cb_be_state_change - Set the state of chainbe for %s to %d\n",
+ slapi_sdn_get_dn(cbsuffix), (new_be_state != SLAPI_BE_STATE_ON));
}
}
slapi_sdn_free(&cbsuffix);
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 92fe0fcc8..16c625bb0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -2884,7 +2884,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
/* Find the backend suffix where the shared config is stored. */
range_sdn = slapi_sdn_new_dn_byref(range_dn);
if ((be = slapi_be_select(range_sdn)) != NULL) {
- be_suffix = slapi_sdn_get_dn(slapi_be_getsuffix(be));
+ be_suffix = slapi_sdn_get_dn(slapi_be_getsuffix(be, 0));
}
/* Fetch the "cn=replica" entry for the backend that stores
@@ -2976,7 +2976,7 @@ dna_get_replica_bind_creds(char *range_dn, struct dnaServer *server, char **bind
/* Find the backend suffix where the shared config is stored. */
range_sdn = slapi_sdn_new_normdn_byref(range_dn);
if ((be = slapi_be_select(range_sdn)) != NULL) {
- be_suffix = slapi_sdn_get_dn(slapi_be_getsuffix(be));
+ be_suffix = slapi_sdn_get_dn(slapi_be_getsuffix(be, 0));
}
/* Fetch the replication agreement entry */
diff --git a/ldap/servers/plugins/linkedattrs/fixup_task.c b/ldap/servers/plugins/linkedattrs/fixup_task.c
index 95ab3a36f..f47ccf6d1 100644
--- a/ldap/servers/plugins/linkedattrs/fixup_task.c
+++ b/ldap/servers/plugins/linkedattrs/fixup_task.c
@@ -430,7 +430,7 @@ linked_attrs_add_backlinks_callback(Slapi_Entry *e, void *callback_data)
Slapi_DN *linksdn = slapi_sdn_new_normdn_byref(linkdn);
if ((be = slapi_be_select(linksdn))) {
- perform_update = slapi_sdn_issuffix(targetsdn, slapi_be_getsuffix(be));
+ perform_update = slapi_sdn_issuffix(targetsdn, slapi_be_getsuffix(be, 0));
}
slapi_sdn_free(&linksdn);
diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.c b/ldap/servers/plugins/linkedattrs/linked_attrs.c
index 79d78ed4c..fe585041c 100644
--- a/ldap/servers/plugins/linkedattrs/linked_attrs.c
+++ b/ldap/servers/plugins/linkedattrs/linked_attrs.c
@@ -1443,7 +1443,7 @@ linked_attrs_mod_backpointers(Slapi_PBlock *pb, char *linkdn, char *type, char *
Slapi_DN *linksdn = slapi_sdn_new_normdn_byref(linkdn);
if ((be = slapi_be_select(linksdn))) {
- perform_update = slapi_sdn_issuffix(targetsdn, slapi_be_getsuffix(be));
+ perform_update = slapi_sdn_issuffix(targetsdn, slapi_be_getsuffix(be, 0));
}
slapi_sdn_free(&linksdn);
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 0925611b7..e9e1ec4c7 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -799,7 +799,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
break;
}
}
- if ((base_sdn = (Slapi_DN *)slapi_be_getsuffix(be)) == NULL) {
+ if ((base_sdn = (Slapi_DN *)slapi_be_getsuffix(be, 0)) == NULL) {
if (!all_backends) {
break;
} else {
@@ -1549,7 +1549,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
break;
}
}
- if ((base_sdn = (Slapi_DN *)slapi_be_getsuffix(be)) == NULL) {
+ if ((base_sdn = (Slapi_DN *)slapi_be_getsuffix(be, 0)) == NULL) {
if (!all_backends) {
break;
} else {
diff --git a/ldap/servers/plugins/posix-winsync/posix-group-func.c b/ldap/servers/plugins/posix-winsync/posix-group-func.c
index 9a7517f58..7bd0b9eec 100644
--- a/ldap/servers/plugins/posix-winsync/posix-group-func.c
+++ b/ldap/servers/plugins/posix-winsync/posix-group-func.c
@@ -301,7 +301,7 @@ posix_winsync_foreach_parent(Slapi_Entry *entry, char **attrs, plugin_search_ent
for (be = slapi_get_first_backend(&cookie); be;
be = slapi_get_next_backend(cookie)) {
- const Slapi_DN *base_sdn = slapi_be_getsuffix(be);
+ const Slapi_DN *base_sdn = slapi_be_getsuffix(be, 0);
if (base_sdn == NULL) {
continue;
}
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 79ee3b438..457a5675e 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -4388,7 +4388,7 @@ cl5Export(Slapi_PBlock *pb)
slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name);
slapi_pblock_get(pb, SLAPI_DB2LDIF_FILE, &instance_ldif);
slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- replica = replica_get_replica_from_dn(slapi_be_getsuffix(be));
+ replica = replica_get_replica_from_dn(slapi_be_getsuffix(be, 0));
if (replica == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
"cl5Export - No replica defined for instance %s\n", instance_name);
diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c
index 1e739e3b8..08a58613b 100644
--- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c
+++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c
@@ -201,7 +201,7 @@ replica_get_for_backend(const char *be_name)
if (NULL == be)
return NULL;
- suffix = slapi_be_getsuffix(be);
+ suffix = slapi_be_getsuffix(be, 0);
return replica_get_replica_from_dn(suffix);
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 301e9fa00..f41dbc72d 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1606,7 +1606,7 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index db0eee5d6..de99ba233 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -459,7 +459,7 @@ roles_cache_trigger_update_suffix(void *handle __attribute__((unused)), char *be
/* Backend back on line or new one created*/
backend = slapi_be_select_by_instance_name(be_name);
if (backend != NULL) {
- be_suffix_dn = slapi_be_getsuffix(backend);
+ be_suffix_dn = slapi_be_getsuffix(backend, 0);
top_suffix_dn = roles_cache_get_top_suffix((Slapi_DN *)be_suffix_dn);
}
@@ -842,7 +842,7 @@ roles_cache_change_notify(Slapi_PBlock *pb)
}
}
#endif
- Slapi_DN *top_suffix = roles_cache_get_top_suffix((Slapi_DN *)slapi_be_getsuffix(be));
+ Slapi_DN *top_suffix = roles_cache_get_top_suffix((Slapi_DN *)slapi_be_getsuffix(be, 0));
if (top_suffix != NULL) {
dn = slapi_sdn_get_dn(sdn);
@@ -1679,7 +1679,7 @@ roles_cache_find_roles_in_suffix(Slapi_DN *target_entry_dn, roles_cache_def **li
*list_of_roles = NULL;
backend = slapi_mapping_tree_find_backend_for_sdn(target_entry_dn);
if ((backend != NULL) && !slapi_be_is_flag_set(backend, SLAPI_BE_FLAG_REMOTE_DATA)) {
- Slapi_DN *suffix = roles_cache_get_top_suffix((Slapi_DN *)slapi_be_getsuffix(backend));
+ Slapi_DN *suffix = roles_cache_get_top_suffix((Slapi_DN *)slapi_be_getsuffix(backend, 0));
roles_cache_def *current_role = roles_list;
/* Go through all the roles list and trigger the associated structure */
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index 5724d7d9d..598c6868d 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -5,6 +5,9 @@
* License: GPL (version 3 or any later version).
* See LICENSE for details.
* END COPYRIGHT BLOCK **/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
#include <nspr4/prlog.h>
#include <bits/stdint-intn.h>
diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
index 2530ce79f..bdb55e6b1 100644
--- a/ldap/servers/plugins/usn/usn_cleanup.c
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
@@ -279,7 +279,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
/* suffix is not given, but backend is; get the suffix */
if (!suffix && backend) {
be = slapi_be_select_by_instance_name(backend);
- be_suffix = slapi_be_getsuffix(be);
+ be_suffix = slapi_be_getsuffix(be, 0);
if (be_suffix) {
suffix = slapi_ch_strdup(slapi_sdn_get_ndn(be_suffix));
} else {
diff --git a/ldap/servers/slapd/back-ldbm/ancestorid.c b/ldap/servers/slapd/back-ldbm/ancestorid.c
index 8fe7df0cf..581bbf4c3 100644
--- a/ldap/servers/slapd/back-ldbm/ancestorid.c
+++ b/ldap/servers/slapd/back-ldbm/ancestorid.c
@@ -218,7 +218,7 @@ ldbm_ancestorid_index_entry(
ret = ldbm_ancestorid_index_update(be,
slapi_entry_get_sdn_const(e->ep_entry),
- slapi_be_getsuffix(be),
+ slapi_be_getsuffix(be, 0),
0, 1, e->ep_id, NULL, flags, txn);
return ret;
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
index d9510fc7f..8d6c322db 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
@@ -422,7 +422,7 @@ bdb_fetch_subtrees(backend *be, char **include, int *err)
/* for each subtree spec... */
for (i = 0; include[i]; i++) {
IDList *idl = NULL;
- const char *suffix = slapi_sdn_get_ndn(slapi_be_getsuffix(be));
+ const char *suffix = slapi_sdn_get_ndn(slapi_be_getsuffix(be, 0));
char *parentdn = slapi_ch_strdup(suffix);
char *nextdn = NULL;
int matched = 0;
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_compare.c b/ldap/servers/slapd/back-ldbm/ldbm_compare.c
index f1fb0319e..e655019a2 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_compare.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_compare.c
@@ -54,7 +54,7 @@ ldbm_back_compare(Slapi_PBlock *pb)
return -1;
}
/* get the namespace dn */
- namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be);
+ namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0);
if ((e = find_entry(pb, be, addr, &txn, NULL)) == NULL) {
ret = -1; /* error result sent by find_entry() */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
index cead42e0d..8ff17a50d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
@@ -755,7 +755,7 @@ ldbm_instance_search_config_entry_callback(Slapi_PBlock *pb __attribute__((unuse
/* show the suffixes */
attrlist_delete(&e->e_attrs, CONFIG_INSTANCE_SUFFIX);
- suffix = slapi_be_getsuffix(inst->inst_be);
+ suffix = slapi_be_getsuffix(inst->inst_be, 0);
if (suffix != NULL) {
val.bv_val = (char *)slapi_sdn_get_dn(suffix);
val.bv_len = strlen(val.bv_val);
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index 35c5429f7..5c7041eb0 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -183,12 +183,8 @@ slapi_be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix)
be_addsuffix(be, suffix);
}
-/*
- * The Slapi_DN pointer will always be valid even though the array
- * itself may be changing due to the addition of a suffix.
- */
const Slapi_DN *
-slapi_be_getsuffix(Slapi_Backend *be)
+slapi_be_getsuffix(Slapi_Backend *be, int n __attribute__((unused)))
{
if (be && be->be_state != BE_STATE_DELETED) {
return be->be_suffix;
diff --git a/ldap/servers/slapd/configdse.c b/ldap/servers/slapd/configdse.c
index 0daf540d5..27ca129d0 100644
--- a/ldap/servers/slapd/configdse.c
+++ b/ldap/servers/slapd/configdse.c
@@ -201,7 +201,7 @@ read_config_dse(Slapi_PBlock *pb __attribute__((unused)),
while (be) {
if (be->be_private) {
const Slapi_DN *base = NULL;
- base = slapi_be_getsuffix(be);
+ base = slapi_be_getsuffix(be, 0);
if (base != NULL) {
val.bv_val = (void *)slapi_sdn_get_dn(base); /* jcm: had to cast away const */
val.bv_len = strlen(val.bv_val);
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 6ca7bd061..c8b45bfdb 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -678,7 +678,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
*/
/* that means we only support one suffix per backend */
- be_suffix = slapi_be_getsuffix(be);
+ be_suffix = slapi_be_getsuffix(be, 0);
if (be_list[0] == NULL) {
next_be = NULL;
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index a234d395a..9daf3b151 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -1225,7 +1225,7 @@ send_all_attrs(Slapi_Entry *e, char **attrs, Slapi_Operation *op, Slapi_PBlock *
/* get the namespace dn */
slapi_pblock_get(pb, SLAPI_BACKEND, (void *)&be);
- namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be);
+ namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0);
/* Get the attribute value from the vattr service */
/* ctx will be freed by attr_context_ungrok() */
@@ -1379,7 +1379,7 @@ send_specific_attrs(Slapi_Entry *e, char **attrs, Slapi_Operation *op, Slapi_PBl
/* get the namespace dn */
slapi_pblock_get(pb, SLAPI_BACKEND, (void *)&be);
- namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be);
+ namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0);
/* Get the attribute value from the vattr service */
/* This call handles subtype, as well.
diff --git a/ldap/servers/slapd/rootdse.c b/ldap/servers/slapd/rootdse.c
index 864f6a29b..24c27d494 100644
--- a/ldap/servers/slapd/rootdse.c
+++ b/ldap/servers/slapd/rootdse.c
@@ -149,7 +149,7 @@ read_root_dse(Slapi_PBlock *pb,
continue;
/* tolerate a backend under construction containing no suffix */
- if ((be_suffix = slapi_be_getsuffix(be)) == NULL)
+ if ((be_suffix = slapi_be_getsuffix(be, 0)) == NULL)
continue;
if ((base = (char *)slapi_sdn_get_dn(be_suffix)) == NULL)
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index d9ffa3db5..5b9b29a49 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6398,7 +6398,7 @@ int slapi_be_logchanges(Slapi_Backend *be);
int slapi_be_issuffix(const Slapi_Backend *be, const Slapi_DN *suffix);
void slapi_be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix);
char *slapi_be_get_name(Slapi_Backend *be);
-const Slapi_DN *slapi_be_getsuffix(Slapi_Backend *be);
+const Slapi_DN *slapi_be_getsuffix(Slapi_Backend *be, int n);
Slapi_Backend *slapi_get_first_backend(char **cookie);
Slapi_Backend *slapi_get_next_backend(char *cookie);
int slapi_be_private(Slapi_Backend *be);
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 34b3b75e1..8d82d379f 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -2467,7 +2467,7 @@ task_fixup_tombstones_add(Slapi_PBlock *pb,
if ((backend = slapi_entry_attr_get_charray(e, TASK_TOMBSTONE_FIXUP_BACKEND))) {
for (i = 0; backend && backend[i]; i++) {
if ((be = slapi_be_select_by_instance_name(backend[i]))) {
- if ((base_sdn = slapi_be_getsuffix(be))) {
+ if ((base_sdn = slapi_be_getsuffix(be, 0))) {
slapi_ch_array_add(&base, slapi_ch_strdup(slapi_sdn_get_ndn(base_sdn)));
} else {
/* failed to get a suffix */
@@ -2495,7 +2495,7 @@ task_fixup_tombstones_add(Slapi_PBlock *pb,
/* Gather all the backends */
be = slapi_get_first_backend(&cookie);
while (be) {
- if ((base_sdn = slapi_be_getsuffix(be)) && !be->be_private) {
+ if ((base_sdn = slapi_be_getsuffix(be, 0)) && !be->be_private) {
const char *suf = slapi_sdn_get_ndn(base_sdn);
/* Need to skip the retro changelog */
if (strcmp(suf, "cn=changelog")) {
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index b3dd08085..7c70f4157 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -576,7 +576,7 @@ vattr_test_filter(Slapi_PBlock *pb,
/* get the namespace this entry belongs to */
sdn = slapi_entry_get_sdn(e);
be = slapi_be_select(sdn);
- namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be);
+ namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0);
/* Look for attribute in the map */
@@ -1784,7 +1784,7 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
slapi_sdn_set_dn_byref(&original_dn, DN);
be = slapi_be_select(&original_dn);
- namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be);
+ namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0);
if (namespace_dn && be != defbackend_get_backend()) /* just in case someone thinks "" is a good namespace */
{
diff --git a/src/rewriters/adfilter.c b/src/rewriters/adfilter.c
index 3a6b3802c..fcea027c8 100644
--- a/src/rewriters/adfilter.c
+++ b/src/rewriters/adfilter.c
@@ -407,7 +407,7 @@ adfilter_rewrite_objectCategory(Slapi_PBlock *pb)
return SEARCH_REWRITE_CALLBACK_CONTINUE;
}
if ((be = slapi_be_select(sdn)) != NULL) {
- be_suffix = slapi_sdn_get_dn(slapi_be_getsuffix(be));
+ be_suffix = slapi_sdn_get_dn(slapi_be_getsuffix(be, 0));
}
/* prepare the argument of filter apply callback: a format and
| 0 |
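With the two-argument prototype of slapi_be_getsuffix() restored above, out-of-tree plugins keep compiling and simply pass a placeholder second argument. A minimal caller sketch, compiled as part of a plugin (the backend is looked up with slapi_be_select(), as several call sites in the diff do; error handling trimmed):

    #include <slapi-plugin.h>

    /* Return the suffix DN string of the backend holding 'sdn',
     * or NULL if no backend matches. The second argument is ignored. */
    static const char *
    suffix_for_entry(const Slapi_DN *sdn)
    {
        Slapi_Backend *be = slapi_be_select(sdn);
        if (be == NULL) {
            return NULL;
        }
        const Slapi_DN *suffix = slapi_be_getsuffix(be, 0);
        return suffix ? slapi_sdn_get_dn(suffix) : NULL;
    }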
16be5c6aa9af8c8baf5695dbd3e7f3b045c8cfaa
|
389ds/389-ds-base
|
Ticket 47586 - CI tests: follow up
Bug Description:
Uncomment fixture definition
Fix Description:
https://fedorahosted.org/389/ticket/47586
Reviewed by: ?
Platforms tested: F19
Flag Day: no
Doc impact: no
|
commit 16be5c6aa9af8c8baf5695dbd3e7f3b045c8cfaa
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Wed Nov 27 13:48:18 2013 +0100
Ticket 47586 - CI tests: follow up
Bug Description:
Uncomment fixture definition
Fix Description:
https://fedorahosted.org/389/ticket/47586
Reviewed by: ?
Platforms tested: F19
Flag Day: no
Doc impact: no
diff --git a/dirsrvtests/tickets/ticket47490_test.py b/dirsrvtests/tickets/ticket47490_test.py
index b4b006797..b6e591eb8 100644
--- a/dirsrvtests/tickets/ticket47490_test.py
+++ b/dirsrvtests/tickets/ticket47490_test.py
@@ -125,7 +125,7 @@ def trigger_schema_push(topology):
time.sleep(1)
loop += 1
-#@pytest.fixture(scope="module")
[email protected](scope="module")
def topology(request):
'''
This fixture is used to create a replicated topology for the 'module'.
| 0 |
3329b180bb379717df93da4a580e6efa6a2efbc3
|
389ds/389-ds-base
|
Issue 5051 - RFE - ADSync flatten tree (#5192)
Issue 5051 - RFE - ADSync flatten tree
Bug Description: ADSync would try to preserve the tree
structure within AD when replicated to 389. This often
led to sync failing unless the administrator manually
recreated the OU structure since we DID NOT create
those OUs automatically
Fix Description: Allow flattening the content of the AD
tree into the ds-subtree. Since AD guarantees that ALL
RDN's are unique across the directory, this is safe
because there can never be a conflict. Additionally
due to how this is implemented, it means that when an
AD entry is relocated in the AD tree, it does not move
in 389, but continues to receive updates and changes
fixes: https://github.com/389ds/389-ds-base/issues/5051
Author: William Brown <[email protected]>
Review by: @mreynolds389 (thanks!)
|
commit 3329b180bb379717df93da4a580e6efa6a2efbc3
Author: Firstyear <[email protected]>
Date: Tue Mar 8 15:52:59 2022 +1000
Issue 5051 - RFE - ADSync flatten tree (#5192)
Issue 5051 - RFE - ADSync flatten tree
Bug Description: ADSync would try to preserve the tree
structure within AD when replicated to 389. This often
led to sync failing unless the administrator manually
recreated the OU structure since we DID NOT create
those OUs automatically
Fix Description: Allow flattening the content of the AD
tree into the ds-subtree. Since AD guarantees that ALL
RDN's are unique across the directory, this is safe
because there can never be a conflict. Additionally
due to how this is implemented, it means that when an
AD entry is relocated in the AD tree, it does not move
in 389, but continues to receive updates and changes
fixes: https://github.com/389ds/389-ds-base/issues/5051
Author: William Brown <[email protected]>
Review by: @mreynolds389 (thanks!)
diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index 3b0ad0a97..5bb7a4f90 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -154,6 +154,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slap
attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2389 NAME 'winSyncFlattenTree' DESC 'When set to on, will flatten tree structure in AD replication' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
#
# objectclasses:
#
@@ -182,7 +183,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.99 NAME 'cosSuperDefinition' DESC 'Netsca
objectClasses: ( 2.16.840.1.113730.3.2.100 NAME 'cosClassicDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition MAY ( cosTemplateDn $ cosspecifier ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.101 NAME 'cosPointerDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition MAY ( cosTemplateDn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.102 NAME 'cosIndirectDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition MAY ( cosIndirectSpecifier ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5replicaSessionPauseTime $ nsds7WindowsReplicaSubtree $ nsds7DirectoryReplicaSubtree $ nsds7NewWinUserSyncEnabled $ nsds7NewWinGroupSyncEnabled $ nsds7WindowsDomain $ nsds7DirsyncCookie $ winSyncInterval $ oneWaySync $ winSyncMoveAction $ nsds5ReplicaEnabled $ winSyncDirectoryFilter $ winSyncWindowsFilter $ winSyncSubtreePair ) X-ORIGIN 'Netscape Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5replicaSessionPauseTime $ nsds7WindowsReplicaSubtree $ nsds7DirectoryReplicaSubtree $ nsds7NewWinUserSyncEnabled $ nsds7NewWinGroupSyncEnabled $ nsds7WindowsDomain $ nsds7DirsyncCookie $ winSyncInterval $ oneWaySync $ winSyncMoveAction $ nsds5ReplicaEnabled $ winSyncDirectoryFilter $ winSyncWindowsFilter $ winSyncSubtreePair $ winSyncFlattenTree ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' )
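The new winSyncFlattenTree attribute above only toggles how the local DN is built when an AD entry is mapped in: with flattening off, the AD container path is inserted between the RDN and the agreement's directory subtree; with it on, the entry lands directly under the subtree. A small sketch with hypothetical values (the real username, container and suffix come from the AD entry and the agreement's nsds7DirectoryReplicaSubtree; the server itself builds these with slapi_create_dn_string() and quotes the RDN value):

    #include <stdio.h>

    int
    main(void)
    {
        /* Hypothetical values for illustration only. */
        const char *username  = "jdoe";
        const char *container = "ou=Sales,ou=EMEA,";       /* derived from the AD location */
        const char *suffix    = "ou=People,dc=example,dc=com";
        char nested[256];
        char flat[256];

        /* winSyncFlattenTree: off -> the AD container path is preserved. */
        snprintf(nested, sizeof(nested), "uid=%s,%s%s", username, container, suffix);
        /* winSyncFlattenTree: on -> the entry sits directly under the subtree. */
        snprintf(flat, sizeof(flat), "uid=%s,%s", username, suffix);

        printf("nested: %s\n", nested); /* uid=jdoe,ou=Sales,ou=EMEA,ou=People,dc=example,dc=com */
        printf("flat:   %s\n", flat);   /* uid=jdoe,ou=People,dc=example,dc=com */
        return 0;
    }

Because AD keeps RDNs unique across the directory, the flat form cannot collide, which is what makes the flattened mapping safe even when the entry is later relocated inside AD.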
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 1c8de8fec..a1f009cbd 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -176,6 +176,7 @@ extern const char *type_winsyncMoveAction;
extern const char *type_winSyncWindowsFilter;
extern const char *type_winSyncDirectoryFilter;
extern const char *type_winSyncSubtreePair;
+extern const char *type_winSyncFlattenTree;
/* To Allow Consumer Initialization when adding an agreement - */
extern const char *type_nsds5BeginReplicaRefresh;
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
index 6d71671a9..56ac84f1a 100644
--- a/ldap/servers/plugins/replication/repl_globals.c
+++ b/ldap/servers/plugins/replication/repl_globals.c
@@ -131,6 +131,7 @@ const char *type_winsyncMoveAction = "winSyncMoveAction";
const char *type_winSyncWindowsFilter = "winSyncWindowsFilter";
const char *type_winSyncDirectoryFilter = "winSyncDirectoryFilter";
const char *type_winSyncSubtreePair = "winSyncSubtreePair";
+const char *type_winSyncFlattenTree = "winSyncFlattenTree";
/* To Allow Consumer Initialization when adding an agreement - */
const char *type_nsds5BeginReplicaRefresh = "nsds5BeginReplicaRefresh";
diff --git a/ldap/servers/plugins/replication/windows_private.c b/ldap/servers/plugins/replication/windows_private.c
index 2362000a3..3147d86f5 100644
--- a/ldap/servers/plugins/replication/windows_private.c
+++ b/ldap/servers/plugins/replication/windows_private.c
@@ -35,6 +35,7 @@ struct windowsprivate
PRBool dirsync_cookie_has_more;
PRBool create_users_from_dirsync;
PRBool create_groups_from_dirsync;
+ PRBool flatten_tree;
char *windows_domain;
int isnt4;
int iswin2k3;
@@ -277,6 +278,16 @@ windows_parse_config_entry(Repl_Agmt *ra, const char *type, Slapi_Entry *e)
slapi_ch_array_free(parray);
retval = 1;
}
+ if (type == NULL || slapi_attr_types_equivalent(type, type_winSyncFlattenTree)) {
+ tmpstr = (char *)slapi_entry_attr_get_ref(e, type_winSyncFlattenTree);
+ if (NULL != tmpstr && true_value_from_string(tmpstr)) {
+ windows_private_set_flatten_tree(ra, PR_TRUE);
+ } else {
+ windows_private_set_flatten_tree(ra, PR_FALSE);
+ }
+ retval = 1;
+ }
+
windows_private_set_windows_treetop(ra, NULL);
windows_private_set_directory_treetop(ra, NULL);
@@ -348,8 +359,6 @@ windows_private_new()
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_new\n");
-
dp = (Dirsync_Private *)slapi_ch_calloc(sizeof(Dirsync_Private), 1);
dp->dirsync_maxattributecount = -1;
@@ -363,7 +372,6 @@ windows_private_new()
dp->windows_treetop = NULL;
dp->directory_treetop = NULL;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_new\n");
return dp;
}
@@ -373,7 +381,6 @@ windows_agreement_delete(Repl_Agmt *ra)
const subtreePair *sp;
Dirsync_Private *dp = (Dirsync_Private *)agmt_get_priv(ra);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_delete\n");
PR_ASSERT(dp != NULL);
@@ -405,7 +412,6 @@ windows_agreement_delete(Repl_Agmt *ra)
slapi_ch_free((void **)&dp->subtree_pairs);
slapi_ch_free((void **)&dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_delete\n");
}
int
@@ -413,15 +419,11 @@ windows_private_get_isnt4(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_isnt4\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_isnt4\n");
-
return dp->isnt4;
}
@@ -430,16 +432,12 @@ windows_private_set_isnt4(const Repl_Agmt *ra, int isit)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_isnt4\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->isnt4 = isit;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_isnt4\n");
}
int
@@ -447,15 +445,11 @@ windows_private_get_iswin2k3(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_iswin2k3\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_iswin2k3\n");
-
return dp->iswin2k3;
}
@@ -464,16 +458,12 @@ windows_private_set_iswin2k3(const Repl_Agmt *ra, int isit)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_iswin2k3\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->iswin2k3 = isit;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_iswin2k3\n");
}
/* Returns a copy of the Slapi_Filter pointer. The caller should not free it */
@@ -482,8 +472,6 @@ windows_private_get_directory_filter(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_directory_filter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -508,8 +496,6 @@ windows_private_get_directory_filter(const Repl_Agmt *ra)
slapi_ch_free_string(&string_filter);
}
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_directory_filter\n");
-
return dp->directory_filter;
}
@@ -519,8 +505,6 @@ windows_private_get_windows_filter(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_windows_filter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -541,8 +525,6 @@ windows_private_get_windows_filter(const Repl_Agmt *ra)
}
}
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_windows_filter\n");
-
return dp->windows_filter;
}
@@ -552,8 +534,6 @@ windows_private_get_deleted_filter(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_deleted_filter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -566,8 +546,6 @@ windows_private_get_deleted_filter(const Repl_Agmt *ra)
slapi_ch_free_string(&string_filter);
}
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_deleted_filter\n");
-
return dp->deleted_filter;
}
@@ -577,15 +555,11 @@ windows_private_get_windows_subtree(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_windows_subtree\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_windows_subtree\n");
-
return dp->windows_subtree;
}
@@ -594,15 +568,11 @@ windows_private_get_windows_domain(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_windows_domain\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_windows_domain\n");
-
return dp->windows_domain;
}
@@ -611,8 +581,6 @@ windows_private_set_windows_domain(const Repl_Agmt *ra, char *domain)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_windows_domain\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -620,8 +588,6 @@ windows_private_set_windows_domain(const Repl_Agmt *ra, char *domain)
slapi_ch_free_string(&dp->windows_domain);
dp->windows_domain = domain;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_windows_domain\n");
}
/* Returns a copy of the Slapi_DN pointer, no need to free it */
@@ -630,15 +596,11 @@ windows_private_get_directory_subtree(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_directory_replarea\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_directory_replarea\n");
-
return dp->directory_subtree;
}
@@ -649,8 +611,6 @@ windows_private_set_windows_subtree(const Repl_Agmt *ra, Slapi_DN *sdn)
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_windows_replarea\n");
-
PR_ASSERT(ra);
PR_ASSERT(sdn);
@@ -659,8 +619,6 @@ windows_private_set_windows_subtree(const Repl_Agmt *ra, Slapi_DN *sdn)
slapi_sdn_free(&dp->windows_subtree);
dp->windows_subtree = sdn;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_windows_replarea\n");
}
/* Takes a copy of the sdn passed in */
@@ -670,8 +628,6 @@ windows_private_set_directory_subtree(const Repl_Agmt *ra, Slapi_DN *sdn)
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_directory_replarea\n");
-
PR_ASSERT(ra);
PR_ASSERT(sdn);
@@ -680,8 +636,6 @@ windows_private_set_directory_subtree(const Repl_Agmt *ra, Slapi_DN *sdn)
slapi_sdn_free(&dp->directory_subtree);
dp->directory_subtree = sdn;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_directory_replarea\n");
}
PRBool
@@ -689,14 +643,10 @@ windows_private_create_users(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_create_users\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_create_users\n");
-
return dp->create_users_from_dirsync;
}
@@ -706,15 +656,11 @@ windows_private_set_create_users(const Repl_Agmt *ra, PRBool value)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_create_users\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->create_users_from_dirsync = value;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_create_users\n");
}
PRBool
@@ -722,14 +668,10 @@ windows_private_create_groups(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_create_groups\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_create_groups\n");
-
return dp->create_groups_from_dirsync;
}
@@ -739,15 +681,11 @@ windows_private_set_create_groups(const Repl_Agmt *ra, PRBool value)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_create_groups\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->create_groups_from_dirsync = value;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_create_groups\n");
}
@@ -756,14 +694,10 @@ windows_private_get_one_way(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_one_way\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_one_way\n");
-
return dp->one_way;
}
@@ -773,15 +707,11 @@ windows_private_set_one_way(const Repl_Agmt *ra, int value)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_one_way\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->one_way = value;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_one_way\n");
}
const char *
@@ -789,14 +719,10 @@ windows_private_get_windows_userfilter(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_windows_userfilter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_windows_userfilter\n");
-
return dp->windows_userfilter;
}
@@ -806,8 +732,6 @@ windows_private_set_windows_userfilter(const Repl_Agmt *ra, char *filter)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_windows_userfilter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -815,8 +739,6 @@ windows_private_set_windows_userfilter(const Repl_Agmt *ra, char *filter)
slapi_ch_free_string(&dp->windows_userfilter);
dp->windows_userfilter = filter;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_windows_userfilter\n");
}
const char *
@@ -824,14 +746,10 @@ windows_private_get_directory_userfilter(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_directory_userfilter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_directory_userfilter\n");
-
return dp->directory_userfilter;
}
@@ -841,8 +759,6 @@ windows_private_set_directory_userfilter(const Repl_Agmt *ra, char *filter)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_directory_userfilter\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -850,8 +766,6 @@ windows_private_set_directory_userfilter(const Repl_Agmt *ra, char *filter)
slapi_ch_free_string(&dp->directory_userfilter);
dp->directory_userfilter = filter;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_directory_userfilter\n");
}
const subtreePair *
@@ -859,14 +773,10 @@ windows_private_get_subtreepairs(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_subtreepairs\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_subtreepairs\n");
-
return dp->subtree_pairs;
}
@@ -876,8 +786,6 @@ windows_private_set_subtreepairs(const Repl_Agmt *ra, char **parray)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_subtreepairs\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -885,10 +793,33 @@ windows_private_set_subtreepairs(const Repl_Agmt *ra, char **parray)
free_subtree_pairs(&(dp->subtree_pairs));
dp->subtree_pairs = create_subtree_pairs(parray);
+}
+
+void
+windows_private_set_flatten_tree(const Repl_Agmt *ra, PRBool value)
+{
+ Dirsync_Private *dp;
+
+ PR_ASSERT(ra);
+ dp = (Dirsync_Private *)agmt_get_priv(ra);
+ PR_ASSERT(dp);
+
+ dp->flatten_tree = value;
+}
+
+PRBool
+windows_private_get_flatten_tree(const Repl_Agmt *ra)
+{
+ Dirsync_Private *dp;
+
+ PR_ASSERT(ra);
+ dp = (Dirsync_Private *)agmt_get_priv(ra);
+ PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_subtreepairs\n");
+ return dp->flatten_tree;
}
+
/*
* winSyncSubtreePair: <DS_SUBTREE>:<WINDOWS_SUBTREE>
* E.g.,
@@ -963,14 +894,10 @@ windows_private_get_windows_treetop(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_windows_treetop\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_windows_treetop\n");
-
return dp->windows_treetop;
}
@@ -980,8 +907,6 @@ windows_private_set_windows_treetop(const Repl_Agmt *ra, char *treetop)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_windows_treetop\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1017,8 +942,6 @@ windows_private_set_windows_treetop(const Repl_Agmt *ra, char *treetop)
dp->windows_treetop = slapi_sdn_dup(windows_subtree);
}
}
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_windows_treetop\n");
}
const Slapi_DN *
@@ -1026,14 +949,10 @@ windows_private_get_directory_treetop(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_directory_treetop\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_directory_treetop\n");
-
return dp->directory_treetop;
}
@@ -1043,8 +962,6 @@ windows_private_set_directory_treetop(const Repl_Agmt *ra, char *treetop)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_directory_treetop\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1080,8 +997,6 @@ windows_private_set_directory_treetop(const Repl_Agmt *ra, char *treetop)
dp->directory_treetop = slapi_sdn_dup(directory_subtree);
}
}
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_directory_treetop\n");
}
/*
@@ -1098,8 +1013,6 @@ windows_private_dirsync_control(const Repl_Agmt *ra)
Dirsync_Private *dp;
char iscritical = PR_TRUE;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_dirsync_control\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1116,9 +1029,6 @@ windows_private_dirsync_control(const Repl_Agmt *ra)
ber_free(ber, 1);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_dirsync_control\n");
-
-
return control;
}
@@ -1143,8 +1053,6 @@ windows_private_update_dirsync_control(const Repl_Agmt *ra, LDAPControl **contro
int return_value = LDAP_SUCCESS;
#endif
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_update_dirsync_control\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1208,9 +1116,6 @@ windows_private_update_dirsync_control(const Repl_Agmt *ra, LDAPControl **contro
#ifdef FOR_DEBUGGING
slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name,
"<= windows_private_update_dirsync_control - rc=%d\n", return_value);
-#else
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name,
- "<= windows_private_update_dirsync_control\n");
#endif
}
@@ -1219,15 +1124,11 @@ windows_private_dirsync_has_more(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_dirsync_has_more\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_dirsync_has_more\n");
-
return dp->dirsync_cookie_has_more;
}
@@ -1236,16 +1137,12 @@ windows_private_null_dirsync_cookie(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_null_dirsync_control\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->dirsync_cookie_len = 0;
slapi_ch_free_string(&dp->dirsync_cookie);
dp->dirsync_cookie = NULL;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_null_dirsync_control\n");
}
static Slapi_Mods *
@@ -1272,8 +1169,6 @@ windows_private_save_dirsync_cookie(const Repl_Agmt *ra)
int rc = 0;
Slapi_Mods *mods = NULL;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_save_dirsync_cookie\n");
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1307,7 +1202,6 @@ windows_private_save_dirsync_cookie(const Repl_Agmt *ra)
slapi_mods_free(&mods);
slapi_sdn_free(&sdn);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_save_dirsync_cookie\n");
return rc;
}
@@ -1325,7 +1219,6 @@ windows_private_load_dirsync_cookie(const Repl_Agmt *ra)
Slapi_Entry *entry = NULL;
Slapi_Attr *attr = NULL;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_load_dirsync_cookie\n");
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1368,8 +1261,6 @@ windows_private_load_dirsync_cookie(const Repl_Agmt *ra)
slapi_sdn_free(&sdn);
slapi_pblock_destroy(pb);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_load_dirsync_cookie\n");
-
return rc;
}
@@ -1379,13 +1270,9 @@ windows_private_get_raw_entry(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_raw_entry\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_raw_entry\n");
-
return dp->raw_entry;
}
@@ -1395,8 +1282,6 @@ windows_private_set_raw_entry(const Repl_Agmt *ra, Slapi_Entry *e)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_raw_entry\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
@@ -1408,8 +1293,6 @@ windows_private_set_raw_entry(const Repl_Agmt *ra, Slapi_Entry *e)
slapi_entry_free(dp->raw_entry);
dp->raw_entry = e;
}
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_raw_entry\n");
}
/* Setting keep to 1 will cause the current raw entry to remain, even if
@@ -1420,14 +1303,10 @@ windows_private_set_keep_raw_entry(const Repl_Agmt *ra, int keep)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_keep_raw_entry\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->keep_raw_entry = keep;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_keep_raw_entry\n");
}
int
@@ -1435,13 +1314,9 @@ windows_private_get_keep_raw_entry(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_keep_raw_entry\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_keep_raw_entry\n");
-
return dp->keep_raw_entry;
}
@@ -1450,13 +1325,9 @@ windows_private_get_api_cookie(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_api_cookie\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_api_cookie\n");
-
return dp->api_cookie;
}
@@ -1465,13 +1336,9 @@ windows_private_set_api_cookie(Repl_Agmt *ra, void *api_cookie)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_api_cookie\n");
-
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->api_cookie = api_cookie;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_api_cookie\n");
}
time_t
@@ -1479,15 +1346,11 @@ windows_private_get_sync_interval(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_sync_interval\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_sync_interval\n");
-
return dp->sync_interval;
}
@@ -1497,8 +1360,6 @@ windows_private_set_sync_interval(Repl_Agmt *ra, char *str)
Dirsync_Private *dp;
time_t tmpval = 0;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_sync_interval\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
@@ -1507,8 +1368,6 @@ windows_private_set_sync_interval(Repl_Agmt *ra, char *str)
if (str && (tmpval = (time_t)atol(str))) {
dp->sync_interval = tmpval;
}
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_sync_interval\n");
}
int
@@ -1516,15 +1375,11 @@ windows_private_get_move_action(const Repl_Agmt *ra)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_get_move_action\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_get_move_action\n");
-
return dp->move_action;
}
@@ -1533,15 +1388,11 @@ windows_private_set_move_action(const Repl_Agmt *ra, int value)
{
Dirsync_Private *dp;
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_private_set_move_action\n");
-
PR_ASSERT(ra);
dp = (Dirsync_Private *)agmt_get_priv(ra);
PR_ASSERT(dp);
dp->move_action = value;
-
- slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= windows_private_set_move_action\n");
}
static PRCallOnceType winsync_callOnce = {0, 0, 0};
@@ -2694,30 +2545,18 @@ static void *test_winsync_api_v3[] = {
static int
test_winsync_plugin_start(Slapi_PBlock *pb __attribute__((unused)))
{
- slapi_log_err(SLAPI_LOG_TRACE, test_winsync_plugin_name,
- "--> test_winsync_plugin_start -- begin\n");
-
if (slapi_apib_register(WINSYNC_v3_0_GUID, test_winsync_api_v3)) {
slapi_log_err(SLAPI_LOG_ERR, test_winsync_plugin_name,
"test_winsync_plugin_start - Failed to register winsync api -- end\n");
return -1;
}
-
- slapi_log_err(SLAPI_LOG_TRACE, test_winsync_plugin_name,
- "<-- test_winsync_plugin_start -- end\n");
return 0;
}
static int
test_winsync_plugin_close(Slapi_PBlock *pb __attribute__((unused)))
{
- slapi_log_err(SLAPI_LOG_TRACE, test_winsync_plugin_name,
- "--> test_winsync_plugin_close -- begin\n");
-
slapi_apib_unregister(WINSYNC_v3_0_GUID);
-
- slapi_log_err(SLAPI_LOG_TRACE, test_winsync_plugin_name,
- "<-- test_winsync_plugin_close -- end\n");
return 0;
}
@@ -2727,9 +2566,6 @@ test_winsync_plugin_close(Slapi_PBlock *pb __attribute__((unused)))
int
test_winsync_plugin_init(Slapi_PBlock *pb)
{
- slapi_log_err(SLAPI_LOG_TRACE, test_winsync_plugin_name,
- "--> test_winsync_plugin_init -- begin\n");
-
if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION,
SLAPI_PLUGIN_VERSION_01) != 0 ||
slapi_pblock_set(pb, SLAPI_PLUGIN_START_FN,
@@ -2751,8 +2587,6 @@ test_winsync_plugin_init(Slapi_PBlock *pb)
return -1;
}
- slapi_log_err(SLAPI_LOG_TRACE, test_winsync_plugin_name,
- "<-- test_winsync_plugin_init -- end\n");
return 0;
}
diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c
index 215ca3cdb..844e1c85f 100644
--- a/ldap/servers/plugins/replication/windows_protocol_util.c
+++ b/ldap/servers/plugins/replication/windows_protocol_util.c
@@ -4038,6 +4038,7 @@ map_entry_dn_inbound_ext(Slapi_Entry *e, Slapi_DN **dn, const Repl_Agmt *ra, int
const subtreePair *subtree_pairs = windows_private_get_subtreepairs(ra);
const subtreePair *sp = NULL;
const Slapi_DN *remote_sdn = slapi_entry_get_sdn_const(e);
+ const PRBool flatten_tree = windows_private_get_flatten_tree(ra);
if (subtree_pairs) {
for (sp = subtree_pairs; sp && sp->ADsubtree; sp++) {
@@ -4056,15 +4057,21 @@ map_entry_dn_inbound_ext(Slapi_Entry *e, Slapi_DN **dn, const Repl_Agmt *ra, int
goto error;
}
- if (sp) {
- container_str = extract_container(slapi_entry_get_sdn_const(e), sp->ADsubtree);
- } else {
- container_str = extract_container(slapi_entry_get_sdn_const(e),
- windows_private_get_windows_subtree(ra));
+ if (!flatten_tree) {
+ if (sp) {
+ container_str = extract_container(slapi_entry_get_sdn_const(e), sp->ADsubtree);
+ } else {
+ container_str = extract_container(slapi_entry_get_sdn_const(e),
+ windows_private_get_windows_subtree(ra));
+ }
}
/* Local DNs for users and groups are different */
if (is_user) {
- new_dn_string = slapi_create_dn_string("uid=\"%s\",%s%s", username, container_str, suffix);
+ if (flatten_tree) {
+ new_dn_string = slapi_create_dn_string("uid=\"%s\",%s", username, suffix);
+ } else {
+ new_dn_string = slapi_create_dn_string("uid=\"%s\",%s%s", username, container_str, suffix);
+ }
winsync_plugin_call_get_new_ds_user_dn_cb(ra,
windows_private_get_raw_entry(ra),
e,
@@ -4072,7 +4079,11 @@ map_entry_dn_inbound_ext(Slapi_Entry *e, Slapi_DN **dn, const Repl_Agmt *ra, int
sp ? sp->DSsubtree : windows_private_get_directory_subtree(ra),
sp ? sp->ADsubtree : windows_private_get_windows_subtree(ra));
} else {
- new_dn_string = slapi_create_dn_string("cn=\"%s\",%s%s", username, container_str, suffix);
+ if (flatten_tree) {
+ new_dn_string = slapi_create_dn_string("cn=\"%s\",%s", username, suffix);
+ } else {
+ new_dn_string = slapi_create_dn_string("cn=\"%s\",%s%s", username, container_str, suffix);
+ }
if (is_group) {
winsync_plugin_call_get_new_ds_group_dn_cb(ra,
windows_private_get_raw_entry(ra),
@@ -4087,6 +4098,10 @@ map_entry_dn_inbound_ext(Slapi_Entry *e, Slapi_DN **dn, const Repl_Agmt *ra, int
* which is normalized. Thus, we can use _normdn_.
*/
new_dn = slapi_sdn_new_normdn_passin(new_dn_string);
+ slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name,
+ "map_entry_dn_inbound - %s - mapped entry to dn [%s]\n",
+ agmt_get_long_name(ra),
+ slapi_sdn_get_dn(new_dn));
} else {
/* Error, no username */
retval = ENTRY_NOTFOUND;
diff --git a/ldap/servers/plugins/replication/windowsrepl.h b/ldap/servers/plugins/replication/windowsrepl.h
index 251f008b8..c70d85787 100644
--- a/ldap/servers/plugins/replication/windowsrepl.h
+++ b/ldap/servers/plugins/replication/windowsrepl.h
@@ -48,6 +48,8 @@ Slapi_Filter *windows_private_get_directory_filter(const Repl_Agmt *ra);
Slapi_Filter *windows_private_get_windows_filter(const Repl_Agmt *ra);
Slapi_Filter *windows_private_get_deleted_filter(const Repl_Agmt *ra);
const char *windows_private_get_purl(const Repl_Agmt *ra);
+void windows_private_set_flatten_tree(const Repl_Agmt *ra, PRBool value);
+PRBool windows_private_get_flatten_tree(const Repl_Agmt *ra);
/*
* The raw entry is the last raw entry read from AD - raw as opposed
* "cooked" - that is, having had schema processing done
diff --git a/src/lib389/cli/dscontainer b/src/lib389/cli/dscontainer
index e03a1e050..ca5110be1 100755
--- a/src/lib389/cli/dscontainer
+++ b/src/lib389/cli/dscontainer
@@ -294,6 +294,19 @@ binddn = cn=Directory Manager
# If we have been requested to re-index, do so now ...
_begin_check_reindex()
+ loglevel = os.getenv("ERRORLOG_LEVEL")
+ if loglevel is not None:
+ try:
+ n_loglevel = str(int(loglevel) | 266354688)
+ log.info(f"Set log level to {loglevel} | DEFAULT")
+ loglevel = n_loglevel
+        except ValueError:
+ log.error("Invalid ERRORLOG_LEVEL value, setting default ...")
+ loglevel = "266354688"
+ else:
+ # See /ldap/servers/slapd/slap.h SLAPD_DEFAULT_ERRORLOG_LEVEL
+ loglevel = "266354688"
+
# Yep! Run it ...
# Now unlike a normal lib389 start, we use subprocess and don't fork!
# TODO: Should we pass in a loglevel from env?
@@ -308,8 +321,7 @@ binddn = cn=Directory Manager
# This container version doesn't actually use or need the pidfile to track
# the process.
# "-i", "/data/run/slapd-localhost.pid",
- # See /ldap/servers/slapd/slap.h SLAPD_DEFAULT_ERRORLOG_LEVEL
- "-d", "266354688",
+ "-d", loglevel,
], stdout=None, stderr=None, env=os.environ.copy())
# Setup the process and shutdown handler in an init-esque fashion.
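
The start-up change above ORs any ERRORLOG_LEVEL taken from the container environment into the
built-in default, so extra debug levels add to, rather than replace, the standard logging. A small
illustration of the arithmetic (the 8192 replication-debug level is an assumed example value, not
something the script sets):

SLAPD_DEFAULT_ERRORLOG_LEVEL = 266354688  # see ldap/servers/slapd/slap.h, as noted above

requested = 8192                          # e.g. ERRORLOG_LEVEL=8192 for replication debugging
combined = requested | SLAPD_DEFAULT_ERRORLOG_LEVEL
# dscontainer then passes: "-d", str(combined)
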
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 4416084fe..2d405dfd5 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -950,6 +950,8 @@ def add_winsync_agmt(inst, basedn, log, args):
properties['nsds5replicaupdateschedule'] = args.schedule
if frac_list is not None:
properties['nsds5replicatedattributelist'] = frac_list
+ if args.flatten_tree is True:
+ properties['winsyncflattentree'] = "on"
# Create the agmt
try:
@@ -1579,6 +1581,7 @@ def create_parser(subparsers):
winsync_agmt_add_parser.add_argument('--busy-wait-time', help="Sets the amount of time in seconds a supplier should wait after "
"a consumer sends back a busy response before making another attempt to acquire access")
winsync_agmt_add_parser.add_argument('--session-pause-time', help="Sets the amount of time in seconds a supplier should wait between update sessions")
+    winsync_agmt_add_parser.add_argument('--flatten-tree', action='store_true', default=False, help="By default, the tree structure of AD is preserved in 389. This MAY cause replication to fail in some cases, as you may need to create missing OUs to recreate the same tree structure. When enabled, this setting removes the tree structure of AD and flattens all entries into the DS subtree. This does NOT affect or change the tree structure of the AD directory.")
winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initializes the agreement after creating it")
# Set - Note can not use add's parent args because for "set" there are no "required=True" args
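
For reference, a minimal Python sketch of the inbound DN mapping that winsyncflattentree /
--flatten-tree controls; the function name and the container handling are illustrative
assumptions, not the plugin's API:

def map_inbound_dn(username, suffix, ad_container_rdns, is_user=True, flatten_tree=False):
    # Users and groups use different RDN attributes, as in map_entry_dn_inbound_ext()
    rdn_attr = "uid" if is_user else "cn"
    if flatten_tree:
        # The AD container structure is dropped; the entry lands directly under the DS subtree
        return '%s="%s",%s' % (rdn_attr, username, suffix)
    # Otherwise the intermediate AD containers are replayed under the DS subtree
    container = "".join("%s," % rdn for rdn in ad_container_rdns)
    return '%s="%s",%s%s' % (rdn_attr, username, container, suffix)

# map_inbound_dn("jdoe", "ou=People,dc=example,dc=com", ["ou=Sales"], flatten_tree=True)
#   -> 'uid="jdoe",ou=People,dc=example,dc=com'
# map_inbound_dn("jdoe", "ou=People,dc=example,dc=com", ["ou=Sales"])
#   -> 'uid="jdoe",ou=Sales,ou=People,dc=example,dc=com'
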
| 0 |
79214f5bc9cf54ccc5df4a8fe7c0fcaf0fc42a3e
|
389ds/389-ds-base
|
Issue 5666 - CLI - Add timeout parameter for tasks
Description: Add a timeout argument for all dsconf tasks
relates: https://github.com/389ds/389-ds-base/issues/5666
Reviewed by: spichugi & jchapman(Thanks!!)
|
commit 79214f5bc9cf54ccc5df4a8fe7c0fcaf0fc42a3e
Author: Mark Reynolds <[email protected]>
Date: Wed Feb 15 17:08:30 2023 -0500
Issue 5666 - CLI - Add timeout parameter for tasks
Description: Add a timeout argument for all dsconf tasks
relates: https://github.com/389ds/389-ds-base/issues/5666
Reviewed by: spichugi & jchapman(Thanks!!)
diff --git a/dirsrvtests/tests/suites/clu/dsconf_tasks_test.py b/dirsrvtests/tests/suites/clu/dsconf_tasks_test.py
new file mode 100644
index 000000000..ac1109bc9
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsconf_tasks_test.py
@@ -0,0 +1,219 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2023 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import pytest
+import os
+from lib389._constants import DEFAULT_SUFFIX
+# from lib389.topologies import topology_m1 as topo
+from lib389.topologies import topology_st as topo
+from lib389.tasks import (ImportTask, ExportTask, BackupTask, RestoreTask, AutomemberRebuildMembershipTask,
+ AutomemberAbortRebuildTask, MemberUidFixupTask, MemberOfFixupTask, USNTombstoneCleanupTask,
+ DBCompactTask, EntryUUIDFixupTask, SchemaReloadTask, SyntaxValidateTask,
+                          FixupLinkedAttributesTask)
+from lib389.plugins import USNPlugin, POSIXWinsyncPlugin, LinkedAttributesPlugin, AutoMembershipPlugin, MemberOfPlugin
+from lib389.dbgen import dbgen_users
+from lib389.idm.user import UserAccount
+from lib389.idm.group import Groups
+from lib389.idm.posixgroup import PosixGroups  # not sure if this is needed yet MARK
+
+log = logging.getLogger(__name__)
+
+
+def test_task_timeout(topo):
+    """Test that the timeout setting works for all "tasks"
+
+ :id: 6a6f5176-76bf-424d-bc10-d33bdfa529eb
+ :setup: Standalone Instance
+ :steps:
+ 1. Test timeout for import task
+ 2. Test timeout for export task
+ 3. Test timeout for schema validate task
+ 4. Test timeout for schema reload task
+ 5. Test timeout for automember rebuild
+ 6. Test timeout for automember abort
+ 7. Test timeout for usn cleanup task
+ 8. Test timeout for posix group fixup task
+ 9. Test timeout for member UID fixup task
+ 10. Test timeout for memberof fixup task
+ 11. Test timeout for entryuuid fixup task
+ 12. Test timeout for linked attrs fixup task
+        13. Test timeout for db compact task
+ :expectedresults:
+ 1. Task timed out
+ 2. Task timed out
+ 3. Task timed out
+ 4. Task timed out
+ 5. Task timed out
+ 6. Task timed out
+ 7. Task timed out
+ 8. Task timed out
+ 9. Task timed out
+ 10. Task timed out
+ 11. Task timed out
+ 12. Task timed out
+ 13. Task timed out
+ """
+
+ #inst = topo.ms['supplier1'] --> this leads to a deadlock when testing MemberOfFixupTask
+ inst = topo.standalone
+
+ # Enable plugins
+ plugins = [USNPlugin, POSIXWinsyncPlugin, LinkedAttributesPlugin, AutoMembershipPlugin, MemberOfPlugin]
+ for plugin in plugins:
+ plugin(inst).enable()
+ inst.restart()
+
+ # Test timeout for import task, first create LDIF
+ import_ldif = inst.ldifdir + '/import_task_timeout.ldif'
+ dbgen_users(inst, 100000, import_ldif, DEFAULT_SUFFIX, parent="ou=people," + DEFAULT_SUFFIX, generic=True)
+
+ task = ImportTask(inst)
+ task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # Test timeout for export task
+ export_ldif = inst.ldifdir + '/export_task_timeout.ldif'
+ task = ExportTask(inst)
+ task.export_suffix_to_ldif(export_ldif, DEFAULT_SUFFIX)
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # Test timeout for schema validate task
+ task = SyntaxValidateTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # Test timeout for schema reload task (runs too fast)
+ """
+ task = SchemaReloadTask(inst).create(properties={
+ 'schemadir': inst.schemadir,
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+ """
+
+ # Test timeout for automember rebuild
+ task = AutomemberRebuildMembershipTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # Test timeout for automember abort (runs too fast)
+ """
+ AutomemberRebuildMembershipTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task = AutomemberAbortRebuildTask(inst).create()
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+ """
+
+ # Test timeout for usn cleanup task, first delete a bunch of users
+ for idx in range(1, 1001):
+ entry_idx = str(idx).zfill(6)
+ dn = f"uid=user{entry_idx},ou=people,{DEFAULT_SUFFIX}"
+ UserAccount(inst, dn=dn).delete()
+ task = USNTombstoneCleanupTask(inst).create(properties={
+ 'suffix': DEFAULT_SUFFIX,
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # Test timeout for Posix Group fixup task (runs too fast)
+ """
+ groups = PosixGroups(inst, DEFAULT_SUFFIX)
+ start_range = 10000
+ for idx in range(1, 10):
+ group_props = {
+ 'cn': 'test_posix_group_' + str(idx),
+ 'objectclass': ['posixGroup', 'groupofuniquenames'],
+ 'gidNumber': str(idx)
+ }
+ group = groups.create(properties=group_props)
+ for user_idx in range(start_range, start_range + 1000):
+ entry_idx = str(user_idx).zfill(6)
+ dn = f"uid=user{entry_idx},ou=people,{DEFAULT_SUFFIX}"
+ group.add('memberuid', dn)
+ group.add('uniquemember', dn)
+ start_range += 1000
+
+ task = MemberUidFixupTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+ """
+
+ # Test timeout for memberOf fixup task
+ groups = Groups(inst, DEFAULT_SUFFIX)
+ group_props = {'cn': 'test_group'}
+ group = groups.create(properties=group_props)
+ for idx in range(5000, 6000):
+ entry_idx = str(idx).zfill(6)
+ dn = f"uid=user{entry_idx},ou=people,{DEFAULT_SUFFIX}"
+ group.add_member(dn)
+
+ task = MemberOfFixupTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # Test timeout for entryuuid fixup task
+ task = EntryUUIDFixupTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+
+ # test timeout for linked attrs fixup (runs too fast)
+ """
+ task = FixupLinkedAttributesTask(inst).create(properties={
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': "objectClass=*"
+ })
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+ """
+
+    # Test timeout for db compact task (runs too fast)
+ """
+ task = DBCompactTask(inst).create()
+ task.wait(timeout=.5, sleep_interval=.5)
+ assert task.get_exit_code() is None
+ task.wait(timeout=0)
+ """
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
diff --git a/dirsrvtests/tests/suites/clu/fixup_test.py b/dirsrvtests/tests/suites/clu/fixup_test.py
index cc6cda2f1..c37824a26 100644
--- a/dirsrvtests/tests/suites/clu/fixup_test.py
+++ b/dirsrvtests/tests/suites/clu/fixup_test.py
@@ -83,6 +83,7 @@ def test_posix_winsync_fixup(topology_st, set_log_file_and_ldif):
args = FakeArgs()
args.DN = DEFAULT_SUFFIX
args.filter = None
+ args.timeout = 0
log.info('Run Fixup task')
do_fixup(standalone, DEFAULT_SUFFIX, log, args)
diff --git a/dirsrvtests/tests/suites/import/import_warning_test.py b/dirsrvtests/tests/suites/import/import_warning_test.py
index ee1deb54a..255a67cbf 100644
--- a/dirsrvtests/tests/suites/import/import_warning_test.py
+++ b/dirsrvtests/tests/suites/import/import_warning_test.py
@@ -106,6 +106,7 @@ def test_import_warning(topology_st):
args.only_core = False
args.include_suffixes = 'dc=example,dc=com'
args.exclude_suffixes = None
+ args.timeout = 0
log.info('Import the LDIF file')
backend_import(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
diff --git a/ldap/servers/plugins/syntaxes/validate_task.c b/ldap/servers/plugins/syntaxes/validate_task.c
index d069a3351..a634fd762 100644
--- a/ldap/servers/plugins/syntaxes/validate_task.c
+++ b/ldap/servers/plugins/syntaxes/validate_task.c
@@ -184,7 +184,7 @@ syntax_validate_task_thread(void *arg)
slapi_task_begin(task, 1);
slapi_task_log_notice(task, "Syntax validation task starting (arg: %s) ...\n",
td->filter_str);
- slapi_log_err(SLAPI_LOG_ERR, SYNTAX_PLUGIN_SUBSYSTEM,
+ slapi_log_err(SLAPI_LOG_INFO, SYNTAX_PLUGIN_SUBSYSTEM,
"syntax_validate_task_thread - Starting (base: \"%s\", filter: \"%s\") ...\n",
td->dn, td->filter_str);
@@ -206,7 +206,7 @@ syntax_validate_task_thread(void *arg)
slapi_task_log_status(task, "Syntax validate task complete. Found %" PRIu64
" invalid entries.\n",
slapi_counter_get_value(td->invalid_entries));
- slapi_log_err(SLAPI_LOG_ERR, SYNTAX_PLUGIN_SUBSYSTEM, "syntax_validate_task_thread - Complete."
+ slapi_log_err(SLAPI_LOG_INFO, SYNTAX_PLUGIN_SUBSYSTEM, "syntax_validate_task_thread - Complete."
" Found %" PRIu64 " invalid entries.\n",
slapi_counter_get_value(td->invalid_entries));
slapi_task_inc_progress(task);
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index 07e3df246..7a74ef7a6 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# Copyright (C) 2019 William Brown <[email protected]>
# All rights reserved.
#
@@ -259,7 +259,7 @@ def backend_import(inst, basedn, log, args):
task = mc.import_ldif(ldifs=args.ldifs, chunk_size=args.chunks_size, encrypted=args.encrypted,
gen_uniq_id=args.gen_uniq_id, only_core=args.only_core, include_suffixes=args.include_suffixes,
exclude_suffixes=args.exclude_suffixes)
- task.wait(timeout=None)
+ task.wait(timeout=args.timeout)
result = task.get_exit_code()
warning = task.get_task_warn()
@@ -269,7 +269,10 @@ def backend_import(inst, basedn, log, args):
else:
log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning))
else:
- raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))
+ if result is None:
+ raise ValueError(f"Import task has not completed\n-------------------------\n{ensure_str(task.get_task_log())}")
+ else:
+ raise ValueError(f"Import task failed\n-------------------------\n{ensure_str(task.get_task_log())}")
def backend_export(inst, basedn, log, args):
@@ -291,13 +294,16 @@ def backend_export(inst, basedn, log, args):
encrypted=args.encrypted, min_base64=args.min_base64, no_dump_uniq_id=args.no_dump_uniq_id,
replication=args.replication, not_folded=args.not_folded, no_seq_num=args.no_seq_num,
include_suffixes=args.include_suffixes, exclude_suffixes=args.exclude_suffixes)
- task.wait(timeout=None)
+ task.wait(timeout=args.timeout)
result = task.get_exit_code()
if task.is_complete() and result == 0:
log.info("The export task has finished successfully")
else:
- raise ValueError("Export task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))
+ if result is None:
+ raise ValueError(f"Export task did not complete\n-------------------------\n{ensure_str(task.get_task_log())}")
+ else:
+ raise ValueError(f"Export task failed\n-------------------------\n{ensure_str(task.get_task_log())}")
def is_db_link(inst, rdn):
@@ -818,9 +824,14 @@ def backend_compact(inst, basedn, log, args):
if args.only_changelog:
task_properties = {'justChangelog': 'yes'}
task.create(properties=task_properties)
- task.wait()
- if task.get_exit_code() != 0:
- raise ValueError("Failed to create Database Compaction Task")
+ task.wait(timeout=args.timeout)
+ result = task.get_exit_code()
+ if result != 0:
+ if result is None:
+ raise ValueError("Database Compaction Task has not completed")
+ else:
+ raise ValueError(f"Database Compaction Task failed, error: {result}")
+
log.info("Successfully started Database Compaction Task")
@@ -1106,6 +1117,8 @@ def create_parser(subparsers):
help="Specifies the suffixes or the subtrees to be included")
import_parser.add_argument('-x', '--exclude-suffixes', nargs='+',
help="Specifies the suffixes to be excluded")
+ import_parser.add_argument('--timeout', type=int, default=0,
+                               help="Set a timeout to wait for the import task. Default is 0 (no timeout)")
#######################################################
# Export LDIF
@@ -1135,6 +1148,8 @@ def create_parser(subparsers):
help="Specifies the suffixes or the subtrees to be included")
export_parser.add_argument('-x', '--exclude-suffixes', nargs='+',
help="Specifies the suffixes to be excluded")
+ export_parser.add_argument('--timeout', default=0, type=int,
+ help="Set a timeout to wait for the export task. Default is 0 (no timeout)")
#######################################################
# Create a new backend database
@@ -1171,3 +1186,5 @@ def create_parser(subparsers):
compact_parser = subcommands.add_parser('compact-db', help='Compact the database and the replication changelog')
compact_parser.set_defaults(func=backend_compact)
compact_parser.add_argument('--only-changelog', action='store_true', help='Compacts only the replication change log')
+ compact_parser.add_argument('--timeout', default=0, type=int,
+ help="Set a timeout to wait for the compaction task. Default is 0 (no timeout)")
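
The import, export and compact handlers above all follow the same pattern: wait with the
user-supplied timeout, then treat a None exit code as "still running" (timed out) and a non-zero
code as a real failure. A stand-alone sketch of that pattern (the helper name is hypothetical;
the task object is any lib389 Task):

def run_task_with_timeout(task, timeout, log):
    # A timeout of 0 (the CLI default) means wait indefinitely
    task.wait(timeout=timeout)
    result = task.get_exit_code()
    if task.is_complete() and result == 0:
        log.info("The task has finished successfully")
    elif result is None:
        # wait() returned before the task finished, i.e. the timeout expired
        raise ValueError("The task has not completed before the timeout")
    else:
        raise ValueError("The task failed with exit code {}".format(result))
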
diff --git a/src/lib389/lib389/cli_conf/backup.py b/src/lib389/lib389/cli_conf/backup.py
index 7c8df69c7..32f5db2f1 100644
--- a/src/lib389/lib389/cli_conf/backup.py
+++ b/src/lib389/lib389/cli_conf/backup.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -11,27 +11,33 @@ def backup_create(inst, basedn, log, args):
log = log.getChild('backup_create')
task = inst.backup_online(archive=args.archive, db_type=args.db_type)
- task.wait()
+ task.wait(timeout=args.timeout)
result = task.get_exit_code()
if task.is_complete() and result == 0:
log.info("The backup create task has finished successfully")
else:
- raise ValueError("The backup create task has failed with the error code: ({})".format(result))
+ if result is None:
+ raise ValueError("The backup create task has not completed. Please check server's error log for more information")
+ else:
+ raise ValueError(f"The backup create task has failed with the error code: ({result})")
def backup_restore(inst, basedn, log, args):
log = log.getChild('backup_restore')
task = inst.restore_online(archive=args.archive, db_type=args.db_type)
- task.wait()
+ task.wait(timeout=args.timeout)
result = task.get_exit_code()
task_log = task.get_task_log()
if task.is_complete() and result == 0:
log.info("The backup restore task has finished successfully")
else:
- raise ValueError("The backup restore task has failed with the error code: {}\n{}".format(result, task_log))
+ if result is None:
+ raise ValueError(f"The backup restore task has not completed. Please check server's error log for more information\n{task_log}")
+ else:
+ raise ValueError(f"The backup restore task has failed with the error code: {result}\n{task_log}")
def create_parser(subparsers):
@@ -39,17 +45,21 @@ def create_parser(subparsers):
subcommands = backup_parser.add_subparsers(help="action")
- create_parser = subcommands.add_parser('create', help="Creates a backup of the database")
- create_parser.set_defaults(func=backup_create)
- create_parser.add_argument('archive', nargs='?', default=None,
- help="Sets the directory where to store the backup files. "
- "Format: instance_name-year_month_date_hour_minutes_seconds. "
- "Default: /var/lib/dirsrv/slapd-instance/bak/ ")
- create_parser.add_argument('-t', '--db-type', default="ldbm database",
- help="Sets the database type. Default: ldbm database")
+ create_backup_parser = subcommands.add_parser('create', help="Creates a backup of the database")
+ create_backup_parser.set_defaults(func=backup_create)
+ create_backup_parser.add_argument('archive', nargs='?', default=None,
+ help="Sets the directory where to store the backup files. "
+ "Format: instance_name-year_month_date_hour_minutes_seconds. "
+ "Default: /var/lib/dirsrv/slapd-instance/bak/ ")
+ create_backup_parser.add_argument('-t', '--db-type', default="ldbm database",
+ help="Sets the database type. Default: ldbm database")
+ create_backup_parser.add_argument('--timeout', type=int, default=120,
+                                      help="Sets the task timeout. Default is 120 seconds.")
restore_parser = subcommands.add_parser('restore', help="Restores a database from a backup")
restore_parser.set_defaults(func=backup_restore)
restore_parser.add_argument('archive', help="Set the directory that contains the backup files")
restore_parser.add_argument('-t', '--db-type', default="ldbm database",
help="Sets the database type. Default: ldbm database")
+ restore_parser.add_argument('--timeout', type=int, default=120,
+ help="Sets the task timeout. Default is 120 seconds.")
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
index 97d6bd659..8851aab32 100644
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -158,12 +158,15 @@ def fixup(inst, basedn, log, args):
fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
if args.wait:
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
- fixup_task.wait(timeout=None)
+ fixup_task.wait(timeout=args.timeout)
exitcode = fixup_task.get_exit_code()
if exitcode != 0:
- log.error(f'Rebuild membership task "{fixup_task.dn}" for {args.DN} has failed (error {exitcode}). Please, check logs')
+                if exitcode is None:
+ raise ValueError(f'Rebuild membership task "{fixup_task.dn}" for {args.DN} has not completed. Please, check logs')
+ else:
+ raise ValueError(f'Rebuild membership task "{fixup_task.dn}" for {args.DN} has failed (error {exitcode}). Please, check logs')
else:
- log.info('Fixup task successfully completed')
+                log.info('Fixup task successfully completed')
else:
log.info(f'Successfully added task entry "{fixup_task.dn}". This task is running in the background. To track its progress you can use the "fixup-status" command.')
@@ -179,10 +182,13 @@ def abort(inst, basedn, log, args):
if not plugin.status():
log.error("'%s' is disabled. Abort rebuild membership task can't be executed" % plugin.rdn)
fixup_task = plugin.abort_fixup(args.DN, args.filter)
- fixup_task.wait()
+ fixup_task.wait(timeout=args.timeout)
exitcode = fixup_task.get_exit_code()
if exitcode != 0:
- log.error('Abort rebuild membership task for %s has failed. Please, check logs')
+ if exitcode is None:
+ raise ValueError('Abort rebuild membership task has not completed. Please, check logs')
+ else:
+ raise ValueError('Abort rebuild membership task has failed. Please, check logs')
else:
log.info('Successfully added abort task entry')
@@ -267,6 +273,8 @@ def create_parser(subparsers):
help="Clean up previous group memberships before rebuilding")
fixup_task.add_argument('--wait', action='store_true',
help="Wait for the task to finish, this could take a long time")
+ fixup_task.add_argument('--timeout', default=0, type=int,
+ help="Set a timeout to wait for the fixup task. Default is 0 (no timeout)")
fixup_status = subcommands.add_parser('fixup-status', help='Check the status of a fix-up task')
fixup_status.set_defaults(func=do_fixup_status)
@@ -277,3 +285,5 @@ def create_parser(subparsers):
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
abort_fixup.set_defaults(func=abort)
+ abort_fixup.add_argument('--timeout', default=0, type=int,
+ help="Set a timeout to wait for the abort task. Default is 0 (no timeout)")
\ No newline at end of file
diff --git a/src/lib389/lib389/cli_conf/plugins/entryuuid.py b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
index 3d2807005..af5ffd4e3 100644
--- a/src/lib389/lib389/cli_conf/plugins/entryuuid.py
+++ b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2021 William Brown <[email protected]>
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -12,6 +12,7 @@ from lib389.plugins import EntryUUIDPlugin, EntryUUIDFixupTasks
from lib389.cli_conf import add_generic_plugin_parsers
from lib389.utils import get_task_status
+
def do_fixup(inst, basedn, log, args):
plugin = EntryUUIDPlugin(inst)
log.info('Attempting to add task entry...')
@@ -21,10 +22,13 @@ def do_fixup(inst, basedn, log, args):
fixup_task = plugin.fixup(args.DN, args.filter)
if args.wait:
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
- fixup_task.wait(timeout=None)
+ fixup_task.wait(timeout=args.timeout)
exitcode = fixup_task.get_exit_code()
if exitcode != 0:
- log.error(f'EntryUUID fixup task "{fixup_task.dn}" for {args.DN} has failed (error {exitcode}). Please, check logs')
+ if exitcode is None:
+ raise ValueError(f'EntryUUID fixup task "{fixup_task.dn}" for {args.DN} has not completed. Please, check logs')
+ else:
+ raise ValueError(f'EntryUUID fixup task "{fixup_task.dn}" for {args.DN} has failed (error {exitcode}). Please, check logs')
else:
log.info('Fixup task successfully completed')
else:
@@ -35,6 +39,7 @@ def do_fixup_status(inst, basedn, log, args):
get_task_status(inst, log, EntryUUIDFixupTasks, dn=args.dn, show_log=args.show_log,
watch=args.watch, use_json=args.json)
+
def create_parser(subparsers):
referint = subparsers.add_parser('entryuuid', help='Manage and configure EntryUUID plugin')
subcommands = referint.add_subparsers(help='action')
@@ -49,6 +54,8 @@ def create_parser(subparsers):
'will have their EntryUUID attribute regenerated if not present.')
fixup.add_argument('--wait', action='store_true',
help="Wait for the task to finish, this could take a long time")
+ fixup.add_argument('--timeout', type=int, default=0,
+ help="Sets the task timeout. Default is 0 (no timeout)")
fixup_status = subcommands.add_parser('fixup-status', help='Check the status of a fix-up task')
fixup_status.set_defaults(func=do_fixup_status)
diff --git a/src/lib389/lib389/cli_conf/plugins/memberof.py b/src/lib389/lib389/cli_conf/plugins/memberof.py
index 1a7613817..2d0f80c57 100644
--- a/src/lib389/lib389/cli_conf/plugins/memberof.py
+++ b/src/lib389/lib389/cli_conf/plugins/memberof.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# Copyright (C) 2019 William Brown <[email protected]>
# All rights reserved.
#
@@ -80,10 +80,13 @@ def do_fixup(inst, basedn, log, args):
fixup_task = plugin.fixup(args.DN, args.filter)
if args.wait:
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
- fixup_task.wait(timeout=None)
+ fixup_task.wait(timeout=args.timeout)
exitcode = fixup_task.get_exit_code()
if exitcode != 0:
- log.error(f'MemberOf fixup task "{fixup_task.dn}" for {args.DN} has failed (error {exitcode}). Please, check logs')
+            if exitcode is None:
+ raise ValueError(f'MemberOf fixup task "{fixup_task.dn}" for {args.DN} has not completed. Please, check logs')
+ else:
+ raise ValueError(f'MemberOf fixup task "{fixup_task.dn}" for {args.DN} has failed (error {exitcode}). Please, check logs')
else:
log.info('Fixup task successfully completed')
else:
@@ -94,6 +97,7 @@ def do_fixup_status(inst, basedn, log, args):
get_task_status(inst, log, MemberOfFixupTasks, dn=args.dn, show_log=args.show_log,
watch=args.watch, use_json=args.json)
+
def _add_parser_args(parser):
parser.add_argument('--attr',
help='Specifies the attribute in the user entry for the Directory Server '
@@ -154,6 +158,8 @@ def create_parser(subparsers):
'their memberOf attribute regenerated.')
fixup.add_argument('--wait', action='store_true',
help="Wait for the task to finish, this could take a long time")
+ fixup.add_argument('--timeout', type=int, default=0,
+                       help="Sets the task timeout. Default is 0 (no timeout)")
fixup_status = subcommands.add_parser('fixup-status', help='Check the status of a fix-up task')
fixup_status.set_defaults(func=do_fixup_status)
diff --git a/src/lib389/lib389/cli_conf/plugins/posix_winsync.py b/src/lib389/lib389/cli_conf/plugins/posix_winsync.py
index 37a8118c8..8a97ef422 100644
--- a/src/lib389/lib389/cli_conf/plugins/posix_winsync.py
+++ b/src/lib389/lib389/cli_conf/plugins/posix_winsync.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -31,10 +31,13 @@ def do_fixup(inst, basedn, log, args):
log.error(f"'{plugin.rdn}' is disabled. Fix up task can't be executed")
return
fixup_task = plugin.fixup(args.DN, args.filter)
- fixup_task.wait()
+ fixup_task.wait(timeout=args.timeout)
exitcode = fixup_task.get_exit_code()
if exitcode != 0:
- log.error(f'MemberUID task for {args.DN} has failed. Please, check logs')
+ if exitcode is None:
+ raise ValueError(f'MemberUID task for {args.DN} has not completed. Please, check logs')
+ else:
+ raise ValueError(f'MemberUID task for {args.DN} has failed. Please, check logs')
else:
log.info('Successfully added task entry')
@@ -73,3 +76,5 @@ def create_parser(subparsers):
help='Filter for entries to fix up.\n If omitted, all entries with objectclass '
'inetuser/inetadmin/nsmemberof under the specified base will have '
'their memberOf attribute regenerated.')
+ fixup.add_argument('--timeout', default=120, type=int,
+ help="Set a timeout to wait for the fixup task. Default is 120 seconds")
\ No newline at end of file
diff --git a/src/lib389/lib389/cli_conf/plugins/usn.py b/src/lib389/lib389/cli_conf/plugins/usn.py
index ac09500fc..bbc55da75 100644
--- a/src/lib389/lib389/cli_conf/plugins/usn.py
+++ b/src/lib389/lib389/cli_conf/plugins/usn.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -36,10 +36,13 @@ def tombstone_cleanup(inst, basedn, log, args):
if not plugin.status():
log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
task = plugin.cleanup(args.suffix, args.backend, args.max_usn)
- task.wait()
+ task.wait(timeout=args.timeout)
exitcode = task.get_exit_code()
if exitcode != 0:
- log.error('USM tombstone cleanup task has failed. Please, check logs')
+ if exitcode is None:
+            raise ValueError('USN tombstone cleanup task has not completed. Please, check logs')
+        else:
+            raise ValueError('USN tombstone cleanup task has failed. Please, check logs')
else:
log.info('Successfully added task entry')
@@ -69,3 +72,5 @@ def create_parser(subparsers):
'specified. Backend instance in which USN tombstone entries (backend)')
cleanup_parser.add_argument('-m', '--max-usn', type=int, help='Sets the highest USN value to delete when '
'removing tombstone entries (max_usn_to_delete)')
+ cleanup_parser.add_argument('--timeout', type=int, default=120,
+                                help="Sets the cleanup task timeout. Default is 120 seconds.")
diff --git a/src/lib389/lib389/cli_conf/schema.py b/src/lib389/lib389/cli_conf/schema.py
index 7f3d616b2..60bfa02d8 100644
--- a/src/lib389/lib389/cli_conf/schema.py
+++ b/src/lib389/lib389/cli_conf/schema.py
@@ -1,5 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -189,7 +190,7 @@ def reload_schema(inst, basedn, log, args):
print('Attempting to add task entry... This will fail if Schema Reload plug-in is not enabled.')
task = schema.reload(args.schemadir)
if args.wait:
- task.wait()
+ task.wait(timeout=args.timeout)
rc = task.get_exit_code()
if rc == 0:
print("Schema reload task ({}) successfully finished.".format(task.dn))
@@ -204,7 +205,7 @@ def validate_syntax(inst, basedn, log, args):
schema = Schema(inst)
log.info('Attempting to add task entry...')
validate_task = schema.validate_syntax(args.DN, args.filter)
- validate_task.wait()
+ validate_task.wait(timeout=args.timeout)
exitcode = validate_task.get_exit_code()
if exitcode != 0:
log.error(f'Validate syntax task for {args.DN} has failed. Please, check logs')
@@ -389,19 +390,24 @@ def create_parser(subparsers):
reload_parser.set_defaults(func=reload_schema)
reload_parser.add_argument('-d', '--schemadir', help="directory where schema files are located")
reload_parser.add_argument('--wait', action='store_true', default=False, help="Wait for the reload task to complete")
+ reload_parser.add_argument('--timeout', default=120, type=int,
+ help="Set a timeout to wait for the reload task. Default is 120 seconds")
validate_parser = schema_subcommands.add_parser('validate-syntax',
- help='Run a task to check every modification to attributes to make sure '
- 'that the new value has the required syntax for that attribute type')
+ help='Run a task to check that all attributes in an entry have the correct syntax')
validate_parser.set_defaults(func=validate_syntax)
validate_parser.add_argument('DN', help="Base DN that contains entries to validate")
validate_parser.add_argument('-f', '--filter', help='Filter for entries to validate.\n'
'If omitted, all entries with filter "(objectclass=*)" are validated')
+ validate_parser.add_argument('--timeout', default=120, type=int,
+ help="Set a timeout to wait for the validation task. Default is 120 seconds")
import_oldap_schema_parser = schema_subcommands.add_parser('import-openldap-file',
- help='Import an openldap formatted dynamic schema ldifs. These will contain values like olcAttributeTypes and olcObjectClasses.')
+ help='Import an openldap formatted dynamic schema ldifs. '
+ 'These will contain values like olcAttributeTypes and olcObjectClasses.')
import_oldap_schema_parser.set_defaults(func=import_openldap_schema_file)
import_oldap_schema_parser.add_argument('schema_file', help="Path to the openldap dynamic schema ldif to import")
import_oldap_schema_parser.add_argument('--confirm',
default=False, action='store_true',
- help="Confirm that you want to apply these schema migration actions to the 389-ds instance. By default no actions are taken.")
+ help="Confirm that you want to apply these schema migration actions to the "
+ "389-ds instance. By default no actions are taken.")
diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py
index e65e8d88b..63034f413 100644
--- a/src/lib389/lib389/dbgen.py
+++ b/src/lib389/lib389/dbgen.py
@@ -83,6 +83,7 @@ objectClass: person
objectClass: organizationalPerson
objectClass: inetOrgPerson
objectclass: inetUser
+objectclass: posixAccount
cn: {CN}
sn: {LAST}
uid: {UID}
@@ -103,6 +104,9 @@ l: {LOCATION}
ou: {OU}
mail: {UID}@example.com
mail: {UIDNUMBER}@example.com
+uidNumber: {UIDNUMBER}
+gidNumber: {UIDNUMBER}
+homeDirectory: /home/{UID}
postalAddress: 518, Dept #851, Room#{OU}
title: {TITLE}
usercertificate;binary:: MIIBvjCCASegAwIBAgIBAjANBgkqhkiG9w0BAQQFADAnMQ8wDQYD
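
With the template change above, entries produced by dbgen_users() now carry posixAccount
attributes (uidNumber, gidNumber, homeDirectory), which the new dsconf_tasks_test.py relies on.
A minimal usage sketch (the output path is illustrative; inst is assumed to be a connected
DirSrv instance):

from lib389.dbgen import dbgen_users

# Writes 100 template-based users under ou=people, now including the posixAccount attributes
dbgen_users(inst, 100, "/tmp/posix_users.ldif", "dc=example,dc=com",
            parent="ou=people,dc=example,dc=com", generic=True)
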
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index cfba46d0c..193805780 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2021 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -88,15 +88,14 @@ class Task(DSLdapObject):
return None
return None
- def wait(self, timeout=120):
+ def wait(self, timeout=120, sleep_interval=2):
"""Wait until task is complete."""
time_passed = 0
- sleep_interval = 2
- if timeout is None:
+ if timeout is None or timeout == 0:
self._log.debug("No timeout is set, this may take a long time ...")
- while timeout is None or time_passed < timeout:
+ while timeout is None or timeout == 0 or time_passed < timeout:
if self.is_complete():
break
time_passed = time_passed + sleep_interval
@@ -169,7 +168,7 @@ class FixupLinkedAttributesTask(Task):
class MemberUidFixupTask(Task):
- """A single instance of memberOf task entry
+    """A single instance of a posix group fixup task entry
:param instance: An instance
:type instance: lib389.DirSrv
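
A simplified sketch of the polling loop the updated Task.wait() implements, showing how a timeout
of 0 or None disables the deadline and how sleep_interval drives the poll rate (an approximation
of the lib389 code above, not a drop-in replacement):

import time

def wait_for_task(task, timeout=120, sleep_interval=2):
    time_passed = 0
    while timeout in (None, 0) or time_passed < timeout:
        if task.is_complete():
            break
        time.sleep(sleep_interval)
        time_passed += sleep_interval
    # None here means the task is still running, i.e. the wait timed out
    return task.get_exit_code()
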
| 0 |
65fef7efba36dd75d41344d423283047ce07e818
|
389ds/389-ds-base
|
fix pcre build issues
Reviewed by: nkinder (Thanks!)
|
commit 65fef7efba36dd75d41344d423283047ce07e818
Author: Rich Megginson <[email protected]>
Date: Wed Aug 12 09:03:32 2009 -0600
fix pcre build issues
Reviewed by: nkinder (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index c49162b71..6bfad0864 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -558,7 +558,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/vattr.c \
$(libavl_a_SOURCES)
-libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@
+libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@ @pcre_inc@
if SOLARIS
libslapd_la_SOURCES += ldap/servers/slapd/slapi_counter_sunos_sparcv9.S
endif
@@ -1132,7 +1132,7 @@ ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \
$(GETSOCKETPEER)
ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) @sasl_inc@ @openldap_inc@ @ldapsdk_inc@ @nss_inc@ \
- @nspr_inc@ @svrcore_inc@ @pcre_inc@
+ @nspr_inc@ @svrcore_inc@
ns_slapd_LDADD = libslapd.la libldaputil.a $(LDAPSDK_LINK) $(NSS_LINK) \
$(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET)
# We need to link ns-slapd with the C++ compiler on HP-UX since we load
diff --git a/Makefile.in b/Makefile.in
index 215b72d52..a96f78b41 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1561,7 +1561,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/uuid.c ldap/servers/slapd/value.c \
ldap/servers/slapd/valueset.c ldap/servers/slapd/vattr.c \
$(libavl_a_SOURCES) $(am__append_1)
-libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@
+libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@ @pcre_inc@
libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK)
#////////////////////////////////////////////////////////////////
@@ -2102,7 +2102,7 @@ ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \
$(GETSOCKETPEER)
ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) @sasl_inc@ @openldap_inc@ @ldapsdk_inc@ @nss_inc@ \
- @nspr_inc@ @svrcore_inc@ @pcre_inc@
+ @nspr_inc@ @svrcore_inc@
ns_slapd_LDADD = libslapd.la libldaputil.a $(LDAPSDK_LINK) $(NSS_LINK) \
$(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET)
diff --git a/configure b/configure
index fdf49b92f..407670922 100755
--- a/configure
+++ b/configure
@@ -27005,28 +27005,8 @@ else
echo "${ECHO_T}no" >&6
fi;
-if test -z "$pcre_inc"; then
- echo "$as_me:$LINENO: checking for pcre.h" >&5
-echo $ECHO_N "checking for pcre.h... $ECHO_C" >&6
- if test -f "/usr/include/pcre.h"; then
- echo "$as_me:$LINENO: result: using /usr/include/pcre.h" >&5
-echo "${ECHO_T}using /usr/include/pcre.h" >&6
- pcre_incdir="/usr/include"
- pcre_inc="-I/usr/include"
- pcre_lib='-L$(libdir)'
- pcre_libdir='$(libdir)'
- else
- echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
- { { echo "$as_me:$LINENO: error: pcre not found, specify with --with-pcre." >&5
-echo "$as_me: error: pcre not found, specify with --with-pcre." >&2;}
- { (exit 1); exit 1; }; }
- fi
-fi
#
# if PCRE is not found yet, try pkg-config
-
-# last resort
if test -z "$pcre_inc" -o -z "$pcre_lib" -o -z "$pcre_libdir"; then
# Extract the first word of "pkg-config", so it can be a program name with args.
set dummy pkg-config; ac_word=$2
@@ -27075,6 +27055,12 @@ echo $ECHO_N "checking for pcre with pkg-config... $ECHO_C" >&6
pcre_lib=`$PKG_CONFIG --libs-only-L pcre`
pcre_libdir=`$PKG_CONFIG --libs-only-L pcre | sed -e s/-L// | sed -e s/\ .*$//`
echo "$as_me:$LINENO: result: using system PCRE" >&5
+echo "${ECHO_T}using system PCRE" >&6
+ elif $PKG_CONFIG --exists libpcre; then
+ pcre_inc=`$PKG_CONFIG --cflags-only-I libpcre`
+ pcre_lib=`$PKG_CONFIG --libs-only-L libpcre`
+ pcre_libdir=`$PKG_CONFIG --libs-only-L libpcre | sed -e s/-L// | sed -e s/\ .*$//`
+ echo "$as_me:$LINENO: result: using system PCRE" >&5
echo "${ECHO_T}using system PCRE" >&6
else
{ { echo "$as_me:$LINENO: error: PCRE not found, specify with --with-pcre." >&5
@@ -27084,6 +27070,25 @@ echo "$as_me: error: PCRE not found, specify with --with-pcre." >&2;}
fi
fi
+if test -z "$pcre_inc"; then
+ echo "$as_me:$LINENO: checking for pcre.h" >&5
+echo $ECHO_N "checking for pcre.h... $ECHO_C" >&6
+ if test -f "/usr/include/pcre.h"; then
+ echo "$as_me:$LINENO: result: using /usr/include/pcre.h" >&5
+echo "${ECHO_T}using /usr/include/pcre.h" >&6
+ pcre_incdir="/usr/include"
+ pcre_inc="-I/usr/include"
+ pcre_lib='-L$(libdir)'
+ pcre_libdir='$(libdir)'
+ else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+ { { echo "$as_me:$LINENO: error: pcre not found, specify with --with-pcre." >&5
+echo "$as_me: error: pcre not found, specify with --with-pcre." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. '{print $1"."$2}'`
diff --git a/ldap/servers/slapd/regex.c b/ldap/servers/slapd/regex.c
index 24e7b8600..6eb682a4f 100644
--- a/ldap/servers/slapd/regex.c
+++ b/ldap/servers/slapd/regex.c
@@ -45,6 +45,9 @@
#include "slap.h"
#include "slapi-plugin.h"
+/* Perl Compatible Regular Expression */
+#include <pcre.h>
+
struct slapi_regex_handle {
pcre *re_pcre; /* contains the compiled pattern */
int *re_ovector; /* output vector */
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 55184a329..ceb46b2cb 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -167,9 +167,6 @@ typedef struct symbol_t {
#include "csngen.h"
#include "uuid.h"
-/* Perl Compatible Regular Expression */
-#include <pcre.h>
-
#if defined(OS_solaris)
# include <thread.h>
# define GET_THREAD_ID() thr_self()
diff --git a/m4/pcre.m4 b/m4/pcre.m4
index f60123e27..6096108d3 100644
--- a/m4/pcre.m4
+++ b/m4/pcre.m4
@@ -53,24 +53,8 @@ AC_ARG_WITH(pcre, [ --with-pcre=PATH Perl Compatible Regular Expression direc
],
AC_MSG_RESULT(no))
-dnl - check in system locations
-if test -z "$pcre_inc"; then
- AC_MSG_CHECKING(for pcre.h)
- if test -f "/usr/include/pcre.h"; then
- AC_MSG_RESULT([using /usr/include/pcre.h])
- pcre_incdir="/usr/include"
- pcre_inc="-I/usr/include"
- pcre_lib='-L$(libdir)'
- pcre_libdir='$(libdir)'
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR([pcre not found, specify with --with-pcre.])
- fi
-fi
#
# if PCRE is not found yet, try pkg-config
-
-# last resort
if test -z "$pcre_inc" -o -z "$pcre_lib" -o -z "$pcre_libdir"; then
AC_PATH_PROG(PKG_CONFIG, pkg-config)
AC_MSG_CHECKING(for pcre with pkg-config)
@@ -80,8 +64,29 @@ if test -z "$pcre_inc" -o -z "$pcre_lib" -o -z "$pcre_libdir"; then
pcre_lib=`$PKG_CONFIG --libs-only-L pcre`
pcre_libdir=`$PKG_CONFIG --libs-only-L pcre | sed -e s/-L// | sed -e s/\ .*$//`
AC_MSG_RESULT([using system PCRE])
+ elif $PKG_CONFIG --exists libpcre; then
+ pcre_inc=`$PKG_CONFIG --cflags-only-I libpcre`
+ pcre_lib=`$PKG_CONFIG --libs-only-L libpcre`
+ pcre_libdir=`$PKG_CONFIG --libs-only-L libpcre | sed -e s/-L// | sed -e s/\ .*$//`
+ AC_MSG_RESULT([using system PCRE])
else
AC_MSG_ERROR([PCRE not found, specify with --with-pcre.])
fi
fi
fi
+
+dnl last resort
+dnl - check in system locations
+if test -z "$pcre_inc"; then
+ AC_MSG_CHECKING(for pcre.h)
+ if test -f "/usr/include/pcre.h"; then
+ AC_MSG_RESULT([using /usr/include/pcre.h])
+ pcre_incdir="/usr/include"
+ pcre_inc="-I/usr/include"
+ pcre_lib='-L$(libdir)'
+ pcre_libdir='$(libdir)'
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR([pcre not found, specify with --with-pcre.])
+ fi
+fi
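
The reordered m4/configure logic now asks pkg-config first (module "pcre", then "libpcre") and
only falls back to probing /usr/include/pcre.h as a last resort. A rough Python illustration of
that detection order (purely explanatory, not part of the build system; the fallback library path
is an assumption):

import os
import shutil
import subprocess

def detect_pcre():
    pkg_config = shutil.which("pkg-config")
    if pkg_config:
        for module in ("pcre", "libpcre"):
            if subprocess.call([pkg_config, "--exists", module]) == 0:
                inc = subprocess.check_output([pkg_config, "--cflags-only-I", module], text=True).strip()
                lib = subprocess.check_output([pkg_config, "--libs-only-L", module], text=True).strip()
                return inc, lib
    # Last resort: the header in the default system include path
    if os.path.isfile("/usr/include/pcre.h"):
        return "-I/usr/include", "-L/usr/lib"
    raise RuntimeError("pcre not found, specify with --with-pcre")
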
| 0 |
09d7fd28d6bbf58032a5137e05d697318bcd412b
|
389ds/389-ds-base
|
Resolves: #214728
Summary: Cleaning up obsolete macros in the build
Changes: eliminated macro LDAPDB_THREAD_SAFE (Comment #10)
|
commit 09d7fd28d6bbf58032a5137e05d697318bcd412b
Author: Noriko Hosoi <[email protected]>
Date: Fri Nov 10 01:37:46 2006 +0000
Resolves: #214728
Summary: Cleaning up obsolete macros in the build
Changes: eliminated macro LDAPDB_THREAD_SAFE (Comment #10)
diff --git a/include/ldaputil/ldapdb.h b/include/ldaputil/ldapdb.h
index fd667e629..6f326d224 100644
--- a/include/ldaputil/ldapdb.h
+++ b/include/ldaputil/ldapdb.h
@@ -39,17 +39,10 @@
#define _LDAPU_LDAPDB_H
#include <ldap.h>
-/* removed for LDAPSDK31 integration
-#include <lcache.h>
-*/
-#ifdef LDAPDB_THREAD_SAFE
/* In the past, we used CRITICAL objects from lib/base/crit.cpp.
* Now we use PRMonitor to avoid ldapu to depend on lib/base.
*/
#include <prmon.h>
-#else
-#define PRMonitor void
-#endif /* LDAPDB_THREAD_SAFE */
#ifndef NSAPI_PUBLIC
#ifdef XP_WIN32
| 0 |
c766b7ac3ce68eae18ace54805bdd3badcaf7a80
|
389ds/389-ds-base
|
610281 - fix coverity Defect Type: Control flow issues
https://bugzilla.redhat.com/show_bug.cgi?id=610281
11814 DEADCODE Triaged Unassigned Bug Moderate Fix Required
string_filter_sub() ds/ldap/servers/plugins/syntaxes/string.c
Comment:
Code to update tmpbufsize was missing. This "tmpbufsize = len + 1;"
is needed before slapi_ch_realloc.
351 tmpbufsize = len + 1;
352 tmpbuf = (char *) slapi_ch_realloc( tmpbuf, tmpbufsize );
Also, if (len < tmpbufsize) were true (which could not happen since
tmpbufsize had never been set), bvp->bv_val was copied to buf,
which is not long enough for bvp->bv_val. The bug was also
fixed.
|
commit c766b7ac3ce68eae18ace54805bdd3badcaf7a80
Author: Noriko Hosoi <[email protected]>
Date: Fri Jul 2 17:15:22 2010 -0700
610281 - fix coverity Defect Type: Control flow issues
https://bugzilla.redhat.com/show_bug.cgi?id=610281
11814 DEADCODE Triaged Unassigned Bug Moderate Fix Required
string_filter_sub() ds/ldap/servers/plugins/syntaxes/string.c
Comment:
Code to update tmpbufsize was missing. This "tmpbufsize = len + 1;"
is needed before slapi_ch_realloc.
351 tmpbufsize = len + 1;
352 tmpbuf = (char *) slapi_ch_realloc( tmpbuf, tmpbufsize );
Also, if (len < tmpbufsize) were true (which could not happen since
tmpbufsize had never been set), bvp->bv_val was copied to buf,
which is not long enough for bvp->bv_val. The bug was also
fixed.
diff --git a/ldap/servers/plugins/syntaxes/string.c b/ldap/servers/plugins/syntaxes/string.c
index 9131f9f3d..18e7a0514 100644
--- a/ldap/servers/plugins/syntaxes/string.c
+++ b/ldap/servers/plugins/syntaxes/string.c
@@ -342,15 +342,15 @@ string_filter_sub( Slapi_PBlock *pb, char *initial, char **any, char *final,
len = bvp->bv_len;
if ( len < sizeof(buf) ) {
- strcpy( buf, bvp->bv_val );
realval = buf;
+ strncpy( realval, bvp->bv_val, sizeof(buf) );
} else if ( len < tmpbufsize ) {
- strcpy( buf, bvp->bv_val );
realval = tmpbuf;
+ strncpy( realval, bvp->bv_val, tmpbufsize );
} else {
- tmpbuf = (char *) slapi_ch_realloc( tmpbuf, len + 1 );
- strcpy( tmpbuf, bvp->bv_val );
- realval = tmpbuf;
+ tmpbufsize = len + 1;
+ realval = tmpbuf = (char *) slapi_ch_realloc( tmpbuf, tmpbufsize );
+ strncpy( realval, bvp->bv_val, tmpbufsize );
}
/* 3rd arg: 1 - trim leading blanks */
value_normalize_ext( realval, syntax, 1, &alt );
| 0 |
79346deb255ca8d7889d7590534d308d4e3a78da
|
389ds/389-ds-base
|
Ticket 47331 - Self entry access ACI not working properly
Bug Description:
There are two issues in that bug.
The first one is that for a given entry, the rights related to an attribute are evaluated and cached. Reusing this evaluation for a different entry is erroneous.
The second one is that for each deny/allow aci, the results of the evaluation of the aci are cached. These results
are reset for aci types that are entry related. The parsing of the self entry access rule misses the setting
of ACI_USERDN_SELFRULE.
This flag allows resetting (in the result cache) a result obtained on a previous entry. The consequence is that
a previous result was erroneously reused.
Fix Description:
The fix for the first issue is to prevent acl__match_handlesFromCache from reusing already evaluated attributes.
A new flag makes acl__match_handlesFromCache return if the evaluation is entry related.
The second fix is to set ACI_USERDN_SELFRULE when we have a rule like 'userdn = ldap:///self'
https://fedorahosted.org/389/ticket/47331
Reviewed by: Noriko Hosoi, Ludwig Krispenz
Platforms tested: fedora 17
Flag Day: no
Doc impact: no
|
commit 79346deb255ca8d7889d7590534d308d4e3a78da
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Mon Apr 22 14:15:33 2013 +0200
Ticket 47331 - Self entry access ACI not working properly
Bug Description:
There are two issues in that bug.
The first one is that for a given entry, the rights related to an attribute are evaluated and cached. Reusing this evaluation for a different entry is erroneous.
The second one is that for each deny/allow aci, the results of the evaluation of the aci are cached. These results
are reset for aci types that are entry related. The parsing of the self entry access rule misses the setting
of ACI_USERDN_SELFRULE.
This flag allows resetting (in the result cache) a result obtained on a previous entry. The consequence is that
a previous result was erroneously reused.
Fix Description:
The fix for the first issue is to prevent acl__match_handlesFromCache from reusing already evaluated attributes.
A new flag makes acl__match_handlesFromCache return if the evaluation is entry related.
The second fix is to set ACI_USERDN_SELFRULE when we have a rule like 'userdn = ldap:///self'
https://fedorahosted.org/389/ticket/47331
Reviewed by: Noriko Hosoi, Ludwig Krispenz
Platforms tested: fedora 17
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index 79213b3c0..2f417f33b 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -2799,6 +2799,11 @@ acl__TestRights(Acl_PBlock *aclpb,int access, char **right, char ** map_generic,
if (access & ( SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) {
+        /* We cannot reuse results obtained on another entry */
+ if (aci->aci_type & ACI_CACHE_RESULT_PER_ENTRY) {
+ aclpb->aclpb_state |= ACLPB_CACHE_RESULT_PER_ENTRY_SKIP;
+ }
+
/*
* aclpb->aclpb_cache_result[0..aclpb->aclpb_last_cache_result] is
* a cache of info about whether applicable acis
@@ -3010,6 +3015,10 @@ acl__TestRights(Acl_PBlock *aclpb,int access, char **right, char ** map_generic,
if (access & ( SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) {
+        /* We cannot reuse results obtained on another entry */
+ if (aci->aci_type & ACI_CACHE_RESULT_PER_ENTRY) {
+ aclpb->aclpb_state |= ACLPB_CACHE_RESULT_PER_ENTRY_SKIP;
+ }
/*
* aclpb->aclpb_cache_result[0..aclpb->aclpb_last_cache_result] is
* a cache of info about whether applicable acis
@@ -3810,8 +3819,23 @@ acl__match_handlesFromCache ( Acl_PBlock *aclpb, char *attr, int access)
} else {
context_type = ACLPB_EVALCONTEXT_PREV;
c_evalContext = &aclpb->aclpb_prev_entryEval_context;
- }
-
+ }
+
+    /* we cannot reuse an access evaluation done on a previous entry
+ * so just skip that cache
+ */
+ if (aclpb->aclpb_state & ACLPB_CACHE_RESULT_PER_ENTRY_SKIP) {
+ aclpb->aclpb_state &= ~ACLPB_MATCHES_ALL_ACLS;
+ aclpb->aclpb_state |= ACLPB_UPD_ACLCB_CACHE;
+ /* Did not match */
+ if (context_type == ACLPB_EVALCONTEXT_ACLCB) {
+ aclpb->aclpb_state &= ~ACLPB_HAS_ACLCB_EVALCONTEXT;
+ } else {
+ aclpb->aclpb_state |= ACLPB_COPY_EVALCONTEXT;
+ c_evalContext->acle_numof_tmatched_handles = 0;
+ }
+ return -1;
+ }
if ( aclpb->aclpb_res_type & (ACLPB_NEW_ENTRY | ACLPB_EFFECTIVE_RIGHTS) ) {
aclpb->aclpb_state |= ACLPB_MATCHES_ALL_ACLS;
diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h
index 0eb857043..ef375c231 100644
--- a/ldap/servers/plugins/acl/acl.h
+++ b/ldap/servers/plugins/acl/acl.h
@@ -470,6 +470,7 @@ struct acl_pblock {
#define ACLPB_UPD_ACLCB_CACHE 0x100000
#define ACLPB_ATTR_RULE_EVALUATED 0x200000
#define ACLPB_DONOT_EVALUATE_PROXY 0x400000
+#define ACLPB_CACHE_RESULT_PER_ENTRY_SKIP 0x800000
#define ACLPB_RESET_MASK ( ACLPB_ACCESS_ALLOWED_ON_A_ATTR | ACLPB_ACCESS_DENIED_ON_ALL_ATTRS | \
diff --git a/ldap/servers/plugins/acl/aclparse.c b/ldap/servers/plugins/acl/aclparse.c
index 28c01b9e2..5941b2aec 100644
--- a/ldap/servers/plugins/acl/aclparse.c
+++ b/ldap/servers/plugins/acl/aclparse.c
@@ -800,6 +800,8 @@ normalize_nextACERule:
goto error;
}
} else if ( 0 == strncmp ( s, DS_LAS_USERDN, 6 )) {
+ char *prefix;
+
p = PL_strnchr (s, '=', end - s);
if (NULL == p) {
goto error;
@@ -824,6 +826,23 @@ normalize_nextACERule:
goto error;
}
+ /* skip the ldap prefix */
+ prefix = PL_strncasestr(p, LDAP_URL_prefix, end - p);
+ if (prefix) {
+ prefix += strlen(LDAP_URL_prefix);
+ } else {
+ prefix = PL_strncasestr(p, LDAPS_URL_prefix, end - p);
+ if (prefix) {
+ prefix += strlen(LDAPS_URL_prefix);
+ }
+ }
+ if (prefix == NULL) {
+ /* userdn value does not starts with LDAP(S)_URL_prefix */
+ goto error;
+ }
+ p = prefix;
+
+
/* we have a rule like userdn = "ldap:///blah". s points to blah now.
** let's find if we have a SELF rule like userdn = "ldap:///self".
** Since the resource changes on entry basis, we can't cache the
| 0 |
f38168d04718776fac0c0ab09f095837d24d4503
|
389ds/389-ds-base
|
Resolves: bug 457156
Bug Description: GER: allow GER for non-existing entries (phase 2)
Reviewed by: nhosoi (Thanks!)
Fix Description: There are a couple of memory leaks in the code. acleffectiverights.c line 617 calls slapi_attr_get_valueset to get the list of objectclass values in objclassvals - this function allocates memory (returns a dup of the list) but this is not freed. The fix is to call slapi_valueset_free() to free it. The allattrs and opattrs arrays are not freed in all conditions. The fix is to make sure they are freed in all conditions.
Platforms tested: RHEL5, Fedora 8
Flag Day: no
Doc impact: no
|
commit f38168d04718776fac0c0ab09f095837d24d4503
Author: Rich Megginson <[email protected]>
Date: Mon Aug 11 17:02:00 2008 +0000
Resolves: bug 457156
Bug Description: GER: allow GER for non-existing entries (phase 2)
Reviewed by: nhosoi (Thanks!)
Fix Description: There are a couple of memory leaks in the code. acleffectiverights.c line 617 calls slapi_attr_get_valueset to get the list of objectclass values in objclassvals - this function allocates memory (returns a dup of the list) but this is not freed. The fix is to call slapi_valueset_free() to free it. The allattrs and opattrs arrays are not freed in all conditions. The fix is to make sure they are freed in all conditions.
Platforms tested: RHEL5, Fedora 8
Flag Day: no
Doc impact: no
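The ownership rule behind this leak, as a short hedged sketch (server plugin context, not standalone): slapi_attr_get_valueset() hands back a duplicated valueset that the caller owns, so it must always be released with slapi_valueset_free(), on every code path.
#include "slapi-plugin.h"

static void
example_read_objectclasses(Slapi_Entry *e)
{
    Slapi_Attr *attr = NULL;

    if (slapi_entry_attr_find(e, "objectclass", &attr) == 0 && attr) {
        Slapi_ValueSet *objclassvals = NULL;
        slapi_attr_get_valueset(attr, &objclassvals); /* returns a dup the caller owns */
        /* ... inspect the objectclass values ... */
        slapi_valueset_free(objclassvals); /* must be freed on every path */
    }
}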
diff --git a/ldap/servers/plugins/acl/acleffectiverights.c b/ldap/servers/plugins/acl/acleffectiverights.c
index 6c73862cf..70c7c858a 100644
--- a/ldap/servers/plugins/acl/acleffectiverights.c
+++ b/ldap/servers/plugins/acl/acleffectiverights.c
@@ -649,6 +649,7 @@ _ger_get_attrs_rights (
}
}
}
+ slapi_valueset_free(objclassvals);
}
/* get operational attrs */
@@ -706,9 +707,9 @@ _ger_get_attrs_rights (
}
}
}
- charray_free(allattrs);
- charray_free(opattrs);
}
+ charray_free(allattrs);
+ charray_free(opattrs);
}
else
{
| 0 |
2119b2eb87ebe9ab2d8f33317e01e796182013ff
|
389ds/389-ds-base
|
Bug(s) fixed: 176302
Bug Description: crash in PTA plugin when bind returned controls
Reviewed by: Pete (Thanks!)
Fix Description: Fix by Ulf. We are passing the ***LDAPControl when it
needs the **LDAPControl.
Platforms tested: Fedora Core 4
Flag Day: no
Doc impact: no
QA impact: should be covered by regular nightly and manual testing
New Tests integrated into TET: none
|
commit 2119b2eb87ebe9ab2d8f33317e01e796182013ff
Author: Rich Megginson <[email protected]>
Date: Wed Feb 8 18:59:22 2006 +0000
Bug(s) fixed: 176302
Bug Description: crash in PTA plugin when bind returned controls
Reviewed by: Pete (Thanks!)
Fix Description: Fix by Ulf. We are passing the ***LDAPControl when it
needs the **LDAPControl.
Platforms tested: Fedora Core 4
Flag Day: no
Doc impact: no
QA impact: should be covered by regular nightly and manual testing
New Tests integrated into TET: none
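A minimal sketch of the pointer-level mistake, assuming a plugin context: resctrls is already an LDAPControl **, which is the type SLAPI_RESCONTROLS expects, so passing its address adds one level of indirection too many.
#include "slapi-plugin.h"

static void
example_set_result_controls(Slapi_PBlock *pb, LDAPControl **resctrls)
{
    if (resctrls != NULL) {
        /* wrong:   slapi_pblock_set(pb, SLAPI_RESCONTROLS, &resctrls);  -- LDAPControl *** */
        /* correct: pass the LDAPControl ** itself */
        slapi_pblock_set(pb, SLAPI_RESCONTROLS, resctrls);
    }
}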
diff --git a/ldap/servers/plugins/passthru/ptpreop.c b/ldap/servers/plugins/passthru/ptpreop.c
index 5ab2ae847..df61f9a6e 100644
--- a/ldap/servers/plugins/passthru/ptpreop.c
+++ b/ldap/servers/plugins/passthru/ptpreop.c
@@ -253,7 +253,7 @@ passthru_bindpreop( Slapi_PBlock *pb )
* Send a result to our client.
*/
if ( resctrls != NULL ) {
- (void)slapi_pblock_set( pb, SLAPI_RESCONTROLS, &resctrls );
+ (void)slapi_pblock_set( pb, SLAPI_RESCONTROLS, resctrls );
}
slapi_send_ldap_result( pb, rc, matcheddn, errmsg, 0, urls );
}
| 0 |
ca065479af93699abd507a082b82e0be778e12eb
|
389ds/389-ds-base
|
Ticket #487 - Possible to add invalid attribute values to PAM PTA plugin configuration
Bug Description:
The original issue was discovered in 1.2.10 and was fixed in 1.2.11 (and later) as a
side effect of ticket 181.
A minor issue was introduced by ticket 181. When changing pamIDMapMethod with an
invalid value, the returned text is:
additional info: Error: valid values for pamMissingSuffix are
PAMPT_MISSING_SUFFIX_ERROR, PAMPT_MISSING_SUFFIX_ALLOW,
PAMPT_MISSING_SUFFIX_IGNORE
where it should be
additional info: Error: valid values for pamMissingSuffix are
ERROR, ALLOW, IGNORE
Ticket 487 is used to fix this minor issue.
Fix Description:
The fix is to let the macro be preprocessed so that the value, not the macro name, ends up in the message.
The fix could be verified with the following test case:
Create an instance, enable pam-plugin, restart the instance,
ldapmodify ... <<EOF
dn: cn=PAM Pass Through Auth,cn=plugins,cn=config
changetype: modify
replace: pamMissingSuffix
pamMissingSuffix: invalid
EOF
https://fedorahosted.org/389/ticket/487
Reviewed by: Mark Reynolds ([email protected])
|
commit ca065479af93699abd507a082b82e0be778e12eb
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Thu Jan 24 16:17:32 2013 +0100
Ticket #487 - Possible to add invalid attribute values to PAM PTA plugin configuration
Bug Description:
The original issue was discovered in 1.2.10 and was fixed in 1.2.11 (and later) as a
side effect of ticket 181.
A minor issue was introduced by ticket 181. When changing pamIDMapMethod with an
invalid value, the returned text is:
additional info: Error: valid values for pamMissingSuffix are
PAMPT_MISSING_SUFFIX_ERROR, PAMPT_MISSING_SUFFIX_ALLOW,
PAMPT_MISSING_SUFFIX_IGNORE
where it should be
additional info: Error: valid values for pamMissingSuffix are
ERROR, ALLOW, IGNORE
Ticket 487 is used to fix this minor issue.
Fix Description:
The fix is to let the macro be preprocessed so that the value, not the macro name, ends up in the message.
The fix could be verified with the following test case:
Create an instance, enable pam-plugin, restart the instance,
ldapmodify ... <<EOF
dn: cn=PAM Pass Through Auth,cn=plugins,cn=config
changetype: modify
replace: pamMissingSuffix
pamMissingSuffix: invalid
EOF
https://fedorahosted.org/389/ticket/487
Reviewed by: Mark Reynolds ([email protected])
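The preprocessor pitfall behind this fix can be shown with a standalone C sketch: a one-level "#x" stringifies the unexpanded macro name, which is why the error text listed PAMPT_MISSING_SUFFIX_ERROR instead of ERROR. The definition of PAMPT_MISSING_SUFFIX_ERROR below is made up for illustration; the actual fix switched to the predefined *_STRING constants visible in the diff, while the two-level idiom shown here is the generic alternative.
#include <stdio.h>

#define PAMPT_MISSING_SUFFIX_ERROR ERROR /* illustrative definition only */

#define MAKE_STR(x)   #x            /* stringifies the unexpanded argument */
#define EXPAND_STR(x) MAKE_STR(x)   /* expands the argument first, then stringifies */

int main(void)
{
    printf("%s\n", MAKE_STR(PAMPT_MISSING_SUFFIX_ERROR));   /* prints PAMPT_MISSING_SUFFIX_ERROR */
    printf("%s\n", EXPAND_STR(PAMPT_MISSING_SUFFIX_ERROR)); /* prints ERROR */
    return 0;
}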
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
index fce8000bf..fb2dc2bec 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
@@ -244,11 +244,10 @@ check_missing_suffix_flag(int val) {
return PR_FALSE;
}
-#define MAKE_STR(x) #x
static char *get_missing_suffix_values()
{
- return MAKE_STR(PAMPT_MISSING_SUFFIX_ERROR) ", " MAKE_STR(PAMPT_MISSING_SUFFIX_ALLOW) ", "
- MAKE_STR(PAMPT_MISSING_SUFFIX_IGNORE);
+ return PAMPT_MISSING_SUFFIX_ERROR_STRING ", " PAMPT_MISSING_SUFFIX_ALLOW_STRING ", "
+ PAMPT_MISSING_SUFFIX_IGNORE_STRING;
}
static char *get_map_method_values()
| 0 |
abff3feacb218a7bb65a358dce2e9c90a2f185b1
|
389ds/389-ds-base
|
506786 Index maintenance mechanism causes wrong search results when
modifying attributes with subtypes
When an entry contains attribute value pairs that are identical except for their
subtypes, deleting one of the pairs should not affect the index in which the
attribute value is the key.
e.g.,
mail: abc
mail;en: abc
mail;fr: xyz
removing mail=abc or mail;en=abc should not remove the =abc key from
mail.db#.
This fix uses the value array evals to determine if the equality key
in the index should be deleted or not. The value array evals stores
the values of the attribute in the entry after the deletion is done.
If evals is empty, it means the to-be-deleted attribute value pair is
the only pair in the entry. Thus, the equality key can be removed from
the index.
If evals has values, then the value of the to-be-deleted attribute (curr_attr,
which was retrieved from the old entry) needs to be checked against evals.
If it is in evals, the equality key is still used by other pair(s), so leave it.
Otherwise, the key can be removed.
In the above example, let's assume removing mail=abc. evals holds
{"abc", "xyz"}. curr_attr abc is in evals, thus =abc will not be
removed.
|
commit abff3feacb218a7bb65a358dce2e9c90a2f185b1
Author: Noriko Hosoi <[email protected]>
Date: Mon Aug 10 17:36:36 2009 -0700
506786 Index maintenance mechanism causes wrong search results when
modifying attributes with subtypes
When an entry contains attribute value pairs that are identical except for their
subtypes, deleting one of the pairs should not affect the index in which the
attribute value is the key.
e.g.,
mail: abc
mail;en: abc
mail;fr: xyz
removing mail=abc or mail;en=abc should not remove the =abc key from
mail.db#.
This fix uses the value array evals to determine if the equality key
in the index should be deleted or not. The value array evals stores
the values of the attribute in the entry after the deletion is done.
If evals is empty, it means the to-be-deleted attribute value pair is
the only pair in the entry. Thus, the equality key can be removed from
the index.
If evals has values, then the value of the to-be-deleted attribute (curr_attr,
which was retrieved from the old entry) needs to be checked against evals.
If it is in evals, the equality key is still used by other pair(s), so leave it.
Otherwise, the key can be removed.
In the above example, let's assume removing mail=abc. evals holds
{"abc", "xyz"}. curr_attr abc is in evals, thus =abc will not be
removed.
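The decision rule described above, as a standalone C sketch (not the real index.c code): the =value equality key is kept only while some value remaining in evals still equals the deleted value.
#include <stdio.h>
#include <string.h>

/* evals: values of the attribute (all subtypes) left in the entry after the
 * delete; deleted: one value being removed. Returns 1 if the =value key is
 * still in use and must be kept. */
static int
equality_key_still_used(const char **evals, const char *deleted)
{
    int i;
    if (evals == NULL || evals[0] == NULL)
        return 0; /* attribute is gone: drop the presence and equality keys */
    for (i = 0; evals[i] != NULL; i++) {
        if (strcmp(evals[i], deleted) == 0)
            return 1; /* another subtype still carries this value */
    }
    return 0; /* safe to remove the =value key */
}

int main(void)
{
    const char *evals[] = { "abc", "xyz", NULL }; /* mail;en: abc and mail;fr: xyz remain */
    printf("keep =abc key: %d\n", equality_key_still_used(evals, "abc")); /* 1 */
    printf("keep =def key: %d\n", equality_key_still_used(evals, "def")); /* 0 */
    return 0;
}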
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 3c639c258..c4b905c2d 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -641,30 +641,34 @@ index_add_mods(
* for this attribute?)
*/
if (evals == NULL || evals[0] == NULL) {
- flags = BE_INDEX_DEL|BE_INDEX_PRESENCE;
+ /* The new entry newe does not have the attribute at all
+ * including the one with subtypes. Thus it's safe to
+ * remove the presence and equality index.
+ */
+ flags = BE_INDEX_DEL|BE_INDEX_PRESENCE|BE_INDEX_EQUALITY;
} else {
flags = BE_INDEX_DEL;
- }
- /* If the same value doesn't exist in a subtype, set
- * BE_INDEX_EQUALITY flag so the equality index is
- * removed.
- */
- slapi_entry_attr_find( newe->ep_entry, mods[i]->mod_type, &curr_attr);
- if (curr_attr) {
- for (j = 0; mods_valueArray[j] != NULL; j++ ) {
- if ( valuearray_find(curr_attr, evals, mods_valueArray[j]) == -1 ) {
- if (!(flags & BE_INDEX_EQUALITY)) {
- flags |= BE_INDEX_EQUALITY;
+ /* If the same value doesn't exist in a subtype, set
+ * BE_INDEX_EQUALITY flag so the equality index is
+ * removed.
+ */
+ slapi_entry_attr_find( olde->ep_entry, mods[i]->mod_type, &curr_attr );
+ if (curr_attr) {
+ int found = 0;
+ for (j = 0; mods_valueArray[j] != NULL; j++ ) {
+ if ( valuearray_find(curr_attr, evals, mods_valueArray[j]) > -1 ) {
+ found = 1;
}
}
- }
- } else {
- /* If we didn't find the attribute in the new
- * entry, we should remove the equality index. */
- if (!(flags & BE_INDEX_EQUALITY)) {
- flags |= BE_INDEX_EQUALITY;
- }
+ /*
+ * to-be-deleted curr_attr does not exist in the
+ * new value set evals. So, we can remove it.
+ */
+ if (!found && !(flags & BE_INDEX_EQUALITY)) {
+ flags |= BE_INDEX_EQUALITY;
+ }
+ }
}
rc = index_addordel_values_sv( be, basetype,
| 0 |
f3c13129604ce877a9cb078c2536f8c2bef15cb7
|
389ds/389-ds-base
|
Issue 4459 - lib389 - Default paths should use dse.ldif if the server is down
Bug Description: If a custom path is used for something like the backup directory,
dsctl will still use the default path from defaults.inf.
Fix Description: When initializing the default Paths, consult dse.ldif for some
of the paths.
relates: https://github.com/389ds/389-ds-base/issues/4459
Reviewed by: firstyear(Thanks!)
|
commit f3c13129604ce877a9cb078c2536f8c2bef15cb7
Author: Mark Reynolds <[email protected]>
Date: Wed Mar 10 10:22:11 2021 -0500
Issue 4459 - lib389 - Default paths should use dse.ldif if the server is down
Bug Description: If a custom path is used for something like the backup directory,
dsctl will still use the default path from defaults.inf.
Fix Description: When initializing the default Paths, consult dse.ldif for some
of the paths.
relates: https://github.com/389ds/389-ds-base/issues/4459
Reviewed by: firstyear(Thanks!)
diff --git a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py
new file mode 100644
index 000000000..a0f89defd
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py
@@ -0,0 +1,56 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import logging
+import pytest
+import os
+from lib389._constants import *
+from lib389.topologies import topology_st as topo
+
+log = logging.getLogger(__name__)
+
+
+def test_custom_path(topo):
+ """Test that a custom path, backup directory, is correctly used by lib389
+ when the server is stopped.
+
+ :id: 8659e209-ee83-477e-8183-1d2f555669ea
+ :setup: Standalone Instance
+ :steps:
+ 1. Get the LDIF directory
+ 2. Change the server's backup directory to the LDIF directory
+ 3. Stop the server, and perform a backup
+ 4. Backup was written to LDIF directory
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ # Get LDIF dir
+ ldif_dir = topo.standalone.get_ldif_dir()
+
+ # Set backup directory to LDIF directory
+ topo.standalone.config.replace('nsslapd-bakdir', ldif_dir)
+
+ # Stop the server and take a backup
+ topo.standalone.stop()
+ topo.standalone.db2bak(None)
+
+ # Verify backup was written to LDIF directory
+ backups = os.listdir(ldif_dir)
+ assert len(backups)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/src/lib389/lib389/paths.py b/src/lib389/lib389/paths.py
index b5ab2a30d..d668168ca 100644
--- a/src/lib389/lib389/paths.py
+++ b/src/lib389/lib389/paths.py
@@ -1,14 +1,14 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+import ldif
import sys
import os
-
from lib389._constants import DIRSRV_STATE_ONLINE, DSRC_CONTAINER
MAJOR, MINOR, _, _, _ = sys.version_info
@@ -87,6 +87,20 @@ CONFIG_MAP = {
'version': ('', 'vendorVersion'),
}
+DSE_MAP = {
+ 'nsslapd-bakdir': 'backup_dir',
+ 'nsslapd-schemadir': 'schema_dir',
+ 'nsslapd-certdir': 'cert_dir',
+ 'nsslapd-lockdir': 'lock_dir',
+ 'nsslapd-ldifdir': 'ldif_dir',
+ 'nsslapd-bakdir': 'backup_dir',
+ 'nsslapd-errorlog': 'error_log',
+ 'nsslapd-accesslog': 'access_log',
+ 'nsslapd-auditlog': 'audit_log',
+ 'nsslapd-ldapifilepath': 'ldapi',
+ 'nsslapd-instancedir': 'inst_dir',
+}
+
SECTION = 'slapd'
@@ -142,6 +156,39 @@ class Paths(object):
self._config.set(SECTION, "ldapi", "/data/run/slapd-localhost.socket")
self._defaults_cached = True
+ # Now check the dse.ldif (if present) to see if custom paths were set
+ if self._serverid:
+ # Get the dse.ldif from the instance name
+ prefix = os.environ.get('PREFIX', ""),
+ if self._serverid.startswith("slapd-"):
+ self._serverid = self._serverid.replace("slapd-", "", 1)
+ dsepath = "{}/etc/dirsrv/slapd-{}/dse.ldif".format(prefix[0], self._serverid)
+ elif self._instance is not None:
+ ds_paths = Paths(self._instance.serverid, None)
+ dsepath = os.path.join(ds_paths.config_dir, 'dse.ldif')
+ else:
+ # Nothing else to do but return
+ return
+
+ try:
+ from lib389.utils import ensure_str # prevents circular import errors
+ with open(dsepath, 'r') as file_dse:
+ dse_parser = ldif.LDIFRecordList(file_dse, max_entries=2)
+ if dse_parser is None:
+ return
+ dse_parser.parse()
+ if dse_parser.all_records is None:
+ return
+ # We have the config, start processing the DSE_MAP
+ config = dse_parser.all_records[1] # cn=config
+ attrs = config[1]
+ for attr in DSE_MAP.keys():
+ if attr in attrs.keys():
+ self._config.set(SECTION, DSE_MAP[attr], ensure_str(attrs[attr][0]))
+ except:
+ # No dse.ldif or can't read it, no problem just skip it
+ pass
+
def _validate_defaults(self):
if self._defaults_cached is False:
return False
| 0 |
84a82b32683c3fde43074661b2b99fc2e93e8830
|
389ds/389-ds-base
|
Bug 616500 - fix coverity Defect Type: Resource leaks issues CID 12094 - 12136
https://bugzilla.redhat.com/show_bug.cgi?id=616500
coverity ID 12099
Comment:
When connection_threadmain receives SHUTDOWN, the function was
returning without destroying the pblock. This patch destroys
it before calling return.
|
commit 84a82b32683c3fde43074661b2b99fc2e93e8830
Author: Noriko Hosoi <[email protected]>
Date: Thu Aug 19 15:14:50 2010 -0700
Bug 616500 - fix coverity Defect Type: Resource leaks issues CID 12094 - 12136
https://bugzilla.redhat.com/show_bug.cgi?id=616500
coverity ID 12099
Comment:
When connection_threadmain receives SHUTDOWN, the function was
returning without destroying the pblock. This patch destroys
it before calling return.
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index d4af04c6a..4de41f267 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -2148,6 +2148,7 @@ connection_threadmain()
int ret = 0;
int more_data = 0;
int replication_connection = 0; /* If this connection is from a replication supplier, we want to ensure that operation processing is serialized */
+ int doshutdown = 0;
#if defined( OSF1 ) || defined( hpux )
/* Arrange to ignore SIGPIPE signals. */
@@ -2252,9 +2253,10 @@ connection_threadmain()
goto done;
case CONN_SHUTDOWN:
LDAPDebug( LDAP_DEBUG_TRACE,
- "op_thread received shutdown signal\n", 0, 0, 0 );
+ "op_thread received shutdown signal\n", 0, 0, 0 );
g_decr_active_threadcnt();
- return;
+ doshutdown = 1;
+ goto done; /* To destroy pb, jump to done once */
default:
break;
}
@@ -2349,6 +2351,9 @@ done:
if ( ((1 == is_timedout) || (replication_connection && !thread_turbo_flag)) && !more_data)
connection_make_readable(conn);
pb = NULL;
+ if (doshutdown) {
+ return;
+ }
if (!thread_turbo_flag && !more_data) { /* Don't do this in turbo mode */
PR_Lock( conn->c_mutex );
| 0 |
19e49e69124ff19530a584f90808aa652a4c686f
|
389ds/389-ds-base
|
Trac Ticket #500 - Newly created users with
organizationalPerson objectClass fails to sync from AD to DS with missing attribute error
https://fedorahosted.org/389/ticket/500
Bug description: Posix Account objectclass requires homeDirectory,
uidNumber, and gidNumber. When an AD entry has only some of these
attributes, or other optional attributes such as loginShell or
gecos, the entry is incompletely converted to a posixAccount entry
and fails to be added due to the missing attribute error.
Fix description: Before transforming the AD entry into the DS posix
account entry, check the required attributes first. If any of the
above 3 attributes is missing, all of the posix account related
attributes are dropped and the entry is added to the DS as a non-posix
account entry. If the PLUGIN log level is set, this type of message is
logged in the error log.
[] posix-winsync - AD entry CN=<CN>,OU=<OU>,DC=<DC>,DC=<COM> does
not have required attribute uidNumber for posixAccount objectclass.
|
commit 19e49e69124ff19530a584f90808aa652a4c686f
Author: Noriko Hosoi <[email protected]>
Date: Tue Nov 13 11:21:26 2012 -0800
Trac Ticket #500 - Newly created users with
organizationalPerson objectClass fails to sync from AD to DS with missing attribute error
https://fedorahosted.org/389/ticket/500
Bug description: Posix Account objectclass requires homeDirectory,
uidNumber, and gidNumber. When an AD entry has only some of these
attributes, or other optional attributes such as loginShell or
gecos, the entry is incompletely converted to a posixAccount entry
and fails to be added due to the missing attribute error.
Fix description: Before transforming the AD entry into the DS posix
account entry, check the required attributes first. If any of the
above 3 attributes is missing, all of the posix account related
attributes are dropped and the entry is added to the DS as a non-posix
account entry. If the PLUGIN log level is set, this type of message is
logged in the error log.
[] posix-winsync - AD entry CN=<CN>,OU=<OU>,DC=<DC>,DC=<COM> does
not have required attribute uidNumber for posixAccount objectclass.
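A standalone C sketch of the MUST-attribute gate described above; the real callbacks walk user_attribute_map with slapi_entry_attr_find() against the AD entry, whereas the entry_has_attr() helper here is a hypothetical stand-in.
#include <stdio.h>
#include <string.h>
#include <strings.h>

typedef struct {
    const char *windows_attribute_name;
    const char *ldap_attribute_name;
    int isMUST; /* schema: required attribute */
} attr_map_t;

static const attr_map_t user_attribute_map_sketch[] = {
    { "unixHomeDirectory", "homeDirectory", 1 },
    { "loginShell",        "loginShell",    0 },
    { "uidNumber",         "uidNumber",     1 },
    { "gidNumber",         "gidNumber",     1 },
    { "gecos",             "gecos",         0 },
    { NULL, NULL, 0 }
};

/* Hypothetical stand-in for slapi_entry_attr_find() on the AD entry. */
static int entry_has_attr(const char **present, const char *name)
{
    int i;
    for (i = 0; present[i] != NULL; i++)
        if (strcasecmp(present[i], name) == 0)
            return 1;
    return 0;
}

/* Returns 1 only if every MUST attribute is present in the AD entry;
 * otherwise the posix attributes are dropped and the entry is synced
 * as a plain (non-posix) account. */
static int is_posix_candidate(const char **present)
{
    int i;
    for (i = 0; user_attribute_map_sketch[i].windows_attribute_name != NULL; i++) {
        if (user_attribute_map_sketch[i].isMUST &&
            !entry_has_attr(present, user_attribute_map_sketch[i].windows_attribute_name))
            return 0;
    }
    return 1;
}

int main(void)
{
    const char *ad_attrs[] = { "uidNumber", "gidNumber", NULL }; /* unixHomeDirectory missing */
    printf("posix candidate: %d\n", is_posix_candidate(ad_attrs)); /* 0 */
    return 0;
}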
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
index aa292c33e..92a3a79b7 100644
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
@@ -86,14 +86,17 @@ typedef struct _windows_attr_map
{
char *windows_attribute_name;
char *ldap_attribute_name;
+ int isMUST; /* schema: required attribute */
} windows_attribute_map;
-static windows_attribute_map user_attribute_map[] = { { "unixHomeDirectory", "homeDirectory" },
- { "loginShell", "loginShell" },
- { "uidNumber", "uidNumber" },
- { "gidNumber", "gidNumber" },
- { "gecos", "gecos" },
- { NULL, NULL } };
+static windows_attribute_map user_attribute_map[] = {
+ { "unixHomeDirectory", "homeDirectory", 1 },
+ { "loginShell", "loginShell", 0 },
+ { "uidNumber", "uidNumber", 1 },
+ { "gidNumber", "gidNumber", 1 },
+ { "gecos", "gecos", 0 },
+ { NULL, NULL, 0 }
+};
static windows_attribute_map user_mssfu_attribute_map[] =
{ { "msSFU30homedirectory", "homeDirectory" },
@@ -751,7 +754,9 @@ posix_winsync_pre_ds_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
int is_present_local = 0;
int do_modify_local = 0;
int rc;
+ int i;
windows_attribute_map *attr_map = user_attribute_map;
+ PRBool posixval = PR_TRUE;
if (posix_winsync_config_get_msSFUSchema())
attr_map = user_mssfu_attribute_map;
@@ -759,15 +764,33 @@ posix_winsync_pre_ds_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
"--> _pre_ds_mod_user_cb -- begin\n");
+ /* check all of the required attributes are in the ad_entry:
+ * MUST (cn $ uid $ uidNumber $ gidNumber $ homeDirectory).
+ * If any of the required attributes are missing, drop them before adding
+ * the entry to the DS. */
+ for (i = 0; attr_map[i].windows_attribute_name != NULL; i++) {
+ Slapi_Attr *pa_attr;
+ if (attr_map[i].isMUST &&
+ slapi_entry_attr_find(ad_entry,
+ attr_map[i].windows_attribute_name,
+ &pa_attr)) {
+ /* required attribute does not exist */
+ posixval = PR_FALSE;
+ slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
+ "AD entry %s does not have required attribute %s for posixAccount objectclass.\n",
+ slapi_entry_get_dn_const(ad_entry),
+ attr_map[i].ldap_attribute_name);
+ }
+ }
+
/* add objectclass: posixAccount, uidnumber ,gidnumber ,homeDirectory, loginshell */
/* in the ad to ds case we have no changelog, so we have to compare the entries */
for (rc = slapi_entry_first_attr(ad_entry, &attr); rc == 0;
rc = slapi_entry_next_attr(ad_entry, attr, &attr)) {
char *type = NULL;
- size_t i = 0;
slapi_attr_get_type(attr, &type);
- for (; attr_map[i].windows_attribute_name != NULL; i++) {
+ for (i = 0; attr_map[i].windows_attribute_name != NULL; i++) {
if (0 == slapi_attr_type_cmp(type, attr_map[i].windows_attribute_name,
SLAPI_TYPE_CMP_SUBTYPE)) {
Slapi_Attr *local_attr = NULL;
@@ -779,7 +802,10 @@ posix_winsync_pre_ds_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
slapi_entry_attr_find(ds_entry, local_type, &local_attr);
is_present_local = (NULL == local_attr) ? 0 : 1;
if (is_present_local) {
+ /* DS entry has the posix attrs.
+ * I.e., it is a posix account*/
int values_equal = 0;
+ posixval = PR_TRUE;
values_equal = attr_compare_equal(attr, local_attr);
if (!values_equal) {
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
@@ -791,8 +817,8 @@ posix_winsync_pre_ds_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
valueset_get_valuearray(vs));
*do_modify = 1;
}
- } else {
-
+ } else if (posixval) {
+ /* only if AD provides the all necessary attributes */
slapi_mods_add_mod_values(smods, LDAP_MOD_ADD, local_type,
valueset_get_valuearray(vs));
*do_modify = do_modify_local = 1;
@@ -804,10 +830,11 @@ posix_winsync_pre_ds_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
}
}
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
- "<-- _pre_ds_mod_user_cb present %d modify %d\n", is_present_local,
- do_modify_local);
+ "<-- _pre_ds_mod_user_cb present %d modify %d isPosixaccount %s\n",
+ is_present_local, do_modify_local,
+ posixval?"yes":"no");
- if (!is_present_local && do_modify_local) {
+ if (!is_present_local && do_modify_local && posixval) {
Slapi_Attr *oc_attr = NULL;
Slapi_Value *voc = slapi_value_new();
@@ -988,8 +1015,9 @@ posix_winsync_pre_ds_add_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
{
Slapi_Attr *attr = NULL;
char *type = NULL;
- PRBool posixval = PR_FALSE;
+ PRBool posixval = PR_TRUE;
windows_attribute_map *attr_map = user_attribute_map;
+ int i = 0;
if (posix_winsync_config_get_msSFUSchema())
attr_map = user_mssfu_attribute_map;
@@ -998,42 +1026,65 @@ posix_winsync_pre_ds_add_user_cb(void *cbdata, const Slapi_Entry *rawentry, Slap
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
"--> _pre_ds_add_user_cb -- begin\n");
- for (slapi_entry_first_attr(ad_entry, &attr); attr; slapi_entry_next_attr(ad_entry, attr, &attr)) {
- size_t i = 0;
-
- slapi_attr_get_type(attr, &type);
- if (!type) {
- continue;
+ /* check all of the required attributes are in the ad_entry:
+ * MUST (cn $ uid $ uidNumber $ gidNumber $ homeDirectory).
+ * If any of the required attributes are missing, drop them before adding
+ * the entry to the DS. */
+ for (i = 0; attr_map[i].windows_attribute_name != NULL; i++) {
+ Slapi_Attr *pa_attr;
+ if (attr_map[i].isMUST &&
+ slapi_entry_attr_find(ad_entry,
+ attr_map[i].windows_attribute_name,
+ &pa_attr)) {
+ /* required attribute does not exist */
+ posixval = PR_FALSE;
+ slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
+ "AD entry %s does not have required attribute %s for posixAccount objectclass.\n",
+ slapi_entry_get_dn_const(ad_entry),
+ attr_map[i].ldap_attribute_name);
}
+ }
- slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name, "--> _pre_ds_add_user_cb -- "
- "look for [%s] to new entry [%s]\n", type, slapi_entry_get_dn_const(ds_entry));
- for (; attr_map[i].windows_attribute_name != NULL; i++) {
- if (slapi_attr_type_cmp(attr_map[i].windows_attribute_name, type,
- SLAPI_TYPE_CMP_SUBTYPE) == 0) {
- Slapi_ValueSet *svs = NULL;
- slapi_attr_get_valueset(attr, &svs);
- slapi_entry_add_valueset(ds_entry, attr_map[i].ldap_attribute_name, svs);
- slapi_valueset_free(svs);
+ /* converts the AD attributes to DS posix attribute if all the posix
+ * required attributes are available */
+ if (posixval) {
+ int rc;
+ for (slapi_entry_first_attr(ad_entry, &attr); attr;
+ slapi_entry_next_attr(ad_entry, attr, &attr)) {
- slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
- "--> _pre_ds_add_user_cb -- "
- "adding val for [%s] to new entry [%s]\n", type,
- slapi_entry_get_dn_const(ds_entry));
- posixval = PR_TRUE;
+ slapi_attr_get_type(attr, &type);
+ if (!type) {
+ continue;
+ }
+
+ slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
+ "--> _pre_ds_add_user_cb -- "
+ "look for [%s] to new entry [%s]\n",
+ type, slapi_entry_get_dn_const(ds_entry));
+ for (i = 0; attr_map[i].windows_attribute_name != NULL; i++) {
+ if (slapi_attr_type_cmp(attr_map[i].windows_attribute_name,
+ type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
+ Slapi_ValueSet *svs = NULL;
+ slapi_attr_get_valueset(attr, &svs);
+ slapi_entry_add_valueset(ds_entry,
+ attr_map[i].ldap_attribute_name, svs);
+ slapi_valueset_free(svs);
+
+ slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
+ "--> _pre_ds_add_user_cb -- "
+ "adding val for [%s] to new entry [%s]\n",
+ type, slapi_entry_get_dn_const(ds_entry));
+ }
}
}
- }
- if (posixval) {
- int rc;
rc = slapi_entry_add_string(ds_entry, "objectClass", "posixAccount");
rc |= slapi_entry_add_string(ds_entry, "objectClass", "shadowAccount");
rc |= slapi_entry_add_string(ds_entry, "objectClass", "inetUser");
- if (rc != 0)
+ if (rc != 0) {
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
"<-- _pre_ds_add_user_cb -- adding objectclass for new entry failed %d\n",
rc);
- else {
+ } else {
if (posix_winsync_config_get_mapNestedGrouping()) {
memberUidLock();
addUserToGroupMembership(ds_entry);
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
index 237fc24a9..5de0e46b6 100644
--- a/ldap/servers/slapd/mapping_tree.c
+++ b/ldap/servers/slapd/mapping_tree.c
@@ -2451,7 +2451,7 @@ int slapi_mapping_tree_select_and_check(Slapi_PBlock *pb,char *newdn, Slapi_Back
{
ret = LDAP_AFFECTS_MULTIPLE_DSAS;
PR_snprintf(errorbuf, BUFSIZ,
- "Cannot move entries accross backends\n");
+ "Cannot move entries across backends\n");
goto unlock_and_return;
}
}
| 0 |
b6080f72b85a67114525ed844c4fbd4aa18f8611
|
389ds/389-ds-base
|
bump version to 1.2.10.a5
|
commit b6080f72b85a67114525ed844c4fbd4aa18f8611
Author: Rich Megginson <[email protected]>
Date: Fri Oct 7 21:27:36 2011 -0600
bump version to 1.2.10.a5
diff --git a/VERSION.sh b/VERSION.sh
index ad2faefaf..f0e4468ad 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -14,7 +14,7 @@ VERSION_MAINT=10
# if this is a PRERELEASE, set VERSION_PREREL
# otherwise, comment it out
# be sure to include the dot prefix in the prerel
-VERSION_PREREL=.a4
+VERSION_PREREL=.a5
# NOTES on VERSION_PREREL
# use aN for an alpha release e.g. a1, a2, etc.
# use rcN for a release candidate e.g. rc1, rc2, etc.
| 0 |
702807607636ef3eae63c716c3e3f174b81776c3
|
389ds/389-ds-base
|
Ticket #302 - use thread local storage for internalModifiersName & internalCreatorsName
Bug Description: use thread local storage for internalModifiersName & internalCreatorsName
Fix description: Created new thread local storage slapi functions for initializing, setting/getting
thread local storage data in a new file thread_data.c. This was built on
top of some of the changes for ticket 111.
We create the index in main.c right before we start the plugins and worker threads.
Then we set the bind dn in bind_credentials_set_nolock(), and we also set the
thread data when we copy the operation in op_copy_identity so we can maintain the
bind dn through different threads from the same connection.
For plugins that create new threads we need to pass the new thread the bind dn (char *),
and then set the thread data (slapi_td_set_dn()).
https://fedorahosted.org/389/ticket/302
|
commit 702807607636ef3eae63c716c3e3f174b81776c3
Author: Mark Reynolds <[email protected]>
Date: Wed Feb 29 12:43:46 2012 -0500
Ticket #302 - use thread local storage for internalModifiersName & internalCreatorsName
Bug Description: use thread local storage for internalModifiersName & internalCreatorsName
Fix description: Created new thread local storage slapi functions for initializing, setting/getting
thread local storage data in a new file thread_data.c. This was built on
top of some of the changes for ticket 111.
We create the index in main.c right before we start the plugins and worker threads.
Then we set the bind dn in bind_credentials_set_nolock(), and we also set the
thread data when we copy the operation in op_copy_identity so we can maintain the
bind dn through different threads from the same connection.
For plugins that create new threads we need to pass the new thread the bind dn (char *),
and then set the thread data (slapi_td_set_dn()).
https://fedorahosted.org/389/ticket/302
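A short sketch of the intended flow, assuming a plugin task context (not standalone): the requestor DN is stashed in the task data when the task entry is added, copied into the worker thread's local storage with slapi_td_set_dn(), and read back with slapi_td_get_dn() wherever the internal modifier name needs to be stamped.
#include "slapi-plugin.h"

typedef struct example_task_data {
    char *bind_dn; /* requestor DN captured from SLAPI_REQUESTOR_DN at task-add time */
} example_task_data;

static void
example_task_thread(void *arg)
{
    Slapi_Task *task = (Slapi_Task *)arg;
    example_task_data *td = (example_task_data *)slapi_task_get_data(task);
    char *binddn = NULL;

    /* make the requestor DN visible to this worker thread */
    slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));

    /* ... later, anywhere on this thread ... */
    slapi_td_get_dn(&binddn); /* NULL means an anonymous bind */
    if (binddn) {
        slapi_log_error(SLAPI_LOG_PLUGIN, "example", "requestor: %s\n", binddn);
    }
}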
diff --git a/Makefile.am b/Makefile.am
index 787161dd2..521f4f4b2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -685,6 +685,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/subentry.c \
ldap/servers/slapd/task.c \
ldap/servers/slapd/time.c \
+ ldap/servers/slapd/thread_data.c \
ldap/servers/slapd/uniqueid.c \
ldap/servers/slapd/uniqueidgen.c \
ldap/servers/slapd/utf8.c \
diff --git a/Makefile.in b/Makefile.in
index fc638dceb..3aaf49f57 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -644,12 +644,12 @@ am__libslapd_la_SOURCES_DIST = ldap/servers/slapd/add.c \
ldap/servers/slapd/snmp_collator.c ldap/servers/slapd/sort.c \
ldap/servers/slapd/ssl.c ldap/servers/slapd/str2filter.c \
ldap/servers/slapd/subentry.c ldap/servers/slapd/task.c \
- ldap/servers/slapd/time.c ldap/servers/slapd/uniqueid.c \
- ldap/servers/slapd/uniqueidgen.c ldap/servers/slapd/utf8.c \
- ldap/servers/slapd/utf8compare.c ldap/servers/slapd/util.c \
- ldap/servers/slapd/uuid.c ldap/servers/slapd/value.c \
- ldap/servers/slapd/valueset.c ldap/servers/slapd/vattr.c \
- ldap/libraries/libavl/avl.c \
+ ldap/servers/slapd/time.c ldap/servers/slapd/thread_data.c \
+ ldap/servers/slapd/uniqueid.c ldap/servers/slapd/uniqueidgen.c \
+ ldap/servers/slapd/utf8.c ldap/servers/slapd/utf8compare.c \
+ ldap/servers/slapd/util.c ldap/servers/slapd/uuid.c \
+ ldap/servers/slapd/value.c ldap/servers/slapd/valueset.c \
+ ldap/servers/slapd/vattr.c ldap/libraries/libavl/avl.c \
ldap/servers/slapd/slapi_counter_sunos_sparcv9.S
am__objects_2 = ldap/libraries/libavl/libslapd_la-avl.lo
@SOLARIS_TRUE@am__objects_3 = ldap/servers/slapd/libslapd_la-slapi_counter_sunos_sparcv9.lo
@@ -737,6 +737,7 @@ am_libslapd_la_OBJECTS = ldap/servers/slapd/libslapd_la-add.lo \
ldap/servers/slapd/libslapd_la-subentry.lo \
ldap/servers/slapd/libslapd_la-task.lo \
ldap/servers/slapd/libslapd_la-time.lo \
+ ldap/servers/slapd/libslapd_la-thread_data.lo \
ldap/servers/slapd/libslapd_la-uniqueid.lo \
ldap/servers/slapd/libslapd_la-uniqueidgen.lo \
ldap/servers/slapd/libslapd_la-utf8.lo \
@@ -1842,12 +1843,12 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/snmp_collator.c ldap/servers/slapd/sort.c \
ldap/servers/slapd/ssl.c ldap/servers/slapd/str2filter.c \
ldap/servers/slapd/subentry.c ldap/servers/slapd/task.c \
- ldap/servers/slapd/time.c ldap/servers/slapd/uniqueid.c \
- ldap/servers/slapd/uniqueidgen.c ldap/servers/slapd/utf8.c \
- ldap/servers/slapd/utf8compare.c ldap/servers/slapd/util.c \
- ldap/servers/slapd/uuid.c ldap/servers/slapd/value.c \
- ldap/servers/slapd/valueset.c ldap/servers/slapd/vattr.c \
- $(libavl_a_SOURCES) $(am__append_1)
+ ldap/servers/slapd/time.c ldap/servers/slapd/thread_data.c \
+ ldap/servers/slapd/uniqueid.c ldap/servers/slapd/uniqueidgen.c \
+ ldap/servers/slapd/utf8.c ldap/servers/slapd/utf8compare.c \
+ ldap/servers/slapd/util.c ldap/servers/slapd/uuid.c \
+ ldap/servers/slapd/value.c ldap/servers/slapd/valueset.c \
+ ldap/servers/slapd/vattr.c $(libavl_a_SOURCES) $(am__append_1)
libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@ @pcre_inc@
libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB)
@@ -4050,6 +4051,9 @@ ldap/servers/slapd/libslapd_la-task.lo: \
ldap/servers/slapd/libslapd_la-time.lo: \
ldap/servers/slapd/$(am__dirstamp) \
ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/slapd/libslapd_la-thread_data.lo: \
+ ldap/servers/slapd/$(am__dirstamp) \
+ ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
ldap/servers/slapd/libslapd_la-uniqueid.lo: \
ldap/servers/slapd/$(am__dirstamp) \
ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
@@ -5360,6 +5364,8 @@ mostlyclean-compile:
-rm -f ldap/servers/slapd/libslapd_la-subentry.lo
-rm -f ldap/servers/slapd/libslapd_la-task.$(OBJEXT)
-rm -f ldap/servers/slapd/libslapd_la-task.lo
+ -rm -f ldap/servers/slapd/libslapd_la-thread_data.$(OBJEXT)
+ -rm -f ldap/servers/slapd/libslapd_la-thread_data.lo
-rm -f ldap/servers/slapd/libslapd_la-time.$(OBJEXT)
-rm -f ldap/servers/slapd/libslapd_la-time.lo
-rm -f ldap/servers/slapd/libslapd_la-uniqueid.$(OBJEXT)
@@ -5821,6 +5827,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-str2filter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-subentry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-task.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-thread_data.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-time.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-uniqueid.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-uniqueidgen.Plo@am__quote@
@@ -8344,6 +8351,13 @@ ldap/servers/slapd/libslapd_la-time.lo: ldap/servers/slapd/time.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/libslapd_la-time.lo `test -f 'ldap/servers/slapd/time.c' || echo '$(srcdir)/'`ldap/servers/slapd/time.c
+ldap/servers/slapd/libslapd_la-thread_data.lo: ldap/servers/slapd/thread_data.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/libslapd_la-thread_data.lo -MD -MP -MF ldap/servers/slapd/$(DEPDIR)/libslapd_la-thread_data.Tpo -c -o ldap/servers/slapd/libslapd_la-thread_data.lo `test -f 'ldap/servers/slapd/thread_data.c' || echo '$(srcdir)/'`ldap/servers/slapd/thread_data.c
+@am__fastdepCC_TRUE@ $(am__mv) ldap/servers/slapd/$(DEPDIR)/libslapd_la-thread_data.Tpo ldap/servers/slapd/$(DEPDIR)/libslapd_la-thread_data.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/slapd/thread_data.c' object='ldap/servers/slapd/libslapd_la-thread_data.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/libslapd_la-thread_data.lo `test -f 'ldap/servers/slapd/thread_data.c' || echo '$(srcdir)/'`ldap/servers/slapd/thread_data.c
+
ldap/servers/slapd/libslapd_la-uniqueid.lo: ldap/servers/slapd/uniqueid.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/libslapd_la-uniqueid.lo -MD -MP -MF ldap/servers/slapd/$(DEPDIR)/libslapd_la-uniqueid.Tpo -c -o ldap/servers/slapd/libslapd_la-uniqueid.lo `test -f 'ldap/servers/slapd/uniqueid.c' || echo '$(srcdir)/'`ldap/servers/slapd/uniqueid.c
@am__fastdepCC_TRUE@ $(am__mv) ldap/servers/slapd/$(DEPDIR)/libslapd_la-uniqueid.Tpo ldap/servers/slapd/$(DEPDIR)/libslapd_la-uniqueid.Plo
diff --git a/ldap/servers/plugins/linkedattrs/fixup_task.c b/ldap/servers/plugins/linkedattrs/fixup_task.c
index 10c821b2e..b19d3ab5c 100644
--- a/ldap/servers/plugins/linkedattrs/fixup_task.c
+++ b/ldap/servers/plugins/linkedattrs/fixup_task.c
@@ -67,6 +67,7 @@ linked_attrs_fixup_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
task_data *mytaskdata = NULL;
Slapi_Task *task = NULL;
const char *linkdn = NULL;
+ char *bind_dn;
*returncode = LDAP_SUCCESS;
@@ -81,6 +82,7 @@ linked_attrs_fixup_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
linkdn = fetch_attr(e, "linkdn", 0);
/* setup our task data */
+ slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn);
mytaskdata = (task_data*)slapi_ch_calloc(1, sizeof(task_data));
if (mytaskdata == NULL) {
*returncode = LDAP_OPERATIONS_ERROR;
@@ -91,6 +93,7 @@ linked_attrs_fixup_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
if (linkdn) {
mytaskdata->linkdn = slapi_dn_normalize(slapi_ch_strdup(linkdn));
}
+ mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
@@ -126,6 +129,7 @@ linked_attrs_fixup_task_destructor(Slapi_Task *task)
task_data *mydata = (task_data *)slapi_task_get_data(task);
if (mydata) {
slapi_ch_free_string(&mydata->linkdn);
+ slapi_ch_free_string(&mydata->bind_dn);
/* Need to cast to avoid a compiler warning */
slapi_ch_free((void **)&mydata);
}
@@ -144,6 +148,9 @@ linked_attrs_fixup_task_thread(void *arg)
/* Fetch our task data from the task */
td = (task_data *)slapi_task_get_data(task);
+ /* init and set the bind dn in the thread data */
+ slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
+
/* Log started message. */
slapi_task_begin(task, 1);
slapi_task_log_notice(task, "Linked attributes fixup task starting (link dn: \"%s\") ...\n",
diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.h b/ldap/servers/plugins/linkedattrs/linked_attrs.h
index 137e31723..2eba62f36 100644
--- a/ldap/servers/plugins/linkedattrs/linked_attrs.h
+++ b/ldap/servers/plugins/linkedattrs/linked_attrs.h
@@ -100,6 +100,7 @@ struct configIndex {
typedef struct _task_data
{
char *linkdn;
+ char *bind_dn;
} task_data;
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 56e34d484..655054c9b 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -2195,6 +2195,7 @@ void memberof_unlock()
typedef struct _task_data
{
char *dn;
+ char *bind_dn;
char *filter_str;
} task_data;
@@ -2208,6 +2209,9 @@ void memberof_fixup_task_thread(void *arg)
/* Fetch our task data from the task */
td = (task_data *)slapi_task_get_data(task);
+ /* set bind DN in the thread data */
+ slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
+
slapi_task_begin(task, 1);
slapi_task_log_notice(task, "Memberof task starts (arg: %s) ...\n",
td->filter_str);
@@ -2263,6 +2267,7 @@ int memberof_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
int rv = SLAPI_DSE_CALLBACK_OK;
task_data *mytaskdata = NULL;
Slapi_Task *task = NULL;
+ char *bind_dn;
const char *filter;
const char *dn = 0;
@@ -2292,6 +2297,7 @@ int memberof_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
}
/* setup our task data */
+ slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn);
mytaskdata = (task_data*)slapi_ch_malloc(sizeof(task_data));
if (mytaskdata == NULL)
{
@@ -2301,6 +2307,7 @@ int memberof_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
}
mytaskdata->dn = slapi_ch_strdup(dn);
mytaskdata->filter_str = slapi_ch_strdup(filter);
+ mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
@@ -2337,6 +2344,7 @@ memberof_task_destructor(Slapi_Task *task)
task_data *mydata = (task_data *)slapi_task_get_data(task);
if (mydata) {
slapi_ch_free_string(&mydata->dn);
+ slapi_ch_free_string(&mydata->bind_dn);
slapi_ch_free_string(&mydata->filter_str);
/* Need to cast to avoid a compiler warning */
slapi_ch_free((void **)&mydata);
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 8ece43894..fcb70fb13 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -80,7 +80,7 @@ int referint_postop_close( Slapi_PBlock *pb);
int update_integrity(char **argv, Slapi_DN *sDN, char *newrDN, Slapi_DN *newsuperior, int logChanges);
void referint_thread_func(void *arg);
int GetNextLine(char *dest, int size_dest, PRFileDesc *stream);
-void writeintegritylog(char *logfilename, Slapi_DN *sdn, char *newrdn, Slapi_DN *newsuperior, Slapi_DN *requestorsdn);
+void writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn, char *newrdn, Slapi_DN *newsuperior, Slapi_DN *requestorsdn);
int my_fgetc(PRFileDesc *stream);
/* global thread control stuff */
@@ -216,7 +216,7 @@ referint_postop_del( Slapi_PBlock *pb )
rc = update_integrity(argv, sdn, NULL, NULL, logChanges);
}else{
/* write the entry to integrity log */
- writeintegritylog(argv[1], sdn, NULL, NULL, NULL /* slapi_get_requestor_sdn(pb) */);
+ writeintegritylog(pb, argv[1], sdn, NULL, NULL, NULL /* slapi_get_requestor_sdn(pb) */);
rc = 0;
}
} else {
@@ -300,7 +300,7 @@ referint_postop_modrdn( Slapi_PBlock *pb )
newsuperior, logChanges);
}else{
/* write the entry to integrity log */
- writeintegritylog(argv[1], sdn, newrdn, newsuperior, NULL /* slapi_get_requestor_sdn(pb) */);
+ writeintegritylog(pb, argv[1], sdn, newrdn, newsuperior, NULL /* slapi_get_requestor_sdn(pb) */);
rc = 0;
}
@@ -913,6 +913,11 @@ referint_thread_func(void *arg)
return;
}
+ /* initialize the thread data index
+ if(slapi_td_dn_init()){
+ slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,"Failed to create thread data index\n");
+
+ } */
delay = atoi(plugin_argv[0]);
logfilename = plugin_argv[1];
@@ -978,6 +983,13 @@ referint_thread_func(void *arg)
} else {
tmpsuperior = slapi_sdn_new_normdn_byref(ptoken);
}
+ ptoken = ldap_utf8strtok_r (NULL, delimiter, &iter);
+ if (strcasecmp(ptoken, "NULL") != 0) {
+ /* Set the bind DN in the thread data */
+ if(slapi_td_set_dn(slapi_ch_strdup(ptoken))){
+ slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,"Failed to set thread data\n");
+ }
+ }
update_integrity(plugin_argv, sdn, tmprdn,
tmpsuperior, logChanges);
@@ -1097,7 +1109,7 @@ GetNextLine(char *dest, int size_dest, PRFileDesc *stream) {
}
void
-writeintegritylog(char *logfilename, Slapi_DN *sdn,
+writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn,
char *newrdn, Slapi_DN *newsuperior, Slapi_DN *requestorsdn)
{
PRFileDesc *prfd;
@@ -1147,6 +1159,7 @@ writeintegritylog(char *logfilename, Slapi_DN *sdn,
/* add the length of the newsuperior */
len_to_write += slapi_sdn_get_ndn_len(newsuperior);
}
+ slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &requestordn);
if (requestorsdn && (requestordn = slapi_sdn_get_udn(requestorsdn)) &&
(reqdn_len = strlen(requestordn))) {
len_to_write += reqdn_len;
diff --git a/ldap/servers/plugins/schema_reload/schema_reload.c b/ldap/servers/plugins/schema_reload/schema_reload.c
index 7aeab7c83..338aaf4dc 100644
--- a/ldap/servers/plugins/schema_reload/schema_reload.c
+++ b/ldap/servers/plugins/schema_reload/schema_reload.c
@@ -121,6 +121,12 @@ schemareload_start(Slapi_PBlock *pb)
return rc;
}
+typedef struct _task_data
+{
+ char *schemadir;
+ char *bind_dn;
+} task_data;
+
/*
* Task thread
* This is the heart of the reload-schema-file task:
@@ -134,16 +140,21 @@ schemareload_thread(void *arg)
Slapi_Task *task = (Slapi_Task *)arg;
int rv = 0;
int total_work = 2;
- /* fetch our argument from the task */
- char *schemadir = (char *)slapi_task_get_data(task);
+ task_data *td = NULL;
+
+ /* Fetch our task data from the task */
+ td = (task_data *)slapi_task_get_data(task);
+
+ /* Initialize and set the bind dn in the thread data */
+ slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
/* update task state to show it's running */
slapi_task_begin(task, total_work);
PR_Lock(schemareload_lock); /* make schema reload serialized */
- slapi_task_log_notice(task, "Schema reload task starts (schema dir: %s) ...\n", schemadir?schemadir:"default");
- slapi_log_error(SLAPI_LOG_FATAL, "schemareload", "Schema reload task starts (schema dir: %s) ...\n", schemadir?schemadir:"default");
+ slapi_task_log_notice(task, "Schema reload task starts (schema dir: %s) ...\n", td->schemadir?td->schemadir:"default");
+ slapi_log_error(SLAPI_LOG_FATAL, "schemareload", "Schema reload task starts (schema dir: %s) ...\n", td->schemadir?td->schemadir:"default");
- rv = slapi_validate_schema_files(schemadir);
+ rv = slapi_validate_schema_files(td->schemadir);
slapi_task_inc_progress(task);
if (LDAP_SUCCESS == rv) {
@@ -151,7 +162,7 @@ schemareload_thread(void *arg)
slapi_task_log_status(task, "Schema validation passed.");
slapi_log_error(SLAPI_LOG_FATAL, "schemareload", "Schema validation passed.\n");
- rv = slapi_reload_schema_files(schemadir);
+ rv = slapi_reload_schema_files(td->schemadir);
slapi_task_inc_progress(task);
/* update task state to say we're finished */
@@ -196,8 +207,13 @@ static void
schemareload_destructor(Slapi_Task *task)
{
if (task) {
- char *schemadir = (char *)slapi_task_get_data(task);
- slapi_ch_free_string(&schemadir);
+ task_data *mydata = (task_data *)slapi_task_get_data(task);
+ if (mydata) {
+ slapi_ch_free_string(&mydata->schemadir);
+ slapi_ch_free_string(&mydata->bind_dn);
+ /* Need to cast to avoid a compiler warning */
+ slapi_ch_free((void **)&mydata);
+ }
}
}
@@ -215,6 +231,9 @@ schemareload_add(Slapi_PBlock *pb, Slapi_Entry *e,
const char *schemadir = NULL;
int rv = SLAPI_DSE_CALLBACK_OK;
Slapi_Task *task = NULL;
+ task_data *mytaskdata = NULL;
+
+ char *bind_dn;
*returncode = LDAP_SUCCESS;
if (fetch_attr(e, "cn", NULL) == NULL) {
@@ -223,6 +242,9 @@ schemareload_add(Slapi_PBlock *pb, Slapi_Entry *e,
goto out;
}
+ /* get the requestor dn for our thread data*/
+ slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn);
+
/* get arg(s) */
schemadir = fetch_attr(e, "schemadir", NULL);
@@ -235,11 +257,14 @@ schemareload_add(Slapi_PBlock *pb, Slapi_Entry *e,
goto out;
}
+ mytaskdata->schemadir = slapi_ch_strdup(schemadir);
+ mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
+
/* set a destructor that will clean up schemadir for us when the task is complete */
slapi_task_set_destructor_fn(task, schemareload_destructor);
- /* Stash our argument in the task for use by the task thread */
- slapi_task_set_data(task, slapi_ch_strdup(schemadir));
+ /* Stash our task_data for use by the task thread */
+ slapi_task_set_data(task, mytaskdata);
/* start the schema reload task as a separate thread */
thread = PR_CreateThread(PR_USER_THREAD, schemareload_thread,
diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
index 12a48e346..20decae9f 100644
--- a/ldap/servers/plugins/usn/usn_cleanup.c
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
@@ -44,6 +44,7 @@
struct usn_cleanup_data {
char *suffix;
char *maxusn_to_delete;
+ char *bind_dn;
};
static int usn_cleanup_add(Slapi_PBlock *pb, Slapi_Entry *e,
@@ -86,6 +87,9 @@ usn_cleanup_thread(void *arg)
goto bail;
}
+ /* Initialize and set the thread data */
+ slapi_td_set_dn(slapi_ch_strdup(cleanup_data->bind_dn));
+
/* update task state to show it's running */
slapi_task_begin(task, total_work);
if (cleanup_data->maxusn_to_delete) {
@@ -184,6 +188,7 @@ bail:
}
slapi_ch_free_string(&cleanup_data->maxusn_to_delete);
slapi_ch_free_string(&cleanup_data->suffix);
+ slapi_ch_free_string(&cleanup_data->bind_dn);
slapi_ch_free((void **)&cleanup_data);
/* this will queue the destruction of the task */
@@ -242,6 +247,7 @@ usn_cleanup_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
char *suffix = NULL;
char *backend = NULL;
char *maxusn = NULL;
+ char *bind_dn;
struct usn_cleanup_data *cleanup_data = NULL;
int rv = SLAPI_DSE_CALLBACK_OK;
Slapi_Task *task = NULL;
@@ -253,6 +259,9 @@ usn_cleanup_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
*returncode = LDAP_SUCCESS;
+ /* get the requestor dn */
+ slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn);
+
/* make sure plugin is not closed*/
if(!usn_is_started()){
*returncode = LDAP_OPERATIONS_ERROR;
@@ -310,6 +319,7 @@ usn_cleanup_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
(struct usn_cleanup_data *)slapi_ch_malloc(sizeof(struct usn_cleanup_data));
cleanup_data->suffix = slapi_ch_strdup(suffix);
cleanup_data->maxusn_to_delete = slapi_ch_strdup(maxusn);
+ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn);
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index 53fd3fc93..204e13cba 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -742,6 +742,7 @@ static int
add_created_attrs(Operation *op, Slapi_Entry *e)
{
char buf[20];
+ char *binddn = NULL;
struct berval bv;
struct berval *bvals[2];
time_t curtime;
@@ -753,21 +754,39 @@ add_created_attrs(Operation *op, Slapi_Entry *e)
bvals[0] = &bv;
bvals[1] = NULL;
- if(slapdFrontendConfig->plugin_track && !slapi_sdn_isempty(&op->o_sdn)){
- /* assume op->o_sdn holds the plugin DN */
- bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
- bv.bv_len = strlen(bv.bv_val);
+ if(slapdFrontendConfig->plugin_track){
+ /* plugin bindDN tracking is enabled, grab the dn from thread local storage */
+ if(slapi_sdn_isempty(&op->o_sdn)){
+ bv.bv_val = "";
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
+ bv.bv_len = strlen(bv.bv_val);
+ }
slapi_entry_attr_replace(e, "internalCreatorsName", bvals);
slapi_entry_attr_replace(e, "internalModifiersName", bvals);
- }
- if (slapi_sdn_isempty(&op->o_sdn)) {
- bv.bv_val = "";
- bv.bv_len = strlen(bv.bv_val);
+ /* Grab the thread data(binddn) */
+ slapi_td_get_dn(&binddn);
+
+ if(binddn == NULL){
+ /* anonymous bind */
+ bv.bv_val = "";
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = binddn;
+ bv.bv_len = strlen(bv.bv_val);
+ }
} else {
- bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
- bv.bv_len = strlen(bv.bv_val);
+ if (slapi_sdn_isempty(&op->o_sdn)) {
+ bv.bv_val = "";
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
+ bv.bv_len = strlen(bv.bv_val);
+ }
}
+
slapi_entry_attr_replace(e, "creatorsname", bvals);
slapi_entry_attr_replace(e, "modifiersname", bvals);
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index cabfe71c3..13d56b6a2 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -2634,6 +2634,8 @@ op_copy_identity(Connection *conn, Operation *op)
} else {
slapi_sdn_set_dn_byval(&op->o_sdn,conn->c_dn);
op->o_authtype = slapi_ch_strdup(conn->c_authtype);
+ /* set the thread data bind dn index */
+ slapi_td_set_dn(slapi_ch_strdup(conn->c_dn));
}
/* XXX We should also copy c_client_cert into *op here; it's
* part of the authorization identity. The operation's copy
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 345585224..7b15249fb 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1150,6 +1150,9 @@ main( int argc, char **argv)
*/
task_cleanup();
+ /* init the thread data index for bind dn's */
+ slapi_td_dn_init();
+
plugin_print_lists();
plugin_startall(argc, argv, 1 /* Start Backends */, 1 /* Start Globals */);
if (housekeeping_start((time_t)0, NULL) == NULL) {
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 12f7fdf40..01014692a 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -137,6 +137,7 @@ void modify_update_last_modified_attr(Slapi_PBlock *pb, Slapi_Mods *smods)
{
char buf[20];
char *plugin_dn = NULL;
+ char *binddn = NULL;
struct berval bv;
struct berval *bvals[2];
time_t curtime;
@@ -152,35 +153,52 @@ void modify_update_last_modified_attr(Slapi_PBlock *pb, Slapi_Mods *smods)
bvals[0] = &bv;
bvals[1] = NULL;
- if(slapdFrontendConfig->plugin_track && !slapi_sdn_isempty(&op->o_sdn)){
- /* write to the new attribute the bind dn and plugin name */
- slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &cid);
- if (cid)
- plugin=(struct slapdplugin *) cid->sci_plugin;
- if(plugin)
- plugin_dn = plugin_get_dn (plugin);
- if(plugin_dn){
- bv.bv_val = plugin_dn;
- bv.bv_len = strlen(bv.bv_val);
- } else {
- bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
+ if(slapdFrontendConfig->plugin_track){
+ /* plugin bindDN tracking is enabled, grab the bind dn from thread local storage */
+ if(slapi_sdn_isempty(&op->o_sdn)){
+ bv.bv_val = "";
bv.bv_len = strlen(bv.bv_val);
- }
- slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
- "internalModifiersName", bvals);
- }
+ } else {
+ slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &cid);
+ if (cid)
+ plugin=(struct slapdplugin *) cid->sci_plugin;
+ if(plugin)
+ plugin_dn = plugin_get_dn (plugin);
+ if(plugin_dn){
+ bv.bv_val = plugin_dn;
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
+ bv.bv_len = strlen(bv.bv_val);
+ }
+ }
+ slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
+ "internalModifiersName", bvals);
- /* fill in modifiersname */
- if (slapi_sdn_isempty(&op->o_sdn)) {
- bv.bv_val = "";
- bv.bv_len = strlen(bv.bv_val);
+ /* Grab the thread data(binddn) */
+ slapi_td_get_dn(&binddn);
+
+ if(binddn == NULL){
+ /* anonymous bind */
+ bv.bv_val = "";
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = binddn;
+ bv.bv_len = strlen(bv.bv_val);
+ }
} else {
- bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
- bv.bv_len = strlen(bv.bv_val);
+ /* fill in modifiersname */
+ if (slapi_sdn_isempty(&op->o_sdn)) {
+ bv.bv_val = "";
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
+ bv.bv_len = strlen(bv.bv_val);
+ }
}
- slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
- "modifiersname", bvals);
+ slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
+ "modifiersname", bvals);
/* fill in modifytimestamp */
curtime = current_time();
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index b607f2888..2a1c70643 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -3593,6 +3593,8 @@ bind_credentials_set_nolock( Connection *conn, char *authtype, char *normdn,
conn->c_dn = normdn;
conn->c_isroot = slapi_dn_isroot( normdn );
+ /* Set the thread data with the normalized dn */
+ slapi_td_set_dn(slapi_ch_strdup(normdn));
/* set external credentials if requested */
if ( extauthtype != NULL ) {
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 3444b7e48..b4eb2a6be 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5096,6 +5096,16 @@ int slapi_apib_release(void **api);
/**** End of API broker interface. *******************************************/
+/* thread_data.c */
+int slapi_td_init(int indexType);
+int slapi_td_set_val(int indexType, void *value);
+void slapi_td_get_val(int indexType, void **value);
+int slapi_td_dn_init();
+int slapi_td_set_dn(char *dn);
+void slapi_td_get_dn(char **dn);
+
+/* Thread Local Storage Index Types */
+#define SLAPI_TD_REQUESTOR_DN 1
/*
* routines for dealing with controls
diff --git a/ldap/servers/slapd/thread_data.c b/ldap/servers/slapd/thread_data.c
new file mode 100644
index 000000000..ead6e8875
--- /dev/null
+++ b/ldap/servers/slapd/thread_data.c
@@ -0,0 +1,174 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2012 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+/*
+ * Thread Local Storage Functions
+ */
+#include <slapi-plugin.h>
+#include <prthread.h>
+
+void td_dn_destructor(void *priv);
+
+/*
+ * Thread Local Storage Indexes
+ */
+static PRUintn td_requestor_dn; /* TD_REQUESTOR_DN */
+
+/*
+ * Index types defined in slapi-plugin.h
+ *
+ * #define SLAPI_TD_REQUESTOR_DN 1
+ * ...
+ * ...
+ */
+
+
+/*
+ * The Process:
+ *
+ * [1] Create new index type macro in slapi-plugin.h
+ * [2] Create new static "PRUintn" index
+ * [3] Update these functions with the new index:
+ * slapi_td_init()
+ * slapi_td_set_val()
+ * slapi_td_get_val()
+ * [4] Create wrapper functions if so desired, and update slapi_plugin.h
+ * [5] Create destructor (if necessary)
+ */
+
+int
+slapi_td_init(int indexType)
+{
+ switch(indexType){
+ case SLAPI_TD_REQUESTOR_DN:
+ if(PR_NewThreadPrivateIndex(&td_requestor_dn, td_dn_destructor) == PR_FAILURE){
+ return PR_FAILURE;
+ }
+ break;
+
+ default:
+ return PR_FAILURE;
+ }
+
+ return PR_SUCCESS;
+}
+
+/*
+ * Caller needs to cast value to (void *)
+ */
+int
+slapi_td_set_val(int indexType, void *value)
+{
+ switch(indexType){
+ case SLAPI_TD_REQUESTOR_DN:
+ if(td_requestor_dn){
+ if(PR_SetThreadPrivate(td_requestor_dn, value) == PR_FAILURE){
+ return PR_FAILURE;
+ }
+ } else {
+ return PR_FAILURE;
+ }
+ break;
+
+ default:
+ return PR_FAILURE;
+ }
+
+ return PR_SUCCESS;
+}
+
+/*
+ * Caller needs to cast value to (void **)
+ */
+void
+slapi_td_get_val(int indexType, void **value)
+{
+ switch(indexType){
+ case SLAPI_TD_REQUESTOR_DN:
+ if(td_requestor_dn){
+ *value = PR_GetThreadPrivate(td_requestor_dn);
+ } else {
+ *value = NULL;
+ }
+ break;
+ default:
+ *value = NULL;
+ return;
+ }
+}
+
+/*
+ * Wrapper Functions
+ */
+
+int
+slapi_td_dn_init()
+{
+ if(slapi_td_init(SLAPI_TD_REQUESTOR_DN) == PR_FAILURE){
+ return PR_FAILURE;
+ }
+
+ return PR_SUCCESS;
+}
+
+int
+slapi_td_set_dn(char *value)
+{
+ if(slapi_td_set_val(SLAPI_TD_REQUESTOR_DN, (void *)value) == PR_FAILURE){
+ return PR_FAILURE;
+ }
+
+ return PR_SUCCESS;
+}
+
+void
+slapi_td_get_dn(char **value){
+ slapi_td_get_val(SLAPI_TD_REQUESTOR_DN, (void **)value);
+}
+
+
+/*
+ * Destructor Functions
+ */
+
+void
+td_dn_destructor(void *priv)
+{
+ slapi_ch_free((void **)&priv);
+}
+
+
| 0 |
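Editor's note: the new thread_data.c above keeps the requestor DN in NSPR thread-private storage so deeply nested code can recover the original bind DN without a pblock. Below is a minimal standalone sketch of that pattern, assuming only NSPR (compile and link against nspr, e.g. -lnspr4); the demo_* names are invented for illustration and are not the slapi_td_* server functions.

#include <prthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static PRUintn demo_dn_index;

/* Called by NSPR when a thread exits, so the stored copy is not leaked. */
static void PR_CALLBACK demo_dn_destructor(void *priv)
{
    free(priv);
}

static int demo_dn_init(void)
{
    return (PR_NewThreadPrivateIndex(&demo_dn_index, demo_dn_destructor) == PR_SUCCESS) ? 0 : -1;
}

static void demo_set_dn(const char *dn)
{
    /* The slot owns its value; hand it a private copy.  NSPR runs the
     * destructor on any previous value when the slot is overwritten. */
    PR_SetThreadPrivate(demo_dn_index, dn ? strdup(dn) : NULL);
}

static const char *demo_get_dn(void)
{
    return (const char *)PR_GetThreadPrivate(demo_dn_index);
}

int main(void)
{
    if (demo_dn_init() != 0)
        return 1;
    demo_set_dn("cn=directory manager");
    printf("bound as: %s\n", demo_get_dn() ? demo_get_dn() : "(anonymous)");
    return 0;
}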
f6481f623d234b5b09972629f893e0ace31e8ce8
|
389ds/389-ds-base
|
Issue 6241 - Add support for CRYPT-YESCRYPT (#6242)
Description:
Implements CRYPT-YESCRYPT as a password storage scheme
Issue: #6241
Reviewed by: @progier389
|
commit f6481f623d234b5b09972629f893e0ace31e8ce8
Author: jasonborden <[email protected]>
Date: Tue Jul 2 04:07:29 2024 -0600
Issue 6241 - Add support for CRYPT-YESCRYPT (#6242)
Description:
Implements CRYPT-YESCRYPT as a password storage scheme
Issue: #6241
Reviewed by: @progier389
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index b1736ccc2..ce44d75cd 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -195,6 +195,15 @@ nsslapd-plugininitfunc: crypt_sha512_pwd_storage_scheme_init
nsslapd-plugintype: pwdstoragescheme
nsslapd-pluginenabled: on
+dn: cn=CRYPT-YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
+objectclass: top
+objectclass: nsSlapdPlugin
+cn: CRYPT-YESCRYPT
+nsslapd-pluginpath: libpwdstorage-plugin
+nsslapd-plugininitfunc: crypt_yescrypt_pwd_storage_scheme_init
+nsslapd-plugintype: pwdstoragescheme
+nsslapd-pluginenabled: on
+
dn: cn=MD5,cn=Password Storage Schemes,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
index 083a8c6cf..3eb8bc2a5 100644
--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
@@ -39,12 +39,20 @@
static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+/* Standard Crypt Algorithms identifiers (with usual options when needed) */
+#define CRYPT_ALGO_ID_UNIX ""
+#define CRYPT_ALGO_ID_MD5 "$1$"
+#define CRYPT_ALGO_ID_SHA256 "$5$"
+#define CRYPT_ALGO_ID_SHA512 "$6$"
+#define CRYPT_ALGO_ID_YESCRYPT "$y$j9T$"
+
/* Use the same salt lengths as shadow */
#define CRYPT_UNIX_SALT_LENGTH 2
#define CRYPT_MD5_SALT_LENGTH 8
#define CRYPT_SHA_SALT_LENGTH 16
+#define CRYPT_YESCRYPT_SALT_LENGTH 24
-#define CRYPT_SALT_STRING_MAXLEN CRYPT_SHA_SALT_LENGTH + 1
+#define CRYPT_SALT_STRING_MAXLEN CRYPT_YESCRYPT_SALT_LENGTH + 1
int
crypt_pw_cmp(const char *userpwd, const char *dbpwd)
@@ -106,21 +114,26 @@ crypt_pw_enc_by_hash(const char *pwd, int salt_len, const char *algo_id)
char *
crypt_pw_enc(const char *pwd)
{
- return crypt_pw_enc_by_hash(pwd, CRYPT_UNIX_SALT_LENGTH, "");
+ return crypt_pw_enc_by_hash(pwd, CRYPT_UNIX_SALT_LENGTH, CRYPT_ALGO_ID_UNIX);
}
char *
crypt_pw_md5_enc(const char *pwd)
{
- return crypt_pw_enc_by_hash(pwd, CRYPT_MD5_SALT_LENGTH, "$1$");
+ return crypt_pw_enc_by_hash(pwd, CRYPT_MD5_SALT_LENGTH, CRYPT_ALGO_ID_MD5);
}
char *
crypt_pw_sha256_enc(const char *pwd)
{
- return crypt_pw_enc_by_hash(pwd, CRYPT_SHA_SALT_LENGTH, "$5$");
+ return crypt_pw_enc_by_hash(pwd, CRYPT_SHA_SALT_LENGTH, CRYPT_ALGO_ID_SHA256);
}
char *
crypt_pw_sha512_enc(const char *pwd)
{
- return crypt_pw_enc_by_hash(pwd, CRYPT_SHA_SALT_LENGTH, "$6$");
+ return crypt_pw_enc_by_hash(pwd, CRYPT_SHA_SALT_LENGTH, CRYPT_ALGO_ID_SHA512);
+}
+char *
+crypt_pw_yescrypt_enc(const char *pwd)
+{
+ return crypt_pw_enc_by_hash(pwd, CRYPT_YESCRYPT_SALT_LENGTH, CRYPT_ALGO_ID_YESCRYPT);
}
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
index 606e63404..8f907f624 100644
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
@@ -42,6 +42,8 @@ static Slapi_PluginDesc crypt_sha256_pdesc = {"crypt-sha256-password-storage-sch
static Slapi_PluginDesc crypt_sha512_pdesc = {"crypt-sha512-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Unix crypt algorithm (CRYPT-SHA512)"};
+static Slapi_PluginDesc crypt_yescrypt_pdesc = {"crypt-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Unix crypt algorithm (CRYPT-YESCRYPT)"};
+
static Slapi_PluginDesc clear_pdesc = {"clear-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "No encryption (CLEAR)"};
static Slapi_PluginDesc ns_mta_md5_pdesc = {"NS-MTA-MD5-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Netscape MD5 (NS-MTA-MD5)"};
@@ -324,6 +326,28 @@ crypt_sha512_pwd_storage_scheme_init(Slapi_PBlock *pb)
return (rc);
}
+int
+crypt_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
+{
+ int rc;
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> crypt_yescrypt_pwd_storage_scheme_init\n");
+
+ rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION,
+ (void *)SLAPI_PLUGIN_VERSION_01);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION,
+ (void *)&crypt_yescrypt_pdesc);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN,
+ (void *)crypt_pw_yescrypt_enc);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN,
+ (void *)crypt_pw_cmp);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME,
+ "CRYPT-YESCRYPT");
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= crypt_yescrypt_pwd_storage_scheme_init %d\n\n", rc);
+ return (rc);
+}
+
int
clear_pwd_storage_scheme_init(Slapi_PBlock *pb)
{
diff --git a/ldap/servers/plugins/pwdstorage/pwdstorage.h b/ldap/servers/plugins/pwdstorage/pwdstorage.h
index 72b382224..1b39752a5 100644
--- a/ldap/servers/plugins/pwdstorage/pwdstorage.h
+++ b/ldap/servers/plugins/pwdstorage/pwdstorage.h
@@ -80,6 +80,7 @@ char *crypt_pw_enc(const char *pwd);
char *crypt_pw_md5_enc(const char *pwd);
char *crypt_pw_sha256_enc(const char *pwd);
char *crypt_pw_sha512_enc(const char *pwd);
+char *crypt_pw_yescrypt_enc(const char *pwd);
int ns_mta_md5_pw_cmp(const char *userpwd, const char *dbpwd);
int md5_pw_cmp(const char *userpwd, const char *dbpwd);
char *md5_pw_enc(const char *pwd);
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 00509654c..74f7e6cf9 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -202,6 +202,19 @@ static const char *internal_entries[] =
"nsslapd-pluginVendor: 389 Project\n"
"nsslapd-pluginDescription: CRYPT-SHA512\n",
+ "dn: cn=CRYPT-YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
+ "objectClass: top\n"
+ "objectClass: nsSlapdPlugin\n"
+ "cn: CRYPT-YESCRYPT\n"
+ "nsslapd-pluginPath: libpwdstorage-plugin\n"
+ "nsslapd-pluginInitfunc: crypt_yescrypt_pwd_storage_scheme_init\n"
+ "nsslapd-pluginType: pwdstoragescheme\n"
+ "nsslapd-pluginEnabled: on\n"
+ "nsslapd-pluginId: CRYPT-YESCRYPT\n"
+ "nsslapd-pluginVersion: none\n"
+ "nsslapd-pluginVendor: 389 Project\n"
+ "nsslapd-pluginDescription: CRYPT-YESCRYPT\n",
+
"dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
"objectclass: top\n"
"objectclass: nsSlapdPlugin\n"
| 0 |
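Editor's note: the commit above feeds the yescrypt prefix into crypt_pw_enc_by_hash, whose body is not shown in the diff. The sketch below is only an assumption of what that hashing step boils down to with libxcrypt's crypt_gensalt_rn/crypt_rn (the same calls the GOST_YESCRYPT record later in this file uses); build with -lcrypt. "$y$" with count 0 asks for the library's default yescrypt cost, whereas the commit pins the cost up front via its "$y$j9T$" constant.

#include <crypt.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char setting[CRYPT_GENSALT_OUTPUT_SIZE];
    struct crypt_data data = {0};
    struct crypt_data check = {0};
    char *hash;
    char *again;

    /* Generate a yescrypt setting string ("$y$..."), default cost. */
    if (!crypt_gensalt_rn("$y$", 0, NULL, 0, setting, (int)sizeof(setting)))
        return 1;

    hash = crypt_rn("secret12", setting, &data, (int)sizeof(data));
    if (!hash)
        return 1;
    printf("%s\n", hash);

    /* Verification: re-hash with the stored value as the setting string. */
    again = crypt_rn("secret12", hash, &check, (int)sizeof(check));
    printf("match: %d\n", again != NULL && strcmp(again, hash) == 0);
    return 0;
}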
a252a6f9706465fd7a542a8e2039266f30cfd9d1
|
389ds/389-ds-base
|
Ticket 51014 - slapi_pal.c possible static buffer overflow
Bug Description: Due to an incorrect use of a buffer size,
static analysis in SUSE detected a possible overflow in
slapi_pal. However, it requires root permissions to exploit
anything, and thus is not a security issue.
Fix Description: Change the buffer we read the cgroup path into
to be MAXPATHLEN in size.
https://pagure.io/389-ds-base/issue/51014
Author: William Brown <[email protected]>
Review by: ???
|
commit a252a6f9706465fd7a542a8e2039266f30cfd9d1
Author: William Brown <[email protected]>
Date: Tue Apr 7 16:30:41 2020 +1000
Ticket 51014 - slapi_pal.c possible static buffer overflow
Bug Description: Due to an incorrect use of a buffer size,
static analysis in SUSE detected a possible overflow in
slapi_pal. However, it requires root permissions to exploit
anything, and thus is not a security issue.
Fix Description: Change the buffer we read the cgroup path into
to be MAXPATHLEN in size.
https://pagure.io/389-ds-base/issue/51014
Author: William Brown <[email protected]>
Review by: ???
diff --git a/ldap/servers/slapd/slapi_pal.c b/ldap/servers/slapd/slapi_pal.c
index c6b0dda7b..f658d461e 100644
--- a/ldap/servers/slapd/slapi_pal.c
+++ b/ldap/servers/slapd/slapi_pal.c
@@ -126,7 +126,7 @@ _spal_dir_exist(char *path)
static char *
_spal_cgroupv2_path() {
FILE *f;
- char s[256] = {0};
+ char s[MAXPATHLEN + 1] = {0};
char *res = NULL;
/* We discover our path by looking at /proc/self/cgroup */
f = fopen("/proc/self/cgroup", "r");
| 0 |
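Editor's note: the one-line change above sizes the read buffer from MAXPATHLEN instead of a fixed 256 bytes. A minimal sketch of the same bounded read of /proc/self/cgroup follows, assuming <sys/param.h> provides MAXPATHLEN as it does on Linux; it only illustrates the pattern and is not the slapi_pal.c routine.

#include <stdio.h>
#include <string.h>
#include <sys/param.h>   /* MAXPATHLEN */

int main(void)
{
    char line[MAXPATHLEN + 1] = {0};
    FILE *f = fopen("/proc/self/cgroup", "r");

    if (f == NULL)
        return 1;
    /* fgets() writes at most sizeof(line) - 1 characters plus the NUL,
     * so a cgroup path up to MAXPATHLEN no longer overruns the buffer. */
    while (fgets(line, sizeof(line), f) != NULL) {
        line[strcspn(line, "\n")] = '\0';
        printf("%s\n", line);
    }
    fclose(f);
    return 0;
}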
8d3b9218d0f1bcb2aa46311436538ed415d82560
|
389ds/389-ds-base
|
Use new PLUGIN_CONFIG_ENTRY feature to allow switching between txn and regular
Use new PLUGIN_CONFIG_ENTRY feature to allow switching between txn and regular
Setting the referint plugin type to "betxnpostoperation" will make
referint be a betxn postoperation plugin.
Reviewed by: nkinder (Thanks!)
|
commit 8d3b9218d0f1bcb2aa46311436538ed415d82560
Author: Rich Megginson <[email protected]>
Date: Mon Oct 10 09:59:22 2011 -0600
Use new PLUGIN_CONFIG_ENTRY feature to allow switching between txn and regular
Use new PLUGIN_CONFIG_ENTRY feature to allow switching between txn and regular
Setting the referint plugin type to "betxnpostoperation" will make
referint be a betxn postoperation plugin.
Reviewed by: nkinder (Thanks!)
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 86f1372be..8b3ac7549 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -109,6 +109,10 @@ void plugin_init_debug_level(int *level_ptr)
int
referint_postop_init( Slapi_PBlock *pb )
{
+ Slapi_Entry *plugin_entry = NULL;
+ char *plugin_type = NULL;
+ int delfn = SLAPI_PLUGIN_POST_DELETE_FN;
+ int mdnfn = SLAPI_PLUGIN_POST_MODRDN_FN;
/*
* Get plugin identity and stored it for later use
@@ -118,13 +122,23 @@ referint_postop_init( Slapi_PBlock *pb )
slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &referint_plugin_identity);
PR_ASSERT (referint_plugin_identity);
+ /* get args */
+ if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) &&
+ plugin_entry &&
+ (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) &&
+ plugin_type && strstr(plugin_type, "betxn")) {
+ delfn = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN;
+ mdnfn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN;
+ }
+ slapi_ch_free_string(&plugin_type);
+
if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION,
SLAPI_PLUGIN_VERSION_01 ) != 0 ||
slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION,
(void *)&pdesc ) != 0 ||
- slapi_pblock_set( pb, SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN,
+ slapi_pblock_set( pb, delfn,
(void *) referint_postop_del ) != 0 ||
- slapi_pblock_set( pb, SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN,
+ slapi_pblock_set( pb, mdnfn,
(void *) referint_postop_modrdn ) != 0 ||
slapi_pblock_set(pb, SLAPI_PLUGIN_START_FN,
(void *) referint_postop_start ) != 0 ||
| 0 |
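Editor's note: referint_postop_init() above decides at startup whether to register transactional or regular post-op callbacks from the plugin entry's own nsslapd-pluginType value. A hedged example of flipping that switch with ldapmodify follows; the DN assumes the stock referint plugin entry name (verify against your dse.ldif), and the server must be restarted for the new registration to take effect.

dn: cn=referential integrity postoperation,cn=plugins,cn=config
changetype: modify
replace: nsslapd-pluginType
nsslapd-pluginType: betxnpostoperation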
0ca62a2447ab94fb3369ea64423235f1f6cbcbad
|
389ds/389-ds-base
|
Ticket 49103 - python 2 support for installer
Bug Description: The python installer tools were originally targeted at python
3 only. Due to the slow adoption of python 3, and its various issues, we
cannot solely rely on its presence. As a result we need to support python 2
as well.
Fix Description: Add support for python 2 in the new installer code. This
was mainly related to a number of assumptions present in the init code, and
the use of set manipulations on arrays that is not possible in python 2.
https://fedorahosted.org/389/ticket/49103
Author: wibrown
Review by: spichugi (Thanks!)
|
commit 0ca62a2447ab94fb3369ea64423235f1f6cbcbad
Author: William Brown <[email protected]>
Date: Mon Jan 30 12:34:35 2017 +1000
Ticket 49103 - python 2 support for installer
Bug Description: The python installer tools were originally targeted at python
3 only. Due to the slow adoption of python 3, and its various issues, we
cannot solely rely on its presence. As a result we need to support python 2
as well.
Fix Description: Add support for python 2 in the new installer code. This
was mainly related to a number of assumptions present in the init code, and
the use of set manipulations on arrays that is not possible in python 2.
https://fedorahosted.org/389/ticket/49103
Author: wibrown
Review by: spichugi (Thanks!)
diff --git a/src/lib389/examples/ds-setup.inf b/src/lib389/examples/ds-setup.inf
new file mode 100644
index 000000000..3da0a51fb
--- /dev/null
+++ b/src/lib389/examples/ds-setup.inf
@@ -0,0 +1,41 @@
+; --- BEGIN COPYRIGHT BLOCK ---
+; Copyright (C) 2015 Red Hat, Inc.
+; All rights reserved.
+;
+; License: GPL (version 3 or any later version).
+; See LICENSE for details.
+; --- END COPYRIGHT BLOCK ---
+
+; Author: firstyear at redhat.com
+
+; This is a version 2 ds setup inf file.
+; It is used by the python versions of setup-ds-*
+; Most options map 1 to 1 to the original .inf file.
+; However, there are some differences that I envision
+; For example, note the split backend section.
+; You should be able to create, one, many or no backends in an install
+
+[general]
+config_version=2
+full_machine_name=localhost.localdomain
+strict_host_checking=False
+
+[slapd]
+instance_name=localhost
+user=dirsrv
+group=dirsrv
+port=389
+secure_port=636
+root_dn=cn=Directory Manager
+root_password=password
+prefix=/opt/dirsrv
+defaults=latest
+
+[backend-userRoot]
+suffix=dc=example,dc=com
+; this is controlled by slapd.InstallLdifFile == none, suggest or path in setup-ds.pl
+sample_entries=yes
+
+
+
+
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 2ef065a23..4394ecec7 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -520,21 +520,23 @@ class DirSrv(SimpleLDAPObject, object):
self.binddn = args.get(SER_ROOT_DN, DN_DM)
self.bindpw = args.get(SER_ROOT_PW, PW_DM)
self.creation_suffix = args.get(SER_CREATION_SUFFIX, DEFAULT_SUFFIX)
- self.userid = args.get(SER_USER_ID)
- if not self.userid:
- if os.getuid() == 0:
- # as root run as default user
- self.userid = DEFAULT_USER
- else:
- self.userid = pwd.getpwuid(os.getuid())[0]
-
- # Settings from args of server attributes
- self.serverid = args.get(SER_SERVERID_PROP, None)
- self.groupid = args.get(SER_GROUP_ID, self.userid)
- self.backupdir = args.get(SER_BACKUP_INST_DIR, DEFAULT_BACKUPDIR)
- # Allocate from the args, or use our env, or use /
- if args.get(SER_DEPLOYED_DIR, self.prefix) is not None:
- self.prefix = args.get(SER_DEPLOYED_DIR, self.prefix)
+ # These settings are only needed on a local connection.
+ if self.isLocal:
+ self.userid = args.get(SER_USER_ID)
+ if not self.userid:
+ if os.getuid() == 0:
+ # as root run as default user
+ self.userid = DEFAULT_USER
+ else:
+ self.userid = pwd.getpwuid(os.getuid())[0]
+
+ # Settings from args of server attributes
+ self.serverid = args.get(SER_SERVERID_PROP, None)
+ self.groupid = args.get(SER_GROUP_ID, self.userid)
+ self.backupdir = args.get(SER_BACKUP_INST_DIR, DEFAULT_BACKUPDIR)
+ # Allocate from the args, or use our env, or use /
+ if args.get(SER_DEPLOYED_DIR, self.prefix) is not None:
+ self.prefix = args.get(SER_DEPLOYED_DIR, self.prefix)
self.realm = args.get(SER_REALM, None)
if self.realm is not None:
self.krb5_realm = MitKrb5(realm=self.realm, debug=self.verbose)
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 4d75efc36..ef6b4ad6f 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -435,7 +435,6 @@ class Backend(DSLdapObject):
def create(self, dn=None, properties=None, basedn=None):
sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False)
- print(properties)
# Okay, now try to make the backend.
super(Backend, self).create(dn, properties, basedn)
# We check if the mapping tree exists in create, so do this *after*
diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py
index 21d8c2770..b1a541898 100644
--- a/src/lib389/lib389/instance/options.py
+++ b/src/lib389/lib389/instance/options.py
@@ -242,7 +242,7 @@ class Slapd2Base(Options2):
def _format(self, d):
new_d = {}
ks = d.keys()
- no_format_keys = ks - format_keys
+ no_format_keys = set(ks) - set(format_keys)
for k in no_format_keys:
new_d[k] = d[k]
diff --git a/src/lib389/setup.py b/src/lib389/setup.py
index 0582b250f..dfcd498df 100644
--- a/src/lib389/setup.py
+++ b/src/lib389/setup.py
@@ -51,6 +51,7 @@ setup(
# find lib389/clitools -name ds\* -exec echo \''{}'\', \;
data_files=[
+ ('/usr/share/lib389/examples/', ['examples/ds-setup.inf']),
('/usr/sbin/', [
# 'lib389/clitools/ds_setup',
'cli/dsadm',
| 0 |
7a9c069a7d64c1370353278c34bf9065aeb604ea
|
389ds/389-ds-base
|
Bug 610276 - fix coverity Defect Type: API usage errors issues
https://bugzilla.redhat.com/show_bug.cgi?id=610276
Resolves: bug 610276
Bug Description: fix coverity Defect Type: API usage errors issues
Reviewed by: nhosoi (Thanks!)
Branch: HEAD
Fix Description: Must call va_end after calling va_start.
Platforms tested: RHEL5 x86_64
Flag Day: no
Doc impact: no
|
commit 7a9c069a7d64c1370353278c34bf9065aeb604ea
Author: Rich Megginson <[email protected]>
Date: Thu Jul 1 16:31:27 2010 -0600
Bug 610276 - fix coverity Defect Type: API usage errors issues
https://bugzilla.redhat.com/show_bug.cgi?id=610276
Resolves: bug 610276
Bug Description: fix coverity Defect Type: API usage errors issues
Reviewed by: nhosoi (Thanks!)
Branch: HEAD
Fix Description: Must call va_end after calling va_start.
Platforms tested: RHEL5 x86_64
Flag Day: no
Doc impact: no
diff --git a/lib/base/util.cpp b/lib/base/util.cpp
index 7e9bdbdfa..d3a7c330b 100644
--- a/lib/base/util.cpp
+++ b/lib/base/util.cpp
@@ -121,9 +121,12 @@ NSAPI_PUBLIC int util_vsnprintf(char *s, int n, register const char *fmt,
NSAPI_PUBLIC int util_snprintf(char *s, int n, const char *fmt, ...)
{
+ int rc;
va_list args;
va_start(args, fmt);
- return PR_vsnprintf(s, n, fmt, args);
+ rc = PR_vsnprintf(s, n, fmt, args);
+ va_end(args);
+ return rc;
}
NSAPI_PUBLIC int util_vsprintf(char *s, register const char *fmt, va_list args)
@@ -133,9 +136,12 @@ NSAPI_PUBLIC int util_vsprintf(char *s, register const char *fmt, va_list args)
NSAPI_PUBLIC int util_sprintf(char *s, const char *fmt, ...)
{
+ int rc;
va_list args;
va_start(args, fmt);
- return PR_vsnprintf(s, UTIL_PRF_MAXSIZE, fmt, args);
+ rc = PR_vsnprintf(s, UTIL_PRF_MAXSIZE, fmt, args);
+ va_end(args);
+ return rc;
}
/* --------------------------- util_strcasecmp ---------------------------- */
diff --git a/lib/libaccess/nseframe.cpp b/lib/libaccess/nseframe.cpp
index b29e420fc..4a023e1dc 100644
--- a/lib/libaccess/nseframe.cpp
+++ b/lib/libaccess/nseframe.cpp
@@ -230,6 +230,7 @@ NSEFrame_t * nserrGenerate(NSErr_t * errp, long retcode, long errorid,
esp = va_arg(ap, char *);
efp->ef_errv[i] = STRDUP(esp);
}
+ va_end(ap);
/* Add the frame to the list (if any) */
if (errp) {
| 0 |
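Editor's note: the fix above follows the C stdarg contract: every va_start() needs a matching va_end() before the function returns, even when the result comes straight back from a vsnprintf-style call. Below is a minimal sketch of the corrected wrapper shape, using plain vsnprintf instead of PR_vsnprintf so it builds without NSPR.

#include <stdarg.h>
#include <stdio.h>

/* Wrapper over vsnprintf that respects the va_start/va_end pairing. */
static int demo_snprintf(char *s, size_t n, const char *fmt, ...)
{
    int rc;
    va_list args;

    va_start(args, fmt);
    rc = vsnprintf(s, n, fmt, args);
    va_end(args);            /* must run on every path after va_start */
    return rc;
}

int main(void)
{
    char buf[64];
    demo_snprintf(buf, sizeof(buf), "%s-%d", "ldap", 389);
    puts(buf);               /* prints "ldap-389" */
    return 0;
}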
7f5268f6f41d74655ee86d948b8ea301adb0138a
|
389ds/389-ds-base
|
Ticket 564 - Is ldbm_txn_ruv_modify_context still required
Bug description
Ticket 47358 introduces backend optimizations using a configuration switch.
By default all optimisations are disabled.
One optimisation is related to the changelog and DB RUV being written back to disk in the same txn with the same value.
This is a consequence of bug fix 543633. Although the write-back of the DB RUV brings a little overhead,
it is not strictly required. In fact, since 633168, the changelog RUV is in sync with the data store. So
changelog RUV and DB RUV have the same values.
Fix Description:
This fix enables by default the ticket 47358 optimization BACKEND_OPT_NO_RUV_UPDATE that does
not compute/update the database RUV.
Two consequences of this are:
1. In case of a disorderly shutdown (crash) and after recovery, we need to rebuild the DB RUV
2. Provide a mechanism to monitor replication status, as the DB RUV would be late compared
to the current update status
1. is achieved using the changelog RUV (in sync with the data store) to rebuild the database RUV.
2. was done with ticket https://fedorahosted.org/389/ticket/47350
https://fedorahosted.org/389/ticket/564
Reviewed by: Rich, Ludwig (thanks to you both !)
Platforms tested: fedora 17
Flag Day: no
Doc impact: no
|
commit 7f5268f6f41d74655ee86d948b8ea301adb0138a
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Wed Jun 5 11:09:25 2013 +0200
Ticket 564 - Is ldbm_txn_ruv_modify_context still required
Bug description
Ticket 47358 introduces backend optimizations using a configuration switch.
By default all optimisations are disabled.
One optimisation is related to the changelog and DB RUV being written back to disk in the same txn with the same value.
This is a consequence of bug fix 543633. Although the write-back of the DB RUV brings a little overhead,
it is not strictly required. In fact, since 633168, the changelog RUV is in sync with the data store. So
changelog RUV and DB RUV have the same values.
Fix Description:
This fix enables by default the ticket 47358 optimization BACKEND_OPT_NO_RUV_UPDATE that does
not compute/update the database RUV.
Two consequences of this are:
1. In case of a disorderly shutdown (crash) and after recovery, we need to rebuild the DB RUV
2. Provide a mechanism to monitor replication status, as the DB RUV would be late compared
to the current update status
1. is achieved using the changelog RUV (in sync with the data store) to rebuild the database RUV.
2. was done with ticket https://fedorahosted.org/389/ticket/47350
https://fedorahosted.org/389/ticket/564
Reviewed by: Rich, Ludwig (thanks to you both !)
Platforms tested: fedora 17
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f9914e47d..8121901f2 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -1494,41 +1494,56 @@ int replica_check_for_data_reload (Replica *r, void *arg)
* sessions.
*/
- rc = ruv_compare_ruv(upper_bound_ruv, "changelog max RUV", r_ruv, "database RUV", 0, SLAPI_LOG_FATAL);
- if (RUV_COMP_IS_FATAL(rc))
- {
- /* create a temporary replica object to conform to the interface */
- r_obj = object_new (r, NULL);
-
- /* We can't use existing changelog - remove existing file */
+ if (slapi_disordely_shutdown(PR_FALSE)) {
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "replica_check_for_data_reload: "
- "Warning: data for replica %s does not match the data in the changelog. "
- "Recreating the changelog file. "
- "This could affect replication with replica's consumers in which case the "
- "consumers should be reinitialized.\n",
+ "Warning: disordely shutdown for replica %s. Check if DB RUV needs to be updated\n",
slapi_sdn_get_dn(r->repl_root));
+
+ if (ruv_covers_ruv(upper_bound_ruv, r_ruv) && !ruv_covers_ruv(r_ruv, upper_bound_ruv)) {
+ /*
+ * The Changelog RUV is ahead of the RUV in the DB.
+ * RUV DB was likely not flushed on disk.
+ */
+
+ ruv_force_csn_update_from_ruv(upper_bound_ruv, r_ruv,
+ "Force update of database RUV (from CL RUV) -> ", SLAPI_LOG_FATAL);
+ replica_set_ruv_dirty(r);
+ }
+
+ } else {
- rc = cl5DeleteDBSync (r_obj);
+ rc = ruv_compare_ruv(upper_bound_ruv, "changelog max RUV", r_ruv, "database RUV", 0, SLAPI_LOG_FATAL);
+ if (RUV_COMP_IS_FATAL(rc)) {
+ /* create a temporary replica object to conform to the interface */
+ r_obj = object_new(r, NULL);
- object_release (r_obj);
+ /* We can't use existing changelog - remove existing file */
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "replica_check_for_data_reload: "
+ "Warning: data for replica %s does not match the data in the changelog. "
+ "Recreating the changelog file. "
+ "This could affect replication with replica's consumers in which case the "
+ "consumers should be reinitialized.\n",
+ slapi_sdn_get_dn(r->repl_root));
- if (rc == CL5_SUCCESS)
- {
- /* log changes to mark starting point for replication */
- rc = replica_log_ruv_elements (r);
+ rc = cl5DeleteDBSync(r_obj);
+
+ object_release(r_obj);
+
+ if (rc == CL5_SUCCESS) {
+ /* log changes to mark starting point for replication */
+ rc = replica_log_ruv_elements(r);
+ }
+ } else if (rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "replica_check_for_data_reload: "
+ "Warning: for replica %s there were some differences between the changelog max RUV and the "
+ "database RUV. If there are obsolete elements in the database RUV, you "
+ "should remove them using the CLEANALLRUV task. If they are not obsolete, "
+ "you should check their status to see why there are no changes from those "
+ "servers in the changelog.\n",
+ slapi_sdn_get_dn(r->repl_root));
+ rc = 0;
}
- }
- else if (rc)
- {
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "replica_check_for_data_reload: "
- "Warning: for replica %s there were some differences between the changelog max RUV and the "
- "database RUV. If there are obsolete elements in the database RUV, you "
- "should remove them using the CLEANALLRUV task. If they are not obsolete, "
- "you should check their status to see why there are no changes from those "
- "servers in the changelog.\n",
- slapi_sdn_get_dn(r->repl_root));
- rc = 0;
- }
+ } // slapi_disordely_shutdown
object_release (ruv_obj);
}
diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c
index 43f2c84d4..bc2a270d7 100644
--- a/ldap/servers/plugins/replication/repl5_ruv.c
+++ b/ldap/servers/plugins/replication/repl5_ruv.c
@@ -2255,6 +2255,37 @@ ruv_is_newer (Object *sruvobj, Object *cruvobj)
return is_newer;
}
+/*
+ * This routine is called after a disordely shutdown
+ * The Database RUV was found late compare to the changelog RUV
+ */
+void
+ruv_force_csn_update_from_ruv(RUV *src_ruv, RUV *tgt_ruv, char *msg, int logLevel) {
+ RUVElement *replica = NULL;
+ char csnStr [CSN_STRSIZE];
+ int cookie;
+
+ slapi_rwlock_rdlock(src_ruv->lock);
+
+ for (replica = dl_get_first(src_ruv->elements, &cookie);
+ NULL != replica;
+ replica = dl_get_next(src_ruv->elements, &cookie)) {
+ /*
+ * In case the DB RUV (tgt_ruv) is behind the CL RUV (src_ruv)
+ * updates the DB RUV.
+ */
+ if (!ruv_covers_csn(tgt_ruv, replica->csn)) {
+ ruv_force_csn_update(tgt_ruv, replica->csn);
+ csn_as_string(replica->csn, PR_FALSE, csnStr);
+ slapi_log_error(logLevel, repl_plugin_name, "%s %s\n",
+ msg, csnStr);
+ }
+ }
+
+ slapi_rwlock_unlock(src_ruv->lock);
+
+}
+
void
ruv_force_csn_update (RUV *ruv, CSN *csn)
{
diff --git a/ldap/servers/plugins/replication/repl5_ruv.h b/ldap/servers/plugins/replication/repl5_ruv.h
index c9b85ac2f..4d8466223 100644
--- a/ldap/servers/plugins/replication/repl5_ruv.h
+++ b/ldap/servers/plugins/replication/repl5_ruv.h
@@ -144,6 +144,7 @@ int ruv_local_contains_supplier(RUV *ruv, ReplicaId rid);
PRBool ruv_has_csns(const RUV *ruv);
PRBool ruv_has_both_csns(const RUV *ruv);
PRBool ruv_is_newer (Object *sruv, Object *cruv);
+void ruv_force_csn_update_from_ruv(RUV *src_ruv, RUV *tgt_ruv, char *msg, int logLevel);
void ruv_force_csn_update (RUV *ruv, CSN *csn);
void ruv_insert_dummy_min_csn (RUV *ruv);
int ruv_compare_ruv(const RUV *ruv1, const char *ruv1name, const RUV *ruv2, const char *ruv2name, int strict, int loglevel);
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 20af828e9..daf179476 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -1643,6 +1643,7 @@ dblayer_start(struct ldbminfo *li, int dbmode)
LDAPDebug(LDAP_DEBUG_ANY, "Detected Disorderly Shutdown last "
"time Directory Server was running, recovering "
"database.\n", 0, 0, 0);
+ slapi_disordely_shutdown(PR_TRUE);
}
}
switch (dbmode&DBLAYER_RESTORE_MASK) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 24668615e..ae2e8fc59 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1436,7 +1436,7 @@ static config_info ldbm_config[] = {
{CONFIG_PAGEDLOOKTHROUGHLIMIT, CONFIG_TYPE_INT, "0", &ldbm_config_pagedlookthroughlimit_get, &ldbm_config_pagedlookthroughlimit_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_PAGEDIDLISTSCANLIMIT, CONFIG_TYPE_INT, "0", &ldbm_config_pagedallidsthreshold_get, &ldbm_config_pagedallidsthreshold_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_RANGELOOKTHROUGHLIMIT, CONFIG_TYPE_INT, "5000", &ldbm_config_rangelookthroughlimit_get, &ldbm_config_rangelookthroughlimit_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
- {CONFIG_BACKEND_OPT_LEVEL, CONFIG_TYPE_INT, "0", &ldbm_config_backend_opt_level_get, &ldbm_config_backend_opt_level_set, CONFIG_FLAG_ALWAYS_SHOW},
+ {CONFIG_BACKEND_OPT_LEVEL, CONFIG_TYPE_INT, "1", &ldbm_config_backend_opt_level_get, &ldbm_config_backend_opt_level_set, CONFIG_FLAG_ALWAYS_SHOW},
{NULL, 0, NULL, NULL, NULL, 0}
};
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index d19faa586..52b9c3c5b 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -3223,3 +3223,14 @@ slapi_set_plugin_open_rootdn_bind(Slapi_PBlock *pb){
ptd_set_special_data(&(config->plgc_bind_subtrees), PLGC_DATA_BIND_ROOT);
}
+
+PRBool
+slapi_disordely_shutdown(PRBool set)
+{
+ static PRBool is_disordely_shutdown = PR_FALSE;
+
+ if (set) {
+ is_disordely_shutdown = PR_TRUE;
+ }
+ return (is_disordely_shutdown);
+}
\ No newline at end of file
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index ba8b6e6fb..d7d968d56 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -7362,6 +7362,8 @@ uint64_t slapi_str_to_u64(const char *s);
void slapi_set_plugin_open_rootdn_bind(Slapi_PBlock *pb);
+PRBool slapi_disordely_shutdown(PRBool set);
+
/*
* Public entry extension getter/setter functions
*
| 0 |
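Editor's note: slapi_disordely_shutdown() in the diff above is a one-way latch: the recovery path calls it once with PR_TRUE, and every later caller passes PR_FALSE and simply reads the sticky flag. Below is a tiny standalone sketch of that set-once pattern with a plain int in place of PRBool; as in the original, it is not thread-safe on its own and relies on the flag being latched during single-threaded startup.

#include <stdio.h>

/* One-way "sticky" flag: set == 1 latches it, set == 0 only reads it. */
static int disorderly_shutdown(int set)
{
    static int latched = 0;

    if (set)
        latched = 1;
    return latched;
}

int main(void)
{
    printf("%d\n", disorderly_shutdown(0)); /* 0: nothing detected yet */
    disorderly_shutdown(1);                 /* recovery path latches it */
    printf("%d\n", disorderly_shutdown(0)); /* 1: stays set from now on */
    return 0;
}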
80fb1c7aada872dd984964b87035d5850f223f01
|
389ds/389-ds-base
|
ldclt: -e randombinddnfromfile fails with LDAP_UNWILLING_TO_PERFORM (53)
https://bugzilla.redhat.com/show_bug.cgi?id=555189
Resolves: bug 555189
Bug Description:
ldclt code is not passing the password correctly to ldap_sasl_bind_s.
The server receives NULL credential. Due to the default configuration:
nsslapd-allow-unauthenticated-binds: off
the unauthenticated bind fails with LDAP_UNWILLING_TO_PERFORM.
Reviewed by: [email protected]
Files: see diff
Branch: HEAD
Fix Description:
When "-e randombinddnfromfile=file" is given, bind dn and password pair
is retrieved from the file and is supposed to pass to ldap_sasl_bind_s.
Although the password was read from the file, but it was not set to the
berval "cred" which was passed to ldap_sasl_bind_s. Therefore, the
bind operation tried to bind with bind dn and NULL password, which now
fails since it's considered as an unauthenticated bind. This change
fixes the problem. Also, a usage typo is being fixed.
Platforms tested: Fedora 11
Flag Day: no
Doc impact: no
|
commit 80fb1c7aada872dd984964b87035d5850f223f01
Author: Noriko Hosoi <[email protected]>
Date: Thu Jan 14 10:58:12 2010 -0800
ldclt: -e randombinddnfromfile fails with LDAP_UNWILLING_TO_PERFORM (53)
https://bugzilla.redhat.com/show_bug.cgi?id=555189
Resolves: bug 555189
Bug Description:
ldclt code is not passing the password correctly to ldap_sasl_bind_s.
The server receives NULL credential. Due to the default configuration:
nsslapd-allow-unauthenticated-binds: off
the unauthenticated bind fails with LDAP_UNWILLING_TO_PERFORM.
Reviewed by: [email protected]
Files: see diff
Branch: HEAD
Fix Description:
When "-e randombinddnfromfile=file" is given, bind dn and password pair
is retrieved from the file and is supposed to pass to ldap_sasl_bind_s.
Although the password was read from the file, but it was not set to the
berval "cred" which was passed to ldap_sasl_bind_s. Therefore, the
bind operation tried to bind with bind dn and NULL password, which now
fails since it's considered as an unauthenticated bind. This change
fixes the problem. Also, a usage typo is being fixed.
Platforms tested: Fedora 11
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/tools/ldclt/ldapfct.c b/ldap/servers/slapd/tools/ldclt/ldapfct.c
index ac8250aa4..359b5fe10 100644
--- a/ldap/servers/slapd/tools/ldclt/ldapfct.c
+++ b/ldap/servers/slapd/tools/ldclt/ldapfct.c
@@ -770,10 +770,12 @@ connectToServer (
binddn = "";
passwd = NULL;
} else {
- binddn = tttctx->bufBindDN;
- passwd = tttctx->bufPasswd;
- cred.bv_val = (char *)passwd;
- cred.bv_len = strlen(passwd);
+ binddn = tttctx->bufBindDN?tttctx->bufBindDN:mctx.bindDN;
+ passwd = tttctx->bufPasswd?tttctx->bufPasswd:mctx.passwd;
+ if (passwd) {
+ cred.bv_val = (char *)passwd;
+ cred.bv_len = strlen(passwd);
+ }
}
if (mctx.mode & LDAP_V2)
@@ -931,22 +933,35 @@ connectToServer (
((!(tttctx->binded)) || (mctx.mode & BIND_EACH_OPER)))
{
struct berval *servercredp = NULL;
+ char *binddn = NULL;
+ char *passwd = NULL;
if (buildNewBindDN (tttctx) < 0) /*JLS 05-01-01*/
- return (-1); /*JLS 05-01-01*/
+ return (-1); /*JLS 05-01-01*/
+ if (tttctx->bufPasswd) {
+ binddn = tttctx->bufBindDN;
+ passwd = tttctx->bufPasswd;
+ } else if (mctx.passwd) {
+ binddn = mctx.bindDN;
+ passwd = mctx.passwd;
+ }
+ if (passwd) {
+ cred.bv_val = passwd;
+ cred.bv_len = strlen(passwd);
+ }
if (mctx.mode & VERY_VERBOSE)
- printf ("ldclt[%d]: T%03d: Before ldap_simple_bind_s (%s, %s)\n",
- mctx.pid, tttctx->thrdNum, tttctx->bufBindDN,
- mctx.passwd?tttctx->bufPasswd:"NO PASSWORD PROVIDED");
- ret = ldap_sasl_bind_s (tttctx->ldapCtx, tttctx->bufBindDN, LDAP_SASL_SIMPLE,
- &cred, NULL, NULL, &servercredp); /*JLS 05-01-01*/
+ printf ("ldclt[%d]: T%03d: Before ldap_simple_bind_s (%s, %s)\n",
+ mctx.pid, tttctx->thrdNum, binddn,
+ passwd?passwd:"NO PASSWORD PROVIDED");
+ ret = ldap_sasl_bind_s (tttctx->ldapCtx, binddn,
+ LDAP_SASL_SIMPLE, &cred, NULL, NULL, &servercredp); /*JLS 05-01-01*/
ber_bvfree(servercredp);
if (mctx.mode & VERY_VERBOSE)
- printf ("ldclt[%d]: T%03d: After ldap_simple_bind_s (%s, %s)\n",
- mctx.pid, tttctx->thrdNum, tttctx->bufBindDN,
- mctx.passwd?tttctx->bufPasswd:"NO PASSWORD PROVIDED");
+ printf ("ldclt[%d]: T%03d: After ldap_simple_bind_s (%s, %s)\n",
+ mctx.pid, tttctx->thrdNum, binddn,
+ passwd?passwd:"NO PASSWORD PROVIDED");
if (ret == LDAP_SUCCESS) /*JLS 18-12-00*/
- tttctx->binded = 1; /*JLS 18-12-00*/
+ tttctx->binded = 1; /*JLS 18-12-00*/
else /*JLS 18-12-00*/
{ /*JLS 18-12-00*/
tttctx->binded = 0; /*JLS 18-12-00*/
@@ -2048,10 +2063,12 @@ createMissingNodes (
binddn = "";
passwd = NULL;
} else {
- binddn = tttctx->bufBindDN;
- passwd = tttctx->bufPasswd;
- cred.bv_val = (char *)passwd;
- cred.bv_len = strlen(passwd);
+ binddn = tttctx->bufBindDN?tttctx->bufBindDN:mctx.bindDN;
+ passwd = tttctx->bufPasswd?tttctx->bufPasswd:mctx.passwd;
+ if (passwd) {
+ cred.bv_val = (char *)passwd;
+ cred.bv_len = strlen(passwd);
+ }
}
if (mctx.mode & LDAP_V2)
diff --git a/ldap/servers/slapd/tools/ldclt/ldclt.use b/ldap/servers/slapd/tools/ldclt/ldclt.use
index 4f388e642..96136909e 100644
--- a/ldap/servers/slapd/tools/ldclt/ldclt.use
+++ b/ldap/servers/slapd/tools/ldclt/ldclt.use
@@ -50,7 +50,7 @@ usage: ldclt [-qQvV] [-E <max errors>]
randombaselow=value : low value for random generator.
randombasehigh=value : high value for random generator.
randombinddn : random bind DN.
- randombinddnfromfile=fine : retrieve bind DN & passwd from file
+ randombinddnfromfile=file : retrieve bind DN & passwd from file
randombinddnlow=value : low value for random generator.
randombinddnhigh=value : high value for random generator.
rdn=attrname:value : alternate for -f.
diff --git a/ldap/servers/slapd/tools/ldclt/ldcltU.c b/ldap/servers/slapd/tools/ldclt/ldcltU.c
index 7540c5f40..1233a5583 100644
--- a/ldap/servers/slapd/tools/ldclt/ldcltU.c
+++ b/ldap/servers/slapd/tools/ldclt/ldcltU.c
@@ -100,7 +100,7 @@
* randombaselow=value : low value for random generator.
* randombasehigh=value : high value for random generator.
* randombinddn : random bind DN.
- * randombinddnfromfile=fine : retrieve bind DN & passwd from file
+ * randombinddnfromfile=file : retrieve bind DN & passwd from file
* randombinddnlow=value : low value for random generator.
* randombinddnhigh=value : high value for random generator.
* rdn=attrname:value : alternate for -f.
@@ -195,7 +195,7 @@ void usage ()
(void) printf (" randombaselow=value : low value for random generator.\n");
(void) printf (" randombasehigh=value : high value for random generator.\n");
(void) printf (" randombinddn : random bind DN.\n");
- (void) printf (" randombinddnfromfile=fine : retrieve bind DN & passwd from file\n");
+ (void) printf (" randombinddnfromfile=file : retrieve bind DN & passwd from file\n");
(void) printf (" randombinddnlow=value : low value for random generator.\n");
(void) printf (" randombinddnhigh=value : high value for random generator.\n");
(void) printf (" rdn=attrname:value : alternate for -f.\n");
| 0 |
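Editor's note: the bug above came down to never filling the berval handed to ldap_sasl_bind_s(), so the server saw an unauthenticated bind. Below is a hedged, standalone sketch of a correct simple bind with the OpenLDAP client API used in the diff (build with -lldap -llber); the URI, bind DN, and password are placeholders, not values from the ticket.

#include <ldap.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    LDAP *ld = NULL;
    struct berval cred = {0, NULL};
    struct berval *servercredp = NULL;
    const char *binddn = "cn=Directory Manager";   /* placeholder */
    const char *passwd = "password";               /* placeholder */
    int ver = LDAP_VERSION3;
    int rc;

    if (ldap_initialize(&ld, "ldap://localhost:389") != LDAP_SUCCESS)
        return 1;
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &ver);

    /* The credential must be copied into the berval, otherwise the
     * server receives an unauthenticated (empty-password) bind. */
    cred.bv_val = (char *)passwd;
    cred.bv_len = strlen(passwd);

    rc = ldap_sasl_bind_s(ld, binddn, LDAP_SASL_SIMPLE, &cred,
                          NULL, NULL, &servercredp);
    printf("bind: %s\n", ldap_err2string(rc));

    ber_bvfree(servercredp);
    ldap_unbind_ext_s(ld, NULL, NULL);
    return rc == LDAP_SUCCESS ? 0 : 1;
}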
30109ae563c3b6ee760c0b0b9152adf35245b486
|
389ds/389-ds-base
|
Bug 614511 - fix coverity Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverity Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in repl_objset_destroy().
|
commit 30109ae563c3b6ee760c0b0b9152adf35245b486
Author: Endi S. Dewata <[email protected]>
Date: Mon Jul 12 23:16:43 2010 -0500
Bug 614511 - fix coverity Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverity Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in repl_objset_destroy().
diff --git a/ldap/servers/plugins/replication/repl_objset.c b/ldap/servers/plugins/replication/repl_objset.c
index 4f28eb844..77e5e7c63 100644
--- a/ldap/servers/plugins/replication/repl_objset.c
+++ b/ldap/servers/plugins/replication/repl_objset.c
@@ -179,14 +179,11 @@ repl_objset_destroy(Repl_Objset **o, time_t maxwait, FNFree panic_fn)
* function for each remaining object.
*/
PR_Lock((*o)->lock);
- if ((co = llistGetFirst((*o)->objects, &cookie)) != NULL)
+ co = llistGetFirst((*o)->objects, &cookie);
+ while (NULL != co)
{
panic_fn(co->data);
- while (NULL != co)
- {
- panic_fn(co->data);
- co = llistGetNext((*o)->objects, &cookie);
- }
+ co = llistGetNext((*o)->objects, &cookie);
}
PR_Unlock((*o)->lock);
}
| 0 |
76c0e75a2594308e2c5ec4f24c1e7c72ab4f2db5
|
389ds/389-ds-base
|
Issue 50425 - Add jemalloc LD_PRELOAD to systemd drop-in file
The previous fix (7466be3) for jemalloc inclusion stopped installing
template-initconfig, rendering ds-setup.pl unusable.
This fix moves the template-initconfig into -legacy-tools
to make it available only when necessary.
Relates: https://pagure.io/389-ds-base/issue/50425
Reviewed by: Viktor, Ludwig, Thierry, Mark (thanks!)
|
commit 76c0e75a2594308e2c5ec4f24c1e7c72ab4f2db5
Author: Matus Honek <[email protected]>
Date: Wed Jul 17 08:59:39 2019 +0000
Issue 50425 - Add jemalloc LD_PRELOAD to systemd drop-in file
The previous fix (7466be3) for jemalloc inclusion stopped installing
template-initconfig, rendering ds-setup.pl unusable.
This fix moves the template-initconfig into -legacy-tools
to make it available only when necessary.
Relates: https://pagure.io/389-ds-base/issue/50425
Reviewed by: Viktor, Ludwig, Thierry, Mark (thanks!)
diff --git a/Makefile.am b/Makefile.am
index 6c948a338..2ea1344af 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -654,10 +654,8 @@ endif
#------------------------
config_DATA = $(srcdir)/lib/ldaputil/certmap.conf \
$(srcdir)/ldap/schema/slapd-collations.conf \
+ ldap/admin/src/template-initconfig \
ldap/servers/snmp/ldap-agent.conf
-if !SYSTEMD
-config_DATA += ldap/admin/src/template-initconfig
-endif
# the schema files in this list are either not
# standard schema, not tested, or not compatible
@@ -987,14 +985,12 @@ dist_man_MANS = man/man1/dbscan.1 \
man/man5/slapd-collations.conf.5 \
man/man8/suffix2instance.8 \
man/man8/syntax-validate.pl.8 \
+ man/man5/template-initconfig.5 \
man/man8/upgradednformat.8 \
man/man8/upgradedb.8 \
man/man8/usn-tombstone-cleanup.pl.8 \
man/man8/vlvindex.8 \
man/man8/verify-db.pl.8
-if !SYSTEMD
-dist_man_MANS += man/man5/template-initconfig.5
-endif
#------------------------
# updates
@@ -2298,14 +2294,20 @@ fixupcmd = sed \
if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
$(fixupcmd) $^ > $@
-if !SYSTEMD
%/$(PACKAGE_NAME): %/base-initconfig.in
if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
+if SYSTEMD
+ $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@
+else
$(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@
$(fixupcmd) $(srcdir)/ldap/admin/src/initconfig.in >> $@
+endif
%/template-initconfig: %/template-initconfig.in
if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
+if SYSTEMD
+ $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@
+else
$(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@
endif
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 16406d795..6edd1ebf3 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -741,6 +741,8 @@ exit 0
%{_libexecdir}/%{pkgname}/ds_selinux_enabled
%{_libexecdir}/%{pkgname}/ds_selinux_port_query
%if %{use_perl}
+%config(noreplace)%{_sysconfdir}/%{pkgname}/config/template-initconfig
+%{_mandir}/man5/template-initconfig.5.gz
%{_datadir}/%{pkgname}/properties/*.res
%{_datadir}/%{pkgname}/script-templates
%{_datadir}/%{pkgname}/updates
| 0 |
af9bb7206aa39bb0c4c6fdbaa2653391f120671f
|
389ds/389-ds-base
|
Bump version to 1.4.1.1
|
commit af9bb7206aa39bb0c4c6fdbaa2653391f120671f
Author: Mark Reynolds <[email protected]>
Date: Wed Jan 30 15:08:52 2019 -0500
Bump version to 1.4.1.1
diff --git a/VERSION.sh b/VERSION.sh
index 2f8db5740..4acdeb6b8 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=4
-VERSION_MAINT=1.0
+VERSION_MAINT=1.1
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
| 0 |
bf46ccec94da41616b7fa1bd5ac39c970b367846
|
389ds/389-ds-base
|
Issue 4272 RFE - add support for gost-yescrypt for hashing passwords (#4497)
Bug Description: The state standard of the Russian Federation requires
strong password hashes relying on the GOST R 34.11-2012 (also known as
Streebog[0]) hash function.
Fix Description: One of the implementations of the Streebog hash function
was made by libxcrypt, which has come as the replacement for glibc's
libcrypt. This means that several of the pwdstorage plugins have already
linked against libxcrypt.
From libxcrypt docs:
gost-yescrypt uses the output from the yescrypt hashing method in
place of a hmac message. Thus, the yescrypt crypto properties
are superseded by the GOST R 34.11-2012 (Streebog) hash function
with a 256 bit digest.
[0]: https://tools.ietf.org/html/rfc6986
fixes: #4272
Reviewed by: @Firstyear, @mreynolds389 (Thanks!)
|
commit bf46ccec94da41616b7fa1bd5ac39c970b367846
Author: Stanislav Levin <[email protected]>
Date: Tue Dec 15 02:49:34 2020 +0300
Issue 4272 RFE - add support for gost-yescrypt for hashing passwords (#4497)
Bug Description: The state standard of the Russian Federation requires
strong password hashes relying on the GOST R 34.11-2012 (also known as
Streebog[0]) hash function.
Fix Description: One of the implementations of the Streebog hash function
was made by libxcrypt, which has come as the replacement for glibc's
libcrypt. This means that several of the pwdstorage plugins have already
linked against libxcrypt.
From libxcrypt docs:
gost-yescrypt uses the output from the yescrypt hashing method in
place of a hmac message. Thus, the yescrypt crypto properties
are superseded by the GOST R 34.11-2012 (Streebog) hash function
with a 256 bit digest.
[0]: https://tools.ietf.org/html/rfc6986
fixes: #4272
Reviewed by: @Firstyear, @mreynolds389 (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index 99897f3ca..2bd136202 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1582,7 +1582,9 @@ libpwdstorage_plugin_la_SOURCES = ldap/servers/plugins/pwdstorage/clear_pwd.c \
ldap/servers/plugins/pwdstorage/sha_pwd.c \
ldap/servers/plugins/pwdstorage/smd5_pwd.c \
ldap/servers/plugins/pwdstorage/ssha_pwd.c \
- ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+ ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c \
+ ldap/servers/plugins/pwdstorage/gost_yescrypt.c \
+ $(NULLSTRING)
libpwdstorage_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS)
libpwdstorage_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT)
diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
index 5f4c2080a..2eb0ff362 100644
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -125,13 +125,16 @@ def _test_algo_for_pbkdf2(inst, algo_name):
ALGO_SET = ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
- 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',)
+ 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
+ 'GOST_YESCRYPT',
+)
if default_paths.rust_enabled and ds_is_newer('1.4.3.0'):
ALGO_SET = ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
- 'PBKDF2-SHA1', 'PBKDF2-SHA256', 'PBKDF2-SHA512',)
+ 'PBKDF2-SHA1', 'PBKDF2-SHA256', 'PBKDF2-SHA512', 'GOST_YESCRYPT',
+ )
@pytest.mark.parametrize("algo", ALGO_SET)
def test_pwd_algo_test(topology_st, algo):
diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
index 1a05f4a67..2eccae9b2 100644
--- a/ldap/ldif/template-dse-minimal.ldif.in
+++ b/ldap/ldif/template-dse-minimal.ldif.in
@@ -194,6 +194,15 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials
nsslapd-pluginid: aes-storage-scheme
nsslapd-pluginprecedence: 1
+dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
+objectclass: top
+objectclass: nsSlapdPlugin
+cn: GOST_YESCRYPT
+nsslapd-pluginpath: libpwdstorage-plugin
+nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
+nsslapd-plugintype: pwdstoragescheme
+nsslapd-pluginenabled: on
+
dn: cn=Syntax Validation Task,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index b09fb3893..b255b4dec 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -242,6 +242,15 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials
nsslapd-pluginid: aes-storage-scheme
nsslapd-pluginprecedence: 1
+dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
+objectclass: top
+objectclass: nsSlapdPlugin
+cn: GOST_YESCRYPT
+nsslapd-pluginpath: libpwdstorage-plugin
+nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
+nsslapd-plugintype: pwdstoragescheme
+nsslapd-pluginenabled: on
+
dn: cn=Syntax Validation Task,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
new file mode 100644
index 000000000..2af1c2919
--- /dev/null
+++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
@@ -0,0 +1,64 @@
+/** BEGIN COPYRIGHT BLOCK
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <crypt.h>
+#include <errno.h>
+
+#include "pwdstorage.h"
+
+int
+gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd)
+{
+ /* return 0 If the passwords match, return 1 if passwords do not match. */
+ int rc = 1;
+ char *hash;
+ struct crypt_data output = {0};
+
+ hash = crypt_rn(userpwd, dbpwd, &output, (int) sizeof(output));
+ if (!hash) {
+ slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
+ "Unable to hash userpwd value: %d\n", errno);
+ return rc;
+ }
+
+ if (slapi_ct_memcmp(hash, dbpwd, strlen(dbpwd)) == 0) {
+ rc = 0;
+ }
+
+ return rc;
+}
+
+char *
+gost_yescrypt_pw_enc(const char *pwd)
+{
+ const char *prefix = "$gy$";
+ char salt[CRYPT_GENSALT_OUTPUT_SIZE];
+ char *hash;
+ char *enc = NULL;
+ struct crypt_data output = {0};
+
+ /* 0 - means default, in Y2020 it defaults to 5 */
+ if (!crypt_gensalt_rn(prefix, 0, NULL, 0, salt, (int) sizeof(salt))) {
+ slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
+ "Unable to generate salt: %d\n", errno);
+ return NULL;
+ }
+
+ hash = crypt_rn(pwd, salt, &output, (int) sizeof(output));
+ if (!hash) {
+ slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
+ "Unable to hash pwd value: %d\n", errno);
+ return NULL;
+ }
+ enc = slapi_ch_smprintf("%c%s%c%s", PWD_HASH_PREFIX_START,
+ GOST_YESCRYPT_SCHEME_NAME, PWD_HASH_PREFIX_END,
+ hash);
+
+ return enc;
+}
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
index 8efe4d6b3..606e63404 100644
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
@@ -52,6 +52,8 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS
static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"};
+static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"};
+
static char *plugin_name = "NSPwdStoragePlugin";
int
@@ -428,3 +430,20 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= pbkdf2_sha256_pwd_storage_scheme_init %d\n", rc);
return rc;
}
+
+int
+gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
+{
+ int rc;
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n");
+
+ rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp);
+ rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME);
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc);
+ return rc;
+}
diff --git a/ldap/servers/plugins/pwdstorage/pwdstorage.h b/ldap/servers/plugins/pwdstorage/pwdstorage.h
index 7b46bb929..7d0e45866 100644
--- a/ldap/servers/plugins/pwdstorage/pwdstorage.h
+++ b/ldap/servers/plugins/pwdstorage/pwdstorage.h
@@ -54,6 +54,7 @@
#define SALTED_MD5_NAME_LEN 4
#define PBKDF2_SHA256_SCHEME_NAME "PBKDF2_SHA256"
#define PBKDF2_SHA256_NAME_LEN 13
+#define GOST_YESCRYPT_SCHEME_NAME "GOST_YESCRYPT"
SECStatus sha_salted_hash(char *hash_out, const char *pwd, struct berval *salt, unsigned int secOID);
@@ -84,6 +85,8 @@ int md5_pw_cmp(const char *userpwd, const char *dbpwd);
char *md5_pw_enc(const char *pwd);
int smd5_pw_cmp(const char *userpwd, const char *dbpwd);
char *smd5_pw_enc(const char *pwd);
+int gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd);
+char *gost_yescrypt_pw_enc(const char *pwd);
int pbkdf2_sha256_start(Slapi_PBlock *pb);
int pbkdf2_sha256_close(Slapi_PBlock *pb);
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 0f341423d..f4bd21392 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -204,6 +204,19 @@ static const char *internal_entries[] =
"nsslapd-pluginVendor: 389 Project\n"
"nsslapd-pluginDescription: CRYPT-SHA512\n",
+ "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
+ "objectclass: top\n"
+ "objectclass: nsSlapdPlugin\n"
+ "cn: GOST_YESCRYPT\n"
+ "nsslapd-pluginpath: libpwdstorage-plugin\n"
+ "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n"
+ "nsslapd-plugintype: pwdstoragescheme\n"
+ "nsslapd-pluginenabled: on\n"
+ "nsslapd-pluginId: GOST_YESCRYPT\n"
+ "nsslapd-pluginVersion: none\n"
+ "nsslapd-pluginVendor: 389 Project\n"
+ "nsslapd-pluginDescription: GOST_YESCRYPT\n",
+
#ifdef RUST_ENABLE
"dn: cn=PBKDF2,cn=Password Storage Schemes,cn=plugins,cn=config\n"
"objectclass: top\n"
diff --git a/src/cockpit/389-console/src/lib/database/globalPwp.jsx b/src/cockpit/389-console/src/lib/database/globalPwp.jsx
index c3374037f..86fdb7306 100644
--- a/src/cockpit/389-console/src/lib/database/globalPwp.jsx
+++ b/src/cockpit/389-console/src/lib/database/globalPwp.jsx
@@ -1085,6 +1085,7 @@ export class GlobalPwPolicy extends React.Component {
<option>CRYPT-SHA512</option>
<option>CRYPT-SHA256</option>
<option>CRYPT</option>
+ <option>GOST_YESCRYPT</option>
<option>CLEAR</option>
</select>
</Col>
diff --git a/src/cockpit/389-console/src/lib/database/localPwp.jsx b/src/cockpit/389-console/src/lib/database/localPwp.jsx
index ad95b386c..d2bbf0306 100644
--- a/src/cockpit/389-console/src/lib/database/localPwp.jsx
+++ b/src/cockpit/389-console/src/lib/database/localPwp.jsx
@@ -128,6 +128,7 @@ class CreatePolicy extends React.Component {
<option>CRYPT-SHA512</option>
<option>CRYPT-SHA256</option>
<option>CRYPT</option>
+ <option>GOST_YESCRYPT</option>
<option>CLEAR</option>
</select>
</Col>
@@ -2103,6 +2104,7 @@ export class LocalPwPolicy extends React.Component {
<option>CRYPT-SHA512</option>
<option>CRYPT-SHA256</option>
<option>CRYPT</option>
+ <option>GOST_YESCRYPT</option>
<option>CLEAR</option>
</select>
</Col>
diff --git a/src/cockpit/389-console/src/lib/server/settings.jsx b/src/cockpit/389-console/src/lib/server/settings.jsx
index 57e97f66d..bf1a9885b 100644
--- a/src/cockpit/389-console/src/lib/server/settings.jsx
+++ b/src/cockpit/389-console/src/lib/server/settings.jsx
@@ -1103,6 +1103,7 @@ export class ServerSettings extends React.Component {
<option>CRYPT-SHA512</option>
<option>CRYPT-SHA256</option>
<option>CRYPT</option>
+ <option>GOST_YESCRYPT</option>
<option>CLEAR</option>
</select>
</Col>
diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
index 6a50f5426..946f8d430 100644
--- a/src/lib389/lib389/config.py
+++ b/src/lib389/lib389/config.py
@@ -209,7 +209,7 @@ class Config(DSLdapObject):
yield report
def _lint_passwordscheme(self):
- allowed_schemes = ['SSHA512', 'PBKDF2_SHA256']
+ allowed_schemes = ['SSHA512', 'PBKDF2_SHA256', 'GOST_YESCRYPT']
u_password_scheme = self.get_attr_val_utf8('passwordStorageScheme')
u_root_scheme = self.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
if u_root_scheme not in allowed_schemes or u_password_scheme not in allowed_schemes:
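With GOST_YESCRYPT added to the allowed schemes, the lib389 health check no longer flags instances configured to use it. A rough sketch of switching an instance over with lib389, assuming an already-open DirSrv object and the generic DSLdapObject.replace() API (the attribute names are the two the lint above inspects):

# Rough sketch; `inst` is assumed to be an open lib389 DirSrv instance.
from lib389.config import Config

def use_gost_yescrypt(inst):
    config = Config(inst)
    # The two attributes read by _lint_passwordscheme().
    config.replace('passwordStorageScheme', 'GOST_YESCRYPT')
    config.replace('nsslapd-rootpwstoragescheme', 'GOST_YESCRYPT')
    # Passwords set after this point are hashed with the new scheme;
    # existing userPassword values keep their old hashes until reset.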
| 0 |
51b1b836421b5d1a57ffacc98e6004dfa7238162
|
389ds/389-ds-base
|
Ticket 47525 - Fix memory leak
Coverity 12411 reported memory leak.
https://fedorahosted.org/389/ticket/47525
Reviewed by: richm(Thanks!)
|
commit 51b1b836421b5d1a57ffacc98e6004dfa7238162
Author: Mark Reynolds <[email protected]>
Date: Mon Nov 25 09:24:35 2013 -0500
Ticket 47525 - Fix memory leak
Coverity 12411 reported memory leak.
https://fedorahosted.org/389/ticket/47525
Reviewed by: richm(Thanks!)
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
index 0e749286b..e7e807bd8 100644
--- a/ldap/servers/plugins/memberof/memberof_config.c
+++ b/ldap/servers/plugins/memberof/memberof_config.c
@@ -698,6 +698,7 @@ memberof_shared_config_validate(Slapi_PBlock *pb)
}
bail:
+ slapi_mods_free(&smods);
slapi_entry_free(e);
return ret;
| 0 |
bd2c0d0b6a499d7f91a36e8a9feb83e4fbd3dac5
|
389ds/389-ds-base
|
Ticket #48146 - async simple paged results issue
Description: commit 5060d11f5039efa8534a8b65392ac6e10cbd2168 introduced
a regression. Handling backend "be" should be done after checking OID
MTN_CONTROL_USE_ONE_BACKEND_EXT_OID in the request control.
https://fedorahosted.org/389/ticket/48146
Reviewed by [email protected] (Thank you, Mark!!)
|
commit bd2c0d0b6a499d7f91a36e8a9feb83e4fbd3dac5
Author: Noriko Hosoi <[email protected]>
Date: Tue May 26 10:41:03 2015 -0700
Ticket #48146 - async simple paged results issue
Description: commit 5060d11f5039efa8534a8b65392ac6e10cbd2168 introduced
a regression. Handling backend "be" should be done after checking OID
MTN_CONTROL_USE_ONE_BACKEND_EXT_OID in the request control.
https://fedorahosted.org/389/ticket/48146
Reviewed by [email protected] (Thank you, Mark!!)
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 1d454b75d..bb053c1d3 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -408,57 +408,12 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
*/
operation_set_target_spec (pb->pb_op, basesdn);
- if (be_name == NULL)
- {
- /* no specific backend was requested, use the mapping tree
- */
- err_code = slapi_mapping_tree_select_all(pb, be_list, referral_list, errorbuf);
- if (((err_code != LDAP_SUCCESS) && (err_code != LDAP_OPERATIONS_ERROR) && (err_code != LDAP_REFERRAL))
- || ((err_code == LDAP_OPERATIONS_ERROR) && (be_list[0] == NULL)))
- {
- send_ldap_result(pb, err_code, NULL, errorbuf, 0, NULL);
- rc = -1;
- goto free_and_return;
- }
- if (be_list[0] != NULL)
- {
- index = 0;
- if (pr_be) { /* PAGED RESULT: be is found from the previous paging. */
- /* move the index in the be_list which matches pr_be */
- while (be_list[index] && be_list[index+1] && pr_be != be_list[index])
- index++;
- } else {
- while (be_list[index] && be_list[index+1])
- index++;
- }
- /* "be" is either pr_be or the last backend */
- be = be_list[index];
- }
- else
- be = pr_be?pr_be:NULL;
- }
- else
- {
- /* specific backend be_name was requested, use slapi_be_select_by_instance_name
- */
- if (pr_be) {
- be_single = be = pr_be;
- } else {
- be_single = be = slapi_be_select_by_instance_name(be_name);
- }
- if (be_single)
- slapi_be_Rlock(be_single);
- be_list[0] = NULL;
- referral_list[0] = NULL;
- referral = NULL;
- }
-
- /* this is time to check if mapping tree specific control
- * was used to specify that we want to parse only
- * one backend
+ /*
+ * this is time to check if mapping tree specific control was used to
+ * specify that we want to parse only one backend.
*/
slapi_pblock_get(pb, SLAPI_REQCONTROLS, &ctrlp);
- if (NULL != ctrlp)
+ if (ctrlp)
{
if (slapi_control_present(ctrlp, MTN_CONTROL_USE_ONE_BACKEND_EXT_OID,
&ctl_value, &iscritical))
@@ -513,7 +468,57 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
}
}
}
+ }
+
+ if (be_name == NULL)
+ {
+ /* no specific backend was requested, use the mapping tree
+ */
+ err_code = slapi_mapping_tree_select_all(pb, be_list, referral_list, errorbuf);
+ if (((err_code != LDAP_SUCCESS) && (err_code != LDAP_OPERATIONS_ERROR) && (err_code != LDAP_REFERRAL))
+ || ((err_code == LDAP_OPERATIONS_ERROR) && (be_list[0] == NULL)))
+ {
+ send_ldap_result(pb, err_code, NULL, errorbuf, 0, NULL);
+ rc = -1;
+ goto free_and_return;
+ }
+ if (be_list[0] != NULL)
+ {
+ index = 0;
+ if (pr_be) { /* PAGED RESULT: be is found from the previous paging. */
+ /* move the index in the be_list which matches pr_be */
+ while (be_list[index] && be_list[index+1] && pr_be != be_list[index])
+ index++;
+ } else {
+ while (be_list[index] && be_list[index+1])
+ index++;
+ }
+ /* "be" is either pr_be or the last backend */
+ be = be_list[index];
+ }
+ else
+ be = pr_be?pr_be:NULL;
+ }
+ else
+ {
+ /* specific backend be_name was requested, use slapi_be_select_by_instance_name
+ */
+ if (pr_be) {
+ be_single = be = pr_be;
+ } else {
+ be_single = be = slapi_be_select_by_instance_name(be_name);
+ }
+ if (be_single) {
+ slapi_be_Rlock(be_single);
+ }
+ be_list[0] = NULL;
+ referral_list[0] = NULL;
+ referral = NULL;
+ }
+ /* Handle the rest of the controls. */
+ if (ctrlp)
+ {
if ( slapi_control_present (ctrlp, LDAP_CONTROL_GET_EFFECTIVE_RIGHTS,
&ctl_value, &iscritical) )
{
@@ -1024,10 +1029,11 @@ next_be:
}
free_and_return:
- if ((be_list[0] != NULL) || (referral_list[0] != NULL))
+ if ((be_list[0] != NULL) || (referral_list[0] != NULL)) {
slapi_mapping_tree_free_all(be_list, referral_list);
- else if (be_single)
+ } else if (be_single) {
slapi_be_Unlock(be_single);
+ }
free_and_return_nolock:
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
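For context, the code reordered above services requests such as a simple paged results search, optionally combined with the one-backend control. A client-side sketch of such a paged search with python-ldap; host, credentials, base DN and page size are illustrative only:

# Client-side simple paged results search (python-ldap), i.e. the kind of
# request whose backend selection the patch above reorders.
import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

page_ctrl = SimplePagedResultsControl(True, size=500, cookie="")
while True:
    msgid = conn.search_ext("dc=example,dc=com", ldap.SCOPE_SUBTREE,
                            "(objectClass=*)", ["cn"],
                            serverctrls=[page_ctrl])
    _rtype, rdata, _rmsgid, serverctrls = conn.result3(msgid)
    for dn, _entry in rdata:
        print(dn)
    # Carry the server's cookie into the next page; an empty cookie means
    # the result set is exhausted.
    resp = [c for c in serverctrls
            if c.controlType == SimplePagedResultsControl.controlType]
    if not resp or not resp[0].cookie:
        break
    page_ctrl.cookie = resp[0].cookie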
| 0 |
7d0cf1c59a7deda45e340a98fa3b98beb5c9ce54
|
389ds/389-ds-base
|
[155905] mrclone failure: race condition between force_checkpoint and dblayer_close
1) Let dblayer_force_checkpoint check if the db env is still good or not.
If it's already released, it just returns.
2) In _dblayer_delete_instance_dir, it actually does not need to invoke db
threads. Set DBLAYER_NO_DBTHREADS_MODE when calling dblayer_start.
|
commit 7d0cf1c59a7deda45e340a98fa3b98beb5c9ce54
Author: Noriko Hosoi <[email protected]>
Date: Mon Apr 25 16:24:23 2005 +0000
[155905] mrclone failure: race condition between force_checkpoint and dblayer_close
1) Let dblayer_force_checkpoint check if the db env is still good or not.
If it's already released, it just returns.
2) In _dblayer_delete_instance_dir, it actually does not need to invoke db
threads. Set DBLAYER_NO_DBTHREADS_MODE when calling dblayer_start.
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
index b047c0d3d..08968eaac 100644
--- a/ldap/servers/slapd/back-ldbm/archive.c
+++ b/ldap/servers/slapd/back-ldbm/archive.c
@@ -322,7 +322,7 @@ int ldbm_back_ldbm2archive( Slapi_PBlock *pb )
/* start the database code up, do not attempt to perform recovery */
if (run_from_cmdline &&
- 0 != dblayer_start(li,DBLAYER_ARCHIVE_MODE|DBLAYER_CMDLINE_MODE)) {
+ 0 != dblayer_start(li,DBLAYER_ARCHIVE_MODE|DBLAYER_NO_DBTHREADS_MODE)) {
LDAPDebug(LDAP_DEBUG_ANY, "db2archive: Failed to init database\n",
0, 0, 0);
if (task) {
@@ -336,7 +336,7 @@ int ldbm_back_ldbm2archive( Slapi_PBlock *pb )
/* close the database down again */
if (run_from_cmdline &&
- 0 != dblayer_close(li,DBLAYER_ARCHIVE_MODE|DBLAYER_CMDLINE_MODE)) {
+ 0 != dblayer_close(li,DBLAYER_ARCHIVE_MODE|DBLAYER_NO_DBTHREADS_MODE)) {
LDAPDebug(LDAP_DEBUG_ANY, "db2archive: Failed to close database\n",
0, 0, 0);
if (task) {
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 1b7e48bbd..0a0e35539 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -214,7 +214,7 @@ extern int ldbm_warn_if_no_db;
#define DBLAYER_INDEX_MODE 0x80
#define DBLAYER_CLEAN_RECOVER_MODE 0x100
-#define DBLAYER_CMDLINE_MODE 0x1000
+#define DBLAYER_NO_DBTHREADS_MODE 0x1000
#define DBLAYER_RESTORE_MASK (DBLAYER_RESTORE_MODE|DBLAYER_RESTORE_NO_RECOVERY_MODE)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 3d5b79295..0d7cc7036 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -1388,12 +1388,12 @@ int dblayer_start(struct ldbminfo *li, int dbmode)
case DBLAYER_RESTORE_MODE:
open_flags |= DB_RECOVER_FATAL;
open_flags &= ~DB_RECOVER; /* shouldn't set both */
- if (!(dbmode & DBLAYER_CMDLINE_MODE))
+ if (!(dbmode & DBLAYER_NO_DBTHREADS_MODE))
dbmode = DBLAYER_NORMAL_MODE; /* to restart helper threads */
break;
case DBLAYER_RESTORE_NO_RECOVERY_MODE:
open_flags &= ~(DB_RECOVER | DB_RECOVER_FATAL);
- if (!(dbmode & DBLAYER_CMDLINE_MODE))
+ if (!(dbmode & DBLAYER_NO_DBTHREADS_MODE))
dbmode = DBLAYER_NORMAL_MODE; /* to restart helper threads */
}
}
@@ -1547,7 +1547,9 @@ int dblayer_start(struct ldbminfo *li, int dbmode)
/* Now attempt to start up the checkpoint and deadlock threads */
- if ( (DBLAYER_NORMAL_MODE & dbmode ) && (0 == return_value)) {
+ /* note: need to be '==', not '&' to omit DBLAYER_NO_DBTHREADS_MODE */
+ if ( (DBLAYER_NORMAL_MODE == dbmode ) &&
+ (0 == return_value)) {
/* update the dbversion file */
dbversion_write(li, region_dir, NULL);
@@ -3934,16 +3936,13 @@ static int dblayer_force_checkpoint(struct ldbminfo *li)
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
struct dblayer_private_env *pEnv;
- if (NULL == priv){
- /* already terminated. nothing to do */
+ if (NULL == priv || NULL == priv->dblayer_env){
+ /* already terminated. nothing to do */
return -1;
}
pEnv= priv->dblayer_env;
-
- PR_ASSERT(pEnv != NULL);
-
if (priv->dblayer_enable_transactions) {
LDAPDebug(LDAP_DEBUG_TRACE, "Checkpointing database ...\n", 0, 0, 0);
@@ -4008,7 +4007,8 @@ static int _dblayer_delete_instance_dir(ldbm_instance *inst, int startdb)
{
if (startdb)
{
- rval = dblayer_start(li, DBLAYER_NORMAL_MODE);
+ /* close immediately; no need to run db threads */
+ rval = dblayer_start(li, DBLAYER_NORMAL_MODE|DBLAYER_NO_DBTHREADS_MODE);
if (rval)
{
LDAPDebug(LDAP_DEBUG_ANY, "_dblayer_delete_instance_dir: dblayer_start failed! %s (%d)\n",
@@ -5402,7 +5402,8 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char *
if (li->li_flags & TASK_RUNNING_FROM_COMMANDLINE)
{
- dbmode |= DBLAYER_CMDLINE_MODE;
+ /* command line mode; no need to run db threads */
+ dbmode |= DBLAYER_NO_DBTHREADS_MODE;
}
else /* on-line mode */
{
| 0 |
791e5aab53675ea883c94f69ad01acdfc57c086b
|
389ds/389-ds-base
|
Issue 49381 - Add docstrings to plugin test suites - Part 1
Description: Add and refactor the test case docstrings.
They should follow one exact format so that an internal
tool can parse them. They should also be readable enough
to convey the main idea of each test case.
https://pagure.io/389-ds-base/issue/49381
Reviewed by: vashirov (Thanks!)
|
commit 791e5aab53675ea883c94f69ad01acdfc57c086b
Author: Simon Pichugin <[email protected]>
Date: Tue May 22 22:54:21 2018 +0200
Issue 49381 - Add docstrings to plugin test suites - Part 1
Description: Add and refactor the test case docstrings.
They should follow one exact format so that an internal
tool can parse them. They should also be readable enough
to convey the main idea of each test case.
https://pagure.io/389-ds-base/issue/49381
Reviewed by: vashirov (Thanks!)
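The target layout is the :id: / :setup: / :steps: / :expectedresults: docstring format applied throughout the diff below. A hypothetical minimal test showing that layout (the UUID is a placeholder; topology_st is the standard lib389 standalone fixture):

# Hypothetical example of the docstring format this commit applies.
from lib389.topologies import topology_st


def test_example_format(topology_st):
    """Short one-line summary of what the test checks

    :id: 00000000-0000-0000-0000-000000000000
    :setup: Standalone Instance
    :steps:
        1. Do the first thing
        2. Do the second thing
    :expectedresults:
        1. Success
        2. Success
    """
    inst = topology_st.standalone
    assert inst.status()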
diff --git a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py
index 03612d152..3a66f02ca 100644
--- a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py
+++ b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py
@@ -71,10 +71,24 @@ def test_acceptance(topology_m2):
"""Exercise each plugin and its main features, while
changing the configuration without restarting the server.
- Make sure that as configuration changes are made they take
- effect immediately. Cross plugin interaction (e.g. automember/memberOf)
- needs to tested, as well as plugin tasks. Need to test plugin
- config validation(dependencies, etc).
+ :id: 96136538-0151-4b09-9933-0e0cbf2c786c
+ :setup: 2 Master Instances
+ :steps:
+ 1. Pause all replication
+ 2. Set nsslapd-dynamic-plugins to on
+ 3. Try to update LDBM config entry
+ 4. Go through all plugin basic functionality
+ 5. Resume replication
+ 6. Go through all plugin basic functionality again
+ 7. Check that data in sync and replication is working
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
"""
m1 = topology_m2.ms["master1"]
@@ -126,9 +140,32 @@ def test_acceptance(topology_m2):
def test_memory_corruption(topology_m2):
- """Memory Corruption - Restart the plugins many times, and in different orders and test
- functionality, and stability. This will excerise the internal
- plugin linked lists, dse callbacks, and task handlers.
+ """Check the plugins for memory corruption issues while
+ dynamic plugins option is enabled
+
+ :id: 96136538-0151-4b09-9933-0e0cbf2c7862
+ :setup: 2 Master Instances
+ :steps:
+ 1. Pause all replication
+ 2. Set nsslapd-dynamic-plugins to on
+ 3. Try to update LDBM config entry
+ 4. Restart the plugin many times in a linked list fashion
+ restarting previous and preprevious plugins in the list of all plugins
+ 5. Run the functional test
+ 6. Repeat 4 and 5 steps for all plugins
+ 7. Resume replication
+ 8. Go through 4-6 steps once more
+ 9. Check that data in sync and replication is working
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
"""
@@ -205,11 +242,48 @@ def test_memory_corruption(topology_m2):
def test_stress(topology_m2):
- """Test dynamic plugins got
-
- Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)
- Restart various plugins while these operations are going on. Perform this test
- 5 times(stress_max_run).
+ """Test plugins while under a big load. Perform the test 5 times
+
+ :id: 96136538-0151-4b09-9933-0e0cbf2c7863
+ :setup: 2 Master Instances
+ :steps:
+ 1. Pause all replication
+ 2. Set nsslapd-dynamic-plugins to on
+ 3. Try to update LDBM config entry
+ 4. Do one run through all tests
+ 5. Enable Referential integrity and MemberOf plugins
+ 6. Launch three new threads to add a bunch of users
+ 7. While we are adding users restart the MemberOf and
+ Linked Attributes plugins many times
+ 8. Wait for the 'adding' threads to complete
+ 9. Now launch three threads to delete the users
+ 10. Restart both the MemberOf, Referential integrity and
+ Linked Attributes plugins during these deletes
+ 11. Wait for the 'deleting' threads to complete
+ 12. Now make sure both the MemberOf and Referential integrity plugins still work correctly
+ 13. Cleanup the stress tests (delete the group entry)
+ 14. Perform 4-13 steps five times
+ 15. Resume replication
+ 16. Go through 4-14 steps once more
+ 17. Check that data in sync and replication is working
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
+ 17. Success
"""
m1 = topology_m2.ms["master1"]
diff --git a/dirsrvtests/tests/suites/plugins/acceptance_test.py b/dirsrvtests/tests/suites/plugins/acceptance_test.py
index f7b66fe50..894c0ff25 100644
--- a/dirsrvtests/tests/suites/plugins/acceptance_test.py
+++ b/dirsrvtests/tests/suites/plugins/acceptance_test.py
@@ -47,11 +47,6 @@ else:
'''
-################################################################################
-#
-# Test Plugin Dependency
-#
-################################################################################
def check_dependency(inst, plugin, online=True):
"""Set the "account usability" plugin to depend on this plugin.
This plugin is generic, always enabled, and perfect for our testing
@@ -75,12 +70,37 @@ def check_dependency(inst, plugin, online=True):
inst.start()
-################################################################################
-#
-# Test Account Policy Plugin (0)
-#
-################################################################################
def test_acctpolicy(topo, args=None):
+ """Test Account policy basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d829
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Add a config entry for 'lastLoginTime'
+ 4. Add a user
+ 5. Bind as the user
+ 6. Check testLastLoginTime was added to the user
+ 7. Replace 'stateattrname': 'testLastLoginTime'
+ 8. Bind as the user
+ 9. Check testLastLoginTime was added to the user
+ 10. Check nsslapd-plugin-depends-on-named for the plugin
+ 11. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -162,12 +182,37 @@ def test_acctpolicy(topo, args=None):
return
-################################################################################
-#
-# Test Attribute Uniqueness Plugin (1)
-#
-################################################################################
def test_attruniq(topo, args=None):
+ """Test Attribute uniqueness basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d801
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Add a user: with 'mail' and 'mailAlternateAddress' attributes
+ 4. Replace 'uniqueness-attribute-name': 'cn'
+ 5. Try to add a user with the same 'cn'
+ 6. Replace 'uniqueness-attribute-name': 'mail'
+ 7. Try to add a user with the same 'mail'
+ 8. Add 'uniqueness-attribute-name': 'mailAlternateAddress'
+ 9. Try to add a user with the same 'mailAlternateAddress'
+ 10. Check nsslapd-plugin-depends-on-named for the plugin
+ 11. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Should fail
+ 6. Success
+ 7. Should fail
+ 8. Success
+ 9. Should fail
+ 10. Success
+ 11. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -284,12 +329,50 @@ def test_attruniq(topo, args=None):
return
-################################################################################
-#
-# Test Auto Membership Plugin (2)
-#
-################################################################################
def test_automember(topo, args=None):
+ """Test Auto Membership basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d802
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Add a group
+ 4. Add two Organisational Unit entries
+ 5. Add a config entry for the group and one branch
+ 6. Add a user that should get added to the group
+ 7. Check the entry is in group
+ 8. Set groupattr to 'uniquemember:dn' and scope to branch2
+ 9. Add a user that should get added to the group
+ 10. Check the group
+ 11. Disable plugin and restart
+ 12. Add an entry that should be picked up by automember
+ 13. Verify that the entry is not picked up by automember (yet)
+ 14. Check the group - uniquemember should not exist
+ 15. Enable plugin and restart
+ 16. Verify the fixup task worked
+ 17. Check nsslapd-plugin-depends-on-named for the plugin
+ 18. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
+ 17. Success
+ 18. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -304,8 +387,6 @@ def test_automember(topo, args=None):
if args is None:
inst.restart()
- CONFIG_DN = 'cn=config,cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config'
-
log.info('Testing ' + PLUGIN_AUTOMEMBER + '...')
############################################################################
@@ -413,12 +494,38 @@ def test_automember(topo, args=None):
return
-################################################################################
-#
-# Test DNA Plugin (3)
-#
-################################################################################
def test_dna(topo, args=None):
+ """Test DNA basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d803
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Configure plugin for uidNumber
+ 4. Add a user
+ 5. See if the entry now has the new uidNumber assignment - uidNumber=1
+ 6. Test the magic regen value
+ 7. See if the entry now has the new uidNumber assignment - uidNumber=2
+ 8. Set 'dnaMagicRegen': '-2'
+ 9. Test the magic regen value
+ 10. See if the entry now has the new uidNumber assignment - uidNumber=3
+ 11. Check nsslapd-plugin-depends-on-named for the plugin
+ 12. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 12. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -507,12 +614,61 @@ def test_dna(topo, args=None):
return
-################################################################################
-#
-# Test Linked Attrs Plugin (4)
-#
-################################################################################
def test_linkedattrs(topo, args=None):
+ """Test Linked Attributes basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d804
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Add a config entry for directReport
+ 4. Add test entries
+ 5. Add the linked attrs config entry
+ 6. User1 - Set "directReport" to user2
+ 7. See if manager was added to the other entry
+ 8. User1 - Remove "directReport"
+ 9. See if manager was removed
+ 10. Change the config - using linkType "indirectReport" now
+ 11. Make sure the old linkType(directManager) is not working
+ 12. See if manager was added to the other entry, better not be...
+ 13. Now, set the new linkType "indirectReport", which should add "manager" to the other entry
+ 14. See if manager was added to the other entry, better not be
+ 15. Remove "indirectReport"; this should remove "manager" from the other entry
+ 16. See if manager was removed
+ 17. Disable plugin and make some updates that would have triggered the plugin
+ 18. The entry should not have a manager attribute
+ 19. Enable the plugin and rerun the task entry
+ 20. Add the task again
+ 21. Check if user2 now has a manager attribute now
+ 22. Check nsslapd-plugin-depends-on-named for the plugin
+ 23. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
+ 17. Success
+ 18. Success
+ 19. Success
+ 20. Success
+ 21. Success
+ 22. Success
+ 23. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -642,12 +798,81 @@ def test_linkedattrs(topo, args=None):
return
-################################################################################
-#
-# Test MemberOf Plugin (5)
-#
-################################################################################
def test_memberof(topo, args=None):
+ """Test MemberOf basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d805
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Replace groupattr with 'member'
+ 4. Add our test entries
+ 5. Check if the user now has a "memberOf" attribute
+ 6. Remove "member" should remove "memberOf" from the entry
+ 7. Check that "memberOf" was removed
+ 8. Replace 'memberofgroupattr': 'uniquemember'
+ 9. Replace 'uniquemember': user1
+ 10. Check if the user now has a "memberOf" attribute
+ 11. Remove "uniquemember" should remove "memberOf" from the entry
+ 12. Check that "memberOf" was removed
+ 13. The shared config entry uses "member" - the above test uses "uniquemember"
+ 14. Delete the test entries then read them to start with a clean slate
+ 15. Check if the user now has a "memberOf" attribute
+ 16. Check that "memberOf" was removed
+ 17. Replace 'memberofgroupattr': 'uniquemember'
+ 18. Check if the user now has a "memberOf" attribute
+ 19. Remove "uniquemember" should remove "memberOf" from the entry
+ 20. Check that "memberOf" was removed
+ 21. Replace 'memberofgroupattr': 'member'
+ 22. Remove shared config from plugin
+ 23. Check if the user now has a "memberOf" attribute
+ 24. Remove "uniquemember" should remove "memberOf" from the entry
+ 25. Check that "memberOf" was removed
+ 26. First change the plugin to use uniquemember
+ 27. Add uniquemember, should not update user1
+ 28. Check for "memberOf"
+ 29. Enable memberof plugin
+ 30. Run the task and validate that it worked
+ 31. Check for "memberOf"
+ 32. Check nsslapd-plugin-depends-on-named for the plugin
+ 33. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
+ 17. Success
+ 18. Success
+ 19. Success
+ 20. Success
+ 21. Success
+ 22. Success
+ 23. Success
+ 24. Success
+ 25. Success
+ 26. Success
+ 27. Success
+ 28. Success
+ 29. Success
+ 30. Success
+ 31. Success
+ 32. Success
+ 33. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -853,12 +1078,37 @@ def test_memberof(topo, args=None):
return
-################################################################################
-#
-# Test Managed Entry Plugin (6)
-#
-################################################################################
def test_mep(topo, args=None):
+ """Test Managed Entries basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d806
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Add our org units
+ 4. Set up config entry and template entry for the org units
+ 5. Add an entry that meets the MEP scope
+ 6. Check if a managed group entry was created
+ 7. Add a new template entry
+ 8. Add an entry that meets the MEP scope
+ 9. Check if a managed group entry was created
+ 10. Check nsslapd-plugin-depends-on-named for the plugin
+ 11. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -955,12 +1205,41 @@ def test_mep(topo, args=None):
return
-################################################################################
-#
-# Test Passthru Plugin (7)
-#
-################################################################################
def test_passthru(topo, args=None):
+ """Test Passthrough Authentication basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d807
+ :setup: Standalone Instance
+ :steps:
+ 1. Stop the plugin
+ 2. Restart the instance
+ 3. Create a second backend
+ 4. Create the top of the tree
+ 5. Add user to suffix1
+ 6. Configure and start plugin
+ 7. Login as user
+ 8. Login as root DN
+ 9. Replace 'nsslapd-pluginarg0': ldap uri for second instance
+ 10. Login as user
+ 11. Login as root DN
+ 12. Check nsslapd-plugin-depends-on-named for the plugin
+ 13. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ """
+
inst1 = topo[0]
inst2 = topo[1]
@@ -1061,12 +1340,67 @@ def test_passthru(topo, args=None):
return
-################################################################################
-#
-# Test Referential Integrity Plugin (8)
-#
-################################################################################
def test_referint(topo, args=None):
+ """Test Referential Integrity basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d808
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Replace 'referint-membership-attr': 'member'
+ 4. Add some users and a group
+ 5. Grab the referint log file from the plugin
+ 6. Add shared config entry
+ 7. Delete one user
+ 8. Check for integrity
+ 9. Replace 'referint-membership-attr': 'uniquemember'
+ 10. Delete second user
+ 11. Check for integrity
+ 12. The shared config entry uses "member" - the above test used "uniquemember"
+ 13. Recreate users and a group
+ 14. Delete one user
+ 15. Check for integrity
+ 16. Change the shared config entry to use 'uniquemember' and test the plugin
+ 17. Delete second user
+ 18. Check for integrity
+ 19. First change the plugin to use member before we move the shared config that uses uniquemember
+ 20. Remove shared config from plugin
+ 21. Add test user
+ 22. Add user to group
+ 23. Delete a user
+ 24. Check for integrity
+ 25. Check nsslapd-plugin-depends-on-named for the plugin
+ 26. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
+ 17. Success
+ 18. Success
+ 19. Success
+ 20. Success
+ 21. Success
+ 22. Success
+ 23. Success
+ 24. Success
+ 25. Success
+ 26. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -1207,12 +1541,35 @@ def test_referint(topo, args=None):
return
-################################################################################
-#
-# Test Retro Changelog Plugin (9)
-#
-################################################################################
def test_retrocl(topo, args=None):
+ """Test Retro Changelog basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d810
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Gather the current change count (it's not 1 once we start the stability tests)
+ 4. Add a user
+ 5. Check we logged this in the retro cl
+ 6. Change the config - disable plugin
+ 7. Delete the user
+ 8. Check we didn't log this in the retro cl
+ 9. Check nsslapd-plugin-depends-on-named for the plugin
+ 10. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
@@ -1297,12 +1654,39 @@ def _rootdn_restart(inst):
inst.state = DIRSRV_STATE_ONLINE
-################################################################################
-#
-# Test Root DN Access Control Plugin (10)
-#
-################################################################################
def test_rootdn(topo, args=None):
+ """Test Root DNA Access control basic functionality
+
+ :id: 9b87493b-0493-46f9-8364-6099d0e5d811
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable the plugin
+ 2. Restart the instance
+ 3. Add an user and aci to open up cn=config
+ 4. Set an aci so we can modify the plugin after we deny the root dn
+ 5. Set allowed IP to an unknown host - blocks root dn
+ 6. Bind as Root DN
+ 7. Bind as the user who can make updates to the config
+ 8. Test that invalid plugin changes are rejected
+ 9. Remove the restriction
+ 10. Bind as Root DN
+ 11. Check nsslapd-plugin-depends-on-named for the plugin
+ 12. Clean up
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ """
+
inst = topo[0]
# stop the plugin, and start it
diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py
index b6de0e608..69e45008f 100644
--- a/dirsrvtests/tests/suites/plugins/accpol_test.py
+++ b/dirsrvtests/tests/suites/plugins/accpol_test.py
@@ -319,16 +319,21 @@ def account_status(topology_st, suffix, subtree, userid, nousrs, ulimit, tochck)
def test_glact_inact(topology_st, accpol_global):
"""Verify if user account is inactivated when accountInactivityLimit is exceeded.
- :ID: 342af084-0ad0-442f-b6f6-5a8b8e5e4c28
- :feature: Account Policy Plugin
+ :id: 342af084-0ad0-442f-b6f6-5a8b8e5e4c28
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=people subtree in the default suffix
- 2. Check if users are active just before it reaches accountInactivityLimit.
- 3. User accounts should not be inactivated, expected 0
- 4. Check if users are inactivated when accountInactivityLimit is exceeded.
- 5. User accounts should be inactivated, expected error 19.
- :assert: Should return error code 19
+ :steps:
+ 1. Add few users to ou=people subtree in the default suffix
+ 2. Check if users are active just before it reaches accountInactivityLimit.
+ 3. User accounts should not be inactivated, expected 0
+ 4. Check if users are inactivated when accountInactivityLimit is exceeded.
+ 5. User accounts should be inactivated, expected error 19.
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Should return error code 19
"""
suffix = DEFAULT_SUFFIX
@@ -353,18 +358,25 @@ def test_glact_inact(topology_st, accpol_global):
def test_glremv_lastlogin(topology_st, accpol_global):
"""Verify if user account is inactivated by createTimeStamp, if lastLoginTime attribute is missing.
- :ID: 8ded5d8e-ed93-4c22-9c8e-78c479189f84
- :feature: Account Policy Plugin
+ :id: 8ded5d8e-ed93-4c22-9c8e-78c479189f84
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=people subtree in the default suffix
- 2. Wait for few secs and bind as user to create lastLoginTime attribute.
- 3. Remove the lastLoginTime attribute from the user.
- 4. Wait till accountInactivityLimit exceeded based on createTimeStamp value
- 5. Check if users are inactivated, expected error 19.
- 6. Replace lastLoginTime attribute and check if account is activated
- 7. User should be activated based on lastLoginTime attribute, expected 0
- :assert: Should return error code 19
+ :steps:
+ 1. Add few users to ou=people subtree in the default suffix
+ 2. Wait for few secs and bind as user to create lastLoginTime attribute.
+ 3. Remove the lastLoginTime attribute from the user.
+ 4. Wait till accountInactivityLimit exceeded based on createTimeStamp value
+ 5. Check if users are inactivated, expected error 19.
+ 6. Replace lastLoginTime attribute and check if account is activated
+ 7. User should be activated based on lastLoginTime attribute, expected 0
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Should return error code 19
"""
suffix = DEFAULT_SUFFIX
@@ -390,16 +402,21 @@ def test_glremv_lastlogin(topology_st, accpol_global):
def test_glact_login(topology_st, accpol_global):
"""Verify if user account can be activated by replacing the lastLoginTime attribute.
- :ID: f89897cc-c13e-4824-af08-3dd1039bab3c
- :feature: Account Policy Plugin
+ :id: f89897cc-c13e-4824-af08-3dd1039bab3c
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=groups subtree in the default suffix
- 2. Wait till accountInactivityLimit exceeded
- 3. Run ldapsearch as normal user, expected error 19.
- 4. Replace the lastLoginTime attribute and check if account is activated
- 5. Run ldapsearch as normal user, expected 0.
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Add few users to ou=groups subtree in the default suffix
+ 2. Wait till accountInactivityLimit exceeded
+ 3. Run ldapsearch as normal user, expected error 19.
+ 4. Replace the lastLoginTime attribute and check if account is activated
+ 5. Run ldapsearch as normal user, expected 0.
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
"""
suffix = DEFAULT_SUFFIX
@@ -420,27 +437,43 @@ def test_glact_login(topology_st, accpol_global):
def test_glinact_limit(topology_st, accpol_global):
"""Verify if account policy plugin functions well when changing accountInactivityLimit value.
- :ID: 7fbc373f-a3d7-4774-8d34-89b057c5e74b
- :feature: Account Policy Plugin
+ :id: 7fbc373f-a3d7-4774-8d34-89b057c5e74b
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=groups subtree in the default suffix
- 2. Check if users are active just before reaching accountInactivityLimit
- 3. Modify AccountInactivityLimit to a bigger value
- 4. Wait for additional few secs, but check users before it reaches accountInactivityLimit
- 5. Wait till accountInactivityLimit exceeded and check users, expected error 19
- 6. Modify accountInactivityLimit to use the min value.
- 7. Add few users to ou=groups subtree in the default suffix
- 8. Wait till it reaches accountInactivityLimit and check users, expected error 19
- 9. Modify accountInactivityLimit to 10 times(30 secs) bigger than the initial value.
- 10. Add few users to ou=groups subtree in the default suffix
- 11. Wait for 90 secs and check if account is not inactivated, expected 0
- 12. Wait for +27 secs and check if account is not inactivated, expected 0
- 13. Wait for +30 secs and check if account is inactivated, error 19
- 14. Replace the lastLoginTime attribute and check if account is activated
- 15. Modify accountInactivityLimit to 12 secs, which is the default
- 16. Run ldapsearch as normal user, expected 0.
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Add few users to ou=groups subtree in the default suffix
+ 2. Check if users are active just before reaching accountInactivityLimit
+ 3. Modify AccountInactivityLimit to a bigger value
+ 4. Wait for additional few secs, but check users before it reaches accountInactivityLimit
+ 5. Wait till accountInactivityLimit exceeded and check users, expected error 19
+ 6. Modify accountInactivityLimit to use the min value.
+ 7. Add few users to ou=groups subtree in the default suffix
+ 8. Wait till it reaches accountInactivityLimit and check users, expected error 19
+ 9. Modify accountInactivityLimit to 10 times(30 secs) bigger than the initial value.
+ 10. Add few users to ou=groups subtree in the default suffix
+ 11. Wait for 90 secs and check if account is not inactivated, expected 0
+ 12. Wait for +27 secs and check if account is not inactivated, expected 0
+ 13. Wait for +30 secs and check if account is inactivated, error 19
+ 14. Replace the lastLoginTime attribute and check if account is activated
+ 15. Modify accountInactivityLimit to 12 secs, which is the default
+ 16. Run ldapsearch as normal user, expected 0.
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
"""
suffix = DEFAULT_SUFFIX
@@ -483,29 +516,47 @@ def test_glinact_limit(topology_st, accpol_global):
def test_glnologin_attr(topology_st, accpol_global):
"""Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present
- :ID: 3032f670-705d-4f69-96f5-d75445cffcfb
- :feature: Account Policy Plugin
+ :id: 3032f670-705d-4f69-96f5-d75445cffcfb
:setup: Standalone instance, Local account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Configure Global account policy plugin with createTimestamp as stateattrname
- 2. lastLoginTime attribute will not be effective.
- 3. Add few users to ou=groups subtree in the default suffix
- 4. Wait for 10 secs and check if account is not inactivated, expected 0
- 5. Modify AccountInactivityLimit to 20 secs
- 6. Wait for +9 secs and check if account is not inactivated, expected 0
- 7. Wait for +3 secs and check if account is inactivated, error 19
- 8. Modify accountInactivityLimit to 3 secs
- 9. Add few users to ou=groups subtree in the default suffix
- 10. Wait for 3 secs and check if account is inactivated, error 19
- 11. Modify accountInactivityLimit to 30 secs
- 12. Add few users to ou=groups subtree in the default suffix
- 13. Wait for 90 secs and check if account is not inactivated, expected 0
- 14. Wait for +28 secs and check if account is not inactivated, expected 0
- 15. Wait for +2 secs and check if account is inactivated, error 19
- 16. Replace the lastLoginTime attribute and check if account is activated
- 17. Modify accountInactivityLimit to 12 secs, which is the default
- 18. Run ldapsearch as normal user, expected 0.
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Configure Global account policy plugin with createTimestamp as stateattrname
+ 2. lastLoginTime attribute will not be effective.
+ 3. Add few users to ou=groups subtree in the default suffix
+ 4. Wait for 10 secs and check if account is not inactivated, expected 0
+ 5. Modify AccountInactivityLimit to 20 secs
+ 6. Wait for +9 secs and check if account is not inactivated, expected 0
+ 7. Wait for +3 secs and check if account is inactivated, error 19
+ 8. Modify accountInactivityLimit to 3 secs
+ 9. Add few users to ou=groups subtree in the default suffix
+ 10. Wait for 3 secs and check if account is inactivated, error 19
+ 11. Modify accountInactivityLimit to 30 secs
+ 12. Add few users to ou=groups subtree in the default suffix
+ 13. Wait for 90 secs and check if account is not inactivated, expected 0
+ 14. Wait for +28 secs and check if account is not inactivated, expected 0
+ 15. Wait for +2 secs and check if account is inactivated, error 19
+ 16. Replace the lastLoginTime attribute and check if account is activated
+ 17. Modify accountInactivityLimit to 12 secs, which is the default
+ 18. Run ldapsearch as normal user, expected 0.
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ 15. Success
+ 16. Success
+ 17. Success
+ 18. Success
"""
suffix = DEFAULT_SUFFIX
@@ -555,17 +606,23 @@ def test_glnologin_attr(topology_st, accpol_global):
def test_glnoalt_stattr(topology_st, accpol_global):
"""Verify if user account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1
- :ID: 8dcc3540-578f-422a-bb44-28c2cf20dbcd
- :feature: Account Policy Plugin
+ :id: 8dcc3540-578f-422a-bb44-28c2cf20dbcd
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Configure Global account policy plugin with altstateattrname to 1.1
- 2. Add few users to ou=groups subtree in the default suffix
- 3. Wait till it reaches accountInactivityLimit
- 4. Remove lastLoginTime attribute from the user entry
- 5. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present
- 6. Wait till it reaches accountInactivityLimit and check users, expected error 19
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Configure Global account policy plugin with altstateattrname to 1.1
+ 2. Add few users to ou=groups subtree in the default suffix
+ 3. Wait till it reaches accountInactivityLimit
+ 4. Remove lastLoginTime attribute from the user entry
+ 5. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present
+ 6. Wait till it reaches accountInactivityLimit and check users, expected error 19
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
"""
suffix = DEFAULT_SUFFIX
@@ -594,18 +651,25 @@ def test_glnoalt_stattr(topology_st, accpol_global):
def test_glattr_modtime(topology_st, accpol_global):
"""Verify if user account can be inactivated based on modifyTimeStamp attribute
- :ID: 67380839-2966-45dc-848a-167a954153e1
- :feature: Account Policy Plugin
+ :id: 67380839-2966-45dc-848a-167a954153e1
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Configure Global account policy plugin with altstateattrname to modifyTimestamp
- 2. Add few users to ou=groups subtree in the default suffix
- 3. Wait till the accountInactivityLimit exceeded and check users, expected error 19
- 4. Modify cn attribute for user, ModifyTimeStamp is updated.
- 5. Check if user is activated based on ModifyTimeStamp attribute, expected 0
- 6. Change the plugin to use createTimeStamp and remove lastLoginTime attribute
- 7. Check if account is inactivated, expected error 19
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Configure Global account policy plugin with altstateattrname to modifyTimestamp
+ 2. Add few users to ou=groups subtree in the default suffix
+ 3. Wait till the accountInactivityLimit exceeded and check users, expected error 19
+ 4. Modify cn attribute for user, ModifyTimeStamp is updated.
+ 5. Check if user is activated based on ModifyTimeStamp attribute, expected 0
+ 6. Change the plugin to use createTimeStamp and remove lastLoginTime attribute
+ 7. Check if account is inactivated, expected error 19
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
"""
suffix = DEFAULT_SUFFIX
@@ -636,21 +700,31 @@ def test_glattr_modtime(topology_st, accpol_global):
def test_glnoalt_nologin(topology_st, accpol_global):
"""Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO
- :ID: 49eda7db-84de-47ba-8f81-ac5e4de3a500
- :feature: Account Policy Plugin
+ :id: 49eda7db-84de-47ba-8f81-ac5e4de3a500
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Configure Global account policy plugin with altstateattrname to 1.1
- 2. Set alwaysrecordlogin to NO.
- 3. Add few users to ou=groups subtree in the default suffix
- 4. Wait till accountInactivityLimit exceeded and check users, expected 0
- 5. Check for lastLoginTime attribute, it should not be present
- 6. Wait for few more secs and check if account is not inactivated, expected 0
- 7. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present
- 8. Set altstateattrname to createTimeStamp
- 9. Check if user account is inactivated based on createTimeStamp attribute.
- 10. Account should be inactivated, expected error 19
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Configure Global account policy plugin with altstateattrname to 1.1
+ 2. Set alwaysrecordlogin to NO.
+ 3. Add few users to ou=groups subtree in the default suffix
+ 4. Wait till accountInactivityLimit exceeded and check users, expected 0
+ 5. Check for lastLoginTime attribute, it should not be present
+ 6. Wait for few more secs and check if account is not inactivated, expected 0
+ 7. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present
+ 8. Set altstateattrname to createTimeStamp
+ 9. Check if user account is inactivated based on createTimeStamp attribute.
+ 10. Account should be inactivated, expected error 19
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
"""
suffix = DEFAULT_SUFFIX
@@ -684,20 +758,29 @@ def test_glnoalt_nologin(topology_st, accpol_global):
def test_glinact_nsact(topology_st, accpol_global):
"""Verify if user account can be activated using ns-activate.pl script.
- :ID: 876a7a7c-0b3f-4cd2-9b45-1dc80846e334
- :feature: Account Policy Plugin
+ :id: 876a7a7c-0b3f-4cd2-9b45-1dc80846e334
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Configure Global account policy plugin
- 2. Add few users to ou=groups subtree in the default suffix
- 3. Wait for few secs and inactivate user using ns-inactivate.pl
- 4. Wait till accountInactivityLimit exceeded.
- 5. Run ldapsearch as normal user, expected error 19.
- 6. Activate user using ns-activate.pl script
- 7. Check if account is activated, expected error 19
- 8. Replace the lastLoginTime attribute and check if account is activated
- 9. Run ldapsearch as normal user, expected 0.
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Configure Global account policy plugin
+ 2. Add few users to ou=groups subtree in the default suffix
+ 3. Wait for few secs and inactivate user using ns-inactivate.pl
+ 4. Wait till accountInactivityLimit exceeded.
+ 5. Run ldapsearch as normal user, expected error 19.
+ 6. Activate user using ns-activate.pl script
+ 7. Check if account is activated, expected error 19
+ 8. Replace the lastLoginTime attribute and check if account is activated
+ 9. Run ldapsearch as normal user, expected 0.
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
"""
suffix = DEFAULT_SUFFIX
@@ -724,18 +807,25 @@ def test_glinact_nsact(topology_st, accpol_global):
def test_glinact_acclock(topology_st, accpol_global):
"""Verify if user account is activated when account is unlocked by passwordlockoutduration.
- :ID: 43601a61-065c-4c80-a7c2-e4f6ae17beb8
- :feature: Account Policy Plugin
+ :id: 43601a61-065c-4c80-a7c2-e4f6ae17beb8
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=groups subtree in the default suffix
- 2. Wait for few secs and attempt invalid binds for user
- 3. User account should be locked based on Account Lockout policy.
- 4. Wait till accountInactivityLimit exceeded and check users, expected error 19
- 5. Wait for passwordlockoutduration and check if account is active
- 6. Check if account is unlocked, expected error 19, since account is inactivated
- 7. Replace the lastLoginTime attribute and check users, expected 0
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Add few users to ou=groups subtree in the default suffix
+ 2. Wait for few secs and attempt invalid binds for user
+ 3. User account should be locked based on Account Lockout policy.
+ 4. Wait till accountInactivityLimit exceeded and check users, expected error 19
+ 5. Wait for passwordlockoutduration and check if account is active
+ 6. Check if account is unlocked, expected error 19, since account is inactivated
+ 7. Replace the lastLoginTime attribute and check users, expected 0
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
"""
suffix = DEFAULT_SUFFIX
@@ -773,20 +863,29 @@ def test_glinact_acclock(topology_st, accpol_global):
def test_glnact_pwexp(topology_st, accpol_global):
"""Verify if user account is activated when password is reset after password is expired
- :ID: 3bb97992-101a-4e5a-b60a-4cc21adcc76e
- :feature: Account Policy Plugin
+ :id: 3bb97992-101a-4e5a-b60a-4cc21adcc76e
:setup: Standalone instance, Global account policy plugin configuration,
set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=groups subtree in the default suffix
- 2. Set passwordmaxage to few secs
- 3. Wait for passwordmaxage to reach and check if password expired
- 4. Run ldapsearch as normal user, expected error 19.
- 5. Reset the password for user account
- 6. Wait till accountInactivityLimit exceeded and check users
- 7. Run ldapsearch as normal user, expected error 19.
- 8. Replace the lastLoginTime attribute and check if account is activated
- 9. Run ldapsearch as normal user, expected 0.
- :assert: Should return success once the user is activated
+ :steps:
+ 1. Add few users to ou=groups subtree in the default suffix
+ 2. Set passwordmaxage to few secs
+ 3. Wait for passwordmaxage to reach and check if password expired
+ 4. Run ldapsearch as normal user, expected error 19.
+ 5. Reset the password for user account
+ 6. Wait till accountInactivityLimit exceeded and check users
+ 7. Run ldapsearch as normal user, expected error 19.
+ 8. Replace the lastLoginTime attribute and check if account is activated
+ 9. Run ldapsearch as normal user, expected 0.
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
"""
suffix = DEFAULT_SUFFIX
@@ -849,17 +948,23 @@ def test_glnact_pwexp(topology_st, accpol_global):
def test_locact_inact(topology_st, accpol_local):
"""Verify if user account is inactivated when accountInactivityLimit is exceeded.
- :ID: 02140e36-79eb-4d88-ba28-66478689289b
- :feature: Account Policy Plugin
+ :id: 02140e36-79eb-4d88-ba28-66478689289b
:setup: Standalone instance, ou=people subtree configured for Local account
policy plugin configuration, set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=people subtree in the default suffix
- 2. Wait for few secs before it reaches accountInactivityLimit and check users.
- 3. Run ldapsearch as normal user, expected 0
- 4. Wait till accountInactivityLimit is exceeded
- 5. Run ldapsearch as normal user and check if its inactivated, expected error 19.
- 6. Replace user's lastLoginTime attribute and check if its activated, expected 0
- :assert: Should return error code 19
+ :steps:
+ 1. Add few users to ou=people subtree in the default suffix
+ 2. Wait for few secs before it reaches accountInactivityLimit and check users.
+ 3. Run ldapsearch as normal user, expected 0
+ 4. Wait till accountInactivityLimit is exceeded
+ 5. Run ldapsearch as normal user and check if its inactivated, expected error 19.
+ 6. Replace user's lastLoginTime attribute and check if its activated, expected 0
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Should return error code 19
"""
suffix = DEFAULT_SUFFIX
@@ -887,18 +992,25 @@ def test_locact_inact(topology_st, accpol_local):
def test_locinact_modrdn(topology_st, accpol_local):
"""Verify if user account is inactivated when moved from ou=groups to ou=people subtree.
- :ID: 5f25bea3-fab0-4db4-b43d-2d47cc6e5ad1
- :feature: Account Policy Plugin
+ :id: 5f25bea3-fab0-4db4-b43d-2d47cc6e5ad1
:setup: Standalone instance, ou=people subtree configured for Local account
policy plugin configuration, set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=groups subtree in the default suffix
- 2. Plugin configured to ou=people subtree only.
- 3. Wait for few secs before it reaches accountInactivityLimit and check users.
- 4. Run ldapsearch as normal user, expected 0
- 5. Wait till accountInactivityLimit exceeded
- 6. Move users from ou=groups subtree to ou=people subtree
- 7. Check if users are inactivated, expected error 19
- :assert: Should return error code 0 and 19
+ :steps:
+ 1. Add few users to ou=groups subtree in the default suffix
+ 2. Plugin configured to ou=people subtree only.
+ 3. Wait for few secs before it reaches accountInactivityLimit and check users.
+ 4. Run ldapsearch as normal user, expected 0
+ 5. Wait till accountInactivityLimit exceeded
+ 6. Move users from ou=groups subtree to ou=people subtree
+ 7. Check if users are inactivated, expected error 19
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Should return error code 0 and 19
"""
suffix = DEFAULT_SUFFIX
@@ -930,16 +1042,21 @@ def test_locinact_modrdn(topology_st, accpol_local):
def test_locact_modrdn(topology_st, accpol_local):
"""Verify if user account is inactivated when users moved from ou=people to ou=groups subtree.
- :ID: e821cbae-bfc3-40d3-947d-b228c809987f
- :feature: Account Policy Plugin
+ :id: e821cbae-bfc3-40d3-947d-b228c809987f
:setup: Standalone instance, ou=people subtree configured for Local account
policy plugin configuration, set accountInactivityLimit to few secs.
- :steps: 1. Add few users to ou=people subtree in the default suffix
- 2. Wait for few secs and check if users not inactivated, expected 0.
- 3. Move users from ou=people to ou=groups subtree
- 4. Wait till accountInactivityLimit is exceeded
- 5. Check if users are active in ou=groups subtree, expected 0
- :assert: Should return error code 0
+ :steps:
+ 1. Add few users to ou=people subtree in the default suffix
+ 2. Wait for few secs and check if users not inactivated, expected 0.
+ 3. Move users from ou=people to ou=groups subtree
+ 4. Wait till accountInactivityLimit is exceeded
+ 5. Check if users are active in ou=groups subtree, expected 0
+ :assert:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
"""
suffix = DEFAULT_SUFFIX
| 0 |
00b81fed600b1ccef6bc87fcdaf562283e183002
|
389ds/389-ds-base
|
610281 - fix coverity Defect Type: Control flow issues
https://bugzilla.redhat.com/show_bug.cgi?id=610281
11832 DEADCODE Triaged Unassigned Bug Minor Fix Required
do_modrdn() ds/ldap/servers/slapd/modrdn.c
Comment:
At lines 201 and 212, the condition "rawnewsuperior" cannot be false.
201 rawnewsuperior?rawnewsuperior:"",
212 rawnewsuperior?rawnewsuperior:"",
This patch removes the checks.
|
commit 00b81fed600b1ccef6bc87fcdaf562283e183002
Author: Noriko Hosoi <[email protected]>
Date: Thu Jul 8 11:03:36 2010 -0700
610281 - fix coverity Defect Type: Control flow issues
https://bugzilla.redhat.com/show_bug.cgi?id=610281
11832 DEADCODE Triaged Unassigned Bug Minor Fix Required
do_modrdn() ds/ldap/servers/slapd/modrdn.c
Comment:
At lines 201 and 212, the condition "rawnewsuperior" cannot be false.
201 rawnewsuperior?rawnewsuperior:"",
212 rawnewsuperior?rawnewsuperior:"",
This patch removes the checks.
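For illustration, a minimal, hypothetical C sketch of the dead-code pattern described above — not the actual do_modrdn() code; the helper name and control flow are invented. Once a NULL value has already been handled earlier in the function, the ternary fallback can never be taken, so dropping it does not change behavior:

#include <stdio.h>

/* Stand-in for op_shared_log_error_access(); purely illustrative. */
static void log_error_access(const char *op, const char *dn, const char *msg)
{
    fprintf(stderr, "%s dn=\"%s\": %s\n", op, dn, msg);
}

static void report_invalid_superior(const char *rawnewsuperior)
{
    if (rawnewsuperior == NULL) {
        return; /* in this sketch, a missing value was already rejected here */
    }

    /* DEADCODE: rawnewsuperior cannot be NULL here, so the "" fallback is unreachable. */
    log_error_access("MODRDN", rawnewsuperior ? rawnewsuperior : "", "invalid new superior");

    /* Equivalent, simpler form after the fix: */
    log_error_access("MODRDN", rawnewsuperior, "invalid new superior");
}

int main(void)
{
    report_invalid_superior("ou=People,dc=example,dc=com");
    return 0;
}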
diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
index 6951fb053..b721fc79d 100644
--- a/ldap/servers/slapd/modrdn.c
+++ b/ldap/servers/slapd/modrdn.c
@@ -197,8 +197,7 @@ do_modrdn( Slapi_PBlock *pb )
/* check that the dn is formatted correctly */
err = slapi_dn_syntax_check(pb, rawnewsuperior, 1);
if (err) { /* syntax check failed */
- op_shared_log_error_access(pb, "MODRDN",
- rawnewsuperior?rawnewsuperior:"",
+ op_shared_log_error_access(pb, "MODRDN", rawnewsuperior,
"strict: invalid new superior");
send_ldap_result(pb, LDAP_INVALID_DN_SYNTAX,
NULL, "invalid new superior", 0, NULL);
@@ -208,8 +207,7 @@ do_modrdn( Slapi_PBlock *pb )
}
err = slapi_dn_normalize_ext(rawnewsuperior, 0, &newsuperior, &dnlen);
if (err < 0) {
- op_shared_log_error_access(pb, "MODRDN",
- rawnewsuperior?rawnewsuperior:"",
+ op_shared_log_error_access(pb, "MODRDN", rawnewsuperior,
"invalid new superior");
send_ldap_result(pb, LDAP_INVALID_DN_SYNTAX,
NULL, "invalid new superior", 0, NULL);
| 0 |
a9fa210431e8480bf4e2715826199a3c7f22018f
|
389ds/389-ds-base
|
Ticket 49857 - RPM scriptlet for 389-ds-base-legacy-tools throws an error
Bug Description: If no instances are present when the legacy tool
subpackage is installed, the RPM scriptlet throws an
error:
line 29: [: -eq: unary operator expected
Fix Description: The error occurs because a variable that counts the
existing instances is not initialized.
https://pagure.io/389-ds-base/issue/49857
Reviewed by: mreynolds (one line commit rule)
|
commit a9fa210431e8480bf4e2715826199a3c7f22018f
Author: Mark Reynolds <[email protected]>
Date: Wed Jul 18 09:31:42 2018 -0400
Ticket 49857 - RPM scriptlet for 389-ds-base-legacy-tools throws an error
Bug Description: If no instances are present when the legacy tool
subpackage is installed, the RPM scriptlet throws an
error:
line 29: [: -eq: unary operator expected
Fix Description: The error occurs because a variable that counts the
existing instances is not initialized.
https://pagure.io/389-ds-base/issue/49857
Reviewed by: mreynolds (one line commit rule)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 7d99feb7a..99fdabdb3 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -544,6 +544,7 @@ fi
echo looking for instances in %{_sysconfdir}/%{pkgname} > $output 2>&1 || :
instbase="%{_sysconfdir}/%{pkgname}"
+ninst=0
for dir in $instbase/slapd-* ; do
echo dir = $dir >> $output 2>&1 || :
if [ ! -d "$dir" ] ; then continue ; fi
| 0 |
ff6e852301aa394402a3986e8d591e375eaf2a00
|
389ds/389-ds-base
|
Issue 4322 - Fix a source link (#4332)
Description: Source0 should point to a local file instead of
a remote URL. We use it for testing/development only so
there is no need for external links.
Reviewed by: @Firstyear (Thanks!)
Fixes: #4322
|
commit ff6e852301aa394402a3986e8d591e375eaf2a00
Author: Simon Pichugin <[email protected]>
Date: Tue Sep 22 13:23:47 2020 +0200
Issue 4322 - Fix a source link (#4332)
Description: Source0 should point to a local file instead of
a remote URL. We use it for testing/development only so
there is no need for external links.
Reviewed by: @Firstyear (Thanks!)
Fixes: #4322
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 95a901caf..27d19ef5b 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -177,7 +177,7 @@ Requires: cracklib-dicts
# Picks up our systemd deps.
%{?systemd_requires}
-Source0: https://github.com/389ds/%{name}/archive/%{name}-%{version}%{?prerel}.tar.gz
+Source0: %{name}-%{version}%{?prerel}.tar.bz2
# 389-ds-git.sh should be used to generate the source tarball from git
Source1: %{name}-git.sh
Source2: %{name}-devel.README
| 0 |
2d885470b8beff5e593413507b390ab32ddac891
|
389ds/389-ds-base
|
I noticed a problem with the autoconf style builds that shows up on 64 bit - some of the scripts had /usr/lib/mozldap6 in the PATH, and some had /usr/lib64/mozldap6. All of them should have lib64. What was happening is that make would create some script templates from the corresponding template .in file, and some it would just copy over (those that have both a template-script and a template-script.in). The template-script files have hardcoded /usr/lib/ - it requires the conversion from template-script.in to template-script to substitute /usr/lib64/ instead.
To solve this problem, I touched all of the *.in files so that they would have a later date than their corresponding template file, so that make would create the template-script from template-script.in. Make uses VPATH, and VPATH is used not only to look for dependencies, but also to find _targets_. So if the .in file is older than its template file, make will find that ../ldap/admin/src/scripts/template-foo (the target) is newer than ../ldap/admin/src/scripts/template-foo.in (the dependency) and will think it is up to date. What should happen is that make should see that built/ldap/admin/src/scripts/template-foo does not exist, and create it from ../ldap/admin/src/scripts/template-foo.in, but apparently VPATH overrides that.
This problem will go away once we remove all of the template-script files and just create all of them from the template-script.in files.
|
commit 2d885470b8beff5e593413507b390ab32ddac891
Author: Rich Megginson <[email protected]>
Date: Tue Nov 28 17:13:48 2006 +0000
I noticed a problem with the autoconf style builds that shows up on 64 bit - some of the scripts had /usr/lib/mozldap6 in the PATH, and some had /usr/lib64/mozldap6. All of them should have lib64. What was happening is that make would create some script templates from the corresponding template .in file, and some it would just copy over (those that have both a template-script and a template-script.in). The template-script files have hardcoded /usr/lib/ - it requires the conversion from template-script.in to template-script to substitute /usr/lib64/ instead.
To solve this problem, I touched all of the *.in files so that they would have a later date than their corresponding template file, so that make would create the template-script from template-script.in. Make uses VPATH, and VPATH is used not only to look for dependencies, but also to find _targets_. So if the .in file is older than its template file, make will find that ../ldap/admin/src/scripts/template-foo (the target) is newer than ../ldap/admin/src/scripts/template-foo.in (the dependency) and will think it is up to date. What should happen is that make should see that built/ldap/admin/src/scripts/template-foo does not exist, and create it from ../ldap/admin/src/scripts/template-foo.in, but apparently VPATH overrides that.
This problem will go away once we remove all of the template-script files and just create all of them from the template-script.in files.
| 0 |
7096094e0f26ad3f4acbbffeefc0a174a791686d
|
389ds/389-ds-base
|
Ticket 50077 - RFE - improve automember plugin to work with modify ops
Description:
Previously automember was only invoked for ADD operations. This enhancement
allows it to work with modify operations, and it will also maintain the
correct memberships. So if a modify changes which groups the user would
belong to, it will add the user to the new group, and remove them from the
old group.
https://pagure.io/389-ds-base/issue/50077
Reviewed by: spichugi & firstyear (Thanks!!)
|
commit 7096094e0f26ad3f4acbbffeefc0a174a791686d
Author: Mark Reynolds <[email protected]>
Date: Tue Dec 11 17:14:54 2018 -0500
Ticket 50077 - RFE - improve automember plugin to work with modify ops
Description:
Previously automember was only invoked for ADD operations. This enhancement
allows it to work with modify operations, and it will also maintain the
correct memberships. So if a modify changes which groups the user would
belong to, it will add the user to the new group, and remove them from the
old group.
https://pagure.io/389-ds-base/issue/50077
Reviewed by: spichugi & firstyear (Thanks!!)
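As a rough, standalone C sketch of the cleanup logic this commit describes — the group DNs and helper are hypothetical, and this is not the plugin code itself — the modify post-op effectively diffs the target-group lists computed before and after the change, adding the member to newly matched groups and removing it from groups that only matched before:

#include <stdio.h>
#include <string.h>

/* Hypothetical target-group lists computed from the entry before and after the MOD. */
static const char *pre_groups[]  = { "cn=testgroup2,dc=example,dc=com", NULL };
static const char *post_groups[] = { "cn=testgroup3,dc=example,dc=com", NULL };

static int in_list(const char *dn, const char **list)
{
    for (size_t i = 0; list[i] != NULL; i++) {
        if (strcmp(dn, list[i]) == 0) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    /* Memberships that are new after the modify (LDAP_MOD_ADD in the plugin). */
    for (size_t i = 0; post_groups[i] != NULL; i++) {
        if (!in_list(post_groups[i], pre_groups)) {
            printf("ADD member to %s\n", post_groups[i]);
        }
    }
    /* Memberships that no longer match (LDAP_MOD_DELETE in the plugin). */
    for (size_t i = 0; pre_groups[i] != NULL; i++) {
        if (!in_list(pre_groups[i], post_groups)) {
            printf("DEL member from %s\n", pre_groups[i]);
        }
    }
    return 0;
}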
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
new file mode 100644
index 000000000..4ba3bc8b9
--- /dev/null
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
@@ -0,0 +1,142 @@
+import logging
+import pytest
+import os
+from lib389.utils import ds_is_older
+from lib389._constants import *
+from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
+from lib389.idm.user import UserAccounts
+from lib389.idm.group import Groups
+from lib389.topologies import topology_st as topo
+
+# Skip on older versions
+pytestmark = pytest.mark.skipif(ds_is_older('1.4.0'), reason="Not implemented")
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
[email protected](scope="module")
+def automember_fixture(topo, request):
+ # Create group
+ groups = []
+ group_obj = Groups(topo.standalone, DEFAULT_SUFFIX)
+ groups.append(group_obj.create(properties={'cn': 'testgroup'}))
+ groups.append(group_obj.create(properties={'cn': 'testgroup2'}))
+ groups.append(group_obj.create(properties={'cn': 'testgroup3'}))
+
+ # Create test user
+ user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ user = user_accts.create_test_user()
+
+ # Create automember definitions and regex rules
+ automember_prop = {
+ 'cn': 'testgroup_definition',
+ 'autoMemberScope': DEFAULT_SUFFIX,
+ 'autoMemberFilter': 'objectclass=posixaccount',
+ 'autoMemberDefaultGroup': groups[0].dn,
+ 'autoMemberGroupingAttr': 'member:dn',
+ }
+ automembers = AutoMembershipDefinitions(topo.standalone)
+ auto_def = automembers.create(properties=automember_prop)
+ auto_def.add_regex_rule("regex1", groups[1].dn, include_regex=['cn=mark.*'])
+ auto_def.add_regex_rule("regex2", groups[2].dn, include_regex=['cn=simon.*'])
+
+ # Enable plugin
+ automemberplugin = AutoMembershipPlugin(topo.standalone)
+ automemberplugin.enable()
+ topo.standalone.restart()
+
+ return (user, groups)
+
+
+def test_mods(automember_fixture, topo):
+ """Modify the user so that it is added to the various automember groups
+
+ :id: 28a2b070-7f16-4905-8831-c80fa6441693
+ :setup: Standalone Instance
+ :steps:
+ 1. Update user that should add it to group[0]
+ 2. Update user that should add it to group[1]
+ 3. Update user that should add it to group[2]
+ 4. Update user that should add it to group[0]
+ 5. Test rebuild task correctly moves user to group[1]
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ """
+ (user, groups) = automember_fixture
+
+ # Update user which should go into group[0]
+ user.replace('cn', 'whatever')
+ groups[0].is_member(user.dn)
+ if groups[1].is_member(user.dn):
+ assert False
+ if groups[2].is_member(user.dn):
+ assert False
+
+ # Update user0 which should go into group[1]
+ user.replace('cn', 'mark')
+ groups[1].is_member(user.dn)
+ if groups[0].is_member(user.dn):
+ assert False
+ if groups[2].is_member(user.dn):
+ assert False
+
+ # Update user which should go into group[2]
+ user.replace('cn', 'simon')
+ groups[2].is_member(user.dn)
+ if groups[0].is_member(user.dn):
+ assert False
+ if groups[1].is_member(user.dn):
+ assert False
+
+ # Update user which should go back into group[0] (full circle)
+ user.replace('cn', 'whatever')
+ groups[0].is_member(user.dn)
+ if groups[1].is_member(user.dn):
+ assert False
+ if groups[2].is_member(user.dn):
+ assert False
+
+ #
+ # Test rebuild task. First disable plugin
+ #
+ automemberplugin = AutoMembershipPlugin(topo.standalone)
+ automemberplugin.disable()
+ topo.standalone.restart()
+
+ # Make change that would move the entry from group[0] to group[1]
+ user.replace('cn', 'mark')
+
+ # Enable plugin
+ automemberplugin.enable()
+ topo.standalone.restart()
+
+ # Run rebuild task
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
+ task.wait()
+
+ # Test membership
+ groups[1].is_member(user.dn)
+ if groups[0].is_member(user.dn):
+ assert False
+ if groups[2].is_member(user.dn):
+ assert False
+
+ # Success
+ log.info("Test PASSED")
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 41a6a7c2a..9efff58c5 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -696,6 +696,7 @@ nsslapd-plugininitfunc: automember_init
nsslapd-plugintype: betxnpreoperation
nsslapd-pluginenabled: on
nsslapd-plugin-depends-on-type: database
+autoMemberProcessModifyOps: on
dn: cn=Bitwise Plugin,cn=plugins,cn=config
objectClass: top
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 08d9e49fe..6641745ad 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -73,7 +73,8 @@ static struct automemberRegexRule *automember_parse_regex_rule(char *rule_string
static void automember_free_regex_rule(struct automemberRegexRule *rule);
static int automember_parse_grouping_attr(char *value, char **grouping_attr, char **grouping_value);
static int automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileDesc *ldif_fd);
-static int automember_add_member_value(Slapi_Entry *member_e, const char *group_dn, char *grouping_attr, char *grouping_value, PRFileDesc *ldif_fd);
+static int automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char *grouping_attr,
+ char *grouping_value, PRFileDesc *ldif_fd, int add);
/*
* task functions
@@ -89,6 +90,8 @@ static void automember_task_export_destructor(Slapi_Task *task);
static void automember_task_map_destructor(Slapi_Task *task);
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
+static uint64_t plugin_do_modify = 1;
+static uint64_t plugin_is_betxn = 0;
/*
* Config cache locking functions
@@ -139,8 +142,6 @@ automember_get_plugin_sdn(void)
return _PluginDN;
}
-static int plugin_is_betxn = 0;
-
/*
* Plug-in initialization functions
*/
@@ -158,8 +159,7 @@ automember_init(Slapi_PBlock *pb)
"--> automember_init\n");
/* get args */
- if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) &&
- plugin_entry &&
+ if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && plugin_entry &&
(plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) &&
plugin_type && strstr(plugin_type, "betxn")) {
plugin_is_betxn = 1;
@@ -297,7 +297,9 @@ static int
automember_start(Slapi_PBlock *pb)
{
Slapi_DN *plugindn = NULL;
+ Slapi_Entry *plugin_entry = NULL;
char *config_area = NULL;
+ const char *do_modify = NULL;
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"--> automember_start\n");
@@ -342,8 +344,22 @@ automember_start(Slapi_PBlock *pb)
return -1;
}
+ /* Check and set if we should process modify operations */
+ plugin_do_modify = 1; /* default is "on" */
+ if ((slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &plugin_entry) == 0) && plugin_entry){
+ if ((do_modify = slapi_fetch_attr(plugin_entry, AUTOMEMBER_DO_MODIFY, NULL)) ) {
+ if (strcasecmp(do_modify, "on") && strcasecmp(do_modify, "off")) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_start - %s: invalid value \"%s\". Valid values are \"on\" or \"off\". Using default of \"on\"\n",
+ AUTOMEMBER_DO_MODIFY, do_modify);
+ } else if (strcasecmp(do_modify, "off") == 0 ){
+ plugin_do_modify = 0;
+ }
+ }
+ }
+
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "auto membership plug-in: ready for service\n");
+ "automember_start - ready for service\n");
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"<-- automember_start\n");
@@ -1365,37 +1381,49 @@ bail:
}
/*
- * automember_update_membership()
- *
- * Determines which target groups need to be updated according to
- * the rules in config, then performs the updates.
+ * Free the exclusion and inclusion group dn's created by
+ * automember_get_membership_lists()
*/
-static int
-automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileDesc *ldif_fd)
+static void
+automember_free_membership_lists(PRCList *exclusions, PRCList *targets)
+{
+ struct automemberDNListItem *dnitem = NULL;
+
+ /*
+ * Free the exclusions and targets lists. Remember that the DN's
+ * are not ours, so don't free them!
+ */
+ while (!PR_CLIST_IS_EMPTY(exclusions)) {
+ dnitem = (struct automemberDNListItem *)PR_LIST_HEAD(exclusions);
+ PR_REMOVE_LINK((PRCList *)dnitem);
+ slapi_ch_free((void **)&dnitem);
+ }
+
+ while (!PR_CLIST_IS_EMPTY(targets)) {
+ dnitem = (struct automemberDNListItem *)PR_LIST_HEAD(targets);
+ PR_REMOVE_LINK((PRCList *)dnitem);
+ slapi_ch_free((void **)&dnitem);
+ }
+}
+
+/*
+ * Populate the exclusion and inclusion(target) PRCLists based on the
+ * slapi entry and configEntry provided. The PRCLists should be freed
+ * using automember_free_membership_lists()
+ */
+static void
+automember_get_membership_lists(struct configEntry *config, PRCList *exclusions, PRCList *targets, Slapi_Entry *e)
{
PRCList *rule = NULL;
struct automemberRegexRule *curr_rule = NULL;
- PRCList exclusions;
- PRCList targets;
struct automemberDNListItem *dnitem = NULL;
Slapi_DN *last = NULL;
PRCList *curr_exclusion = NULL;
char **vals = NULL;
- int rc = 0;
int i = 0;
- if (!config || !e) {
- return -1;
- }
-
- slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_update_membership - Processing \"%s\" "
- "definition entry for candidate entry \"%s\".\n",
- config->dn, slapi_entry_get_dn(e));
-
- /* Initialize our lists that keep track of targets. */
- PR_INIT_CLIST(&exclusions);
- PR_INIT_CLIST(&targets);
+ PR_INIT_CLIST(exclusions);
+ PR_INIT_CLIST(targets);
/* Go through exclusive rules and build an exclusion list. */
if (config->exclusive_rules) {
@@ -1416,20 +1444,19 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
/* Found a match. Add to end of the exclusion list
* and set last as a hint to ourselves. */
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_update_membership - Adding \"%s\" "
+ "automember_get_membership_lists - Adding \"%s\" "
"to list of excluded groups for \"%s\" "
"(matched: \"%s=%s\").\n",
slapi_sdn_get_dn(curr_rule->target_group_dn),
slapi_entry_get_dn(e), curr_rule->attr,
curr_rule->regex_str);
- dnitem = (struct automemberDNListItem *)slapi_ch_calloc(1,
- sizeof(struct automemberDNListItem));
+ dnitem = (struct automemberDNListItem *)slapi_ch_calloc(1, sizeof(struct automemberDNListItem));
/* We are just referencing the dn from the regex rule. We
* will not free it when we clean up this list. This list
* is more short-lived than the regex rule list, so we can
* get away with this optimization. */
dnitem->dn = curr_rule->target_group_dn;
- PR_APPEND_LINK(&(dnitem->list), &exclusions);
+ PR_APPEND_LINK(&(dnitem->list), exclusions);
last = curr_rule->target_group_dn;
}
}
@@ -1450,8 +1477,8 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
last = NULL;
/* Get the first excluded target for exclusion checking. */
- if (!PR_CLIST_IS_EMPTY(&exclusions)) {
- curr_exclusion = PR_LIST_HEAD(&exclusions);
+ if (!PR_CLIST_IS_EMPTY(exclusions)) {
+ curr_exclusion = PR_LIST_HEAD(exclusions);
}
rule = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
@@ -1468,7 +1495,7 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
* until we find a target that is the same or comes after the
* current rule. */
if (curr_exclusion) {
- while ((curr_exclusion != &exclusions) && (slapi_sdn_compare(
+ while ((curr_exclusion != exclusions) && (slapi_sdn_compare(
((struct automemberDNListItem *)curr_exclusion)->dn,
curr_rule->target_group_dn) < 0)) {
curr_exclusion = PR_NEXT_LINK(curr_exclusion);
@@ -1479,7 +1506,7 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
* we can skip all rules for the last target group DN that we
* added to the targets list. We also skip any rules for
* target groups that have been excluded by an exclusion rule. */
- if (((curr_exclusion == NULL) || (curr_exclusion == &exclusions) ||
+ if (((curr_exclusion == NULL) || (curr_exclusion == exclusions) ||
slapi_sdn_compare(((struct automemberDNListItem *)curr_exclusion)->dn,
curr_rule->target_group_dn) != 0) &&
((last == NULL) ||
@@ -1492,7 +1519,7 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
/* Found a match. Add to the end of the targets list
* and set last as a hint to ourselves. */
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_update_membership - Adding \"%s\" "
+ "automember_get_membership_lists - Adding \"%s\" "
"to list of target groups for \"%s\" "
"(matched: \"%s=%s\").\n",
slapi_sdn_get_dn(curr_rule->target_group_dn),
@@ -1505,7 +1532,7 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
* is more short-lived than the regex rule list, so we can
* get away with this optimization. */
dnitem->dn = curr_rule->target_group_dn;
- PR_APPEND_LINK(&(dnitem->list), &targets);
+ PR_APPEND_LINK(&(dnitem->list), targets);
last = curr_rule->target_group_dn;
}
}
@@ -1518,6 +1545,42 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
}
}
}
+}
+
+/*
+ * automember_update_membership()
+ *
+ * Determines which target groups need to be updated according to
+ * the rules in config, then performs the updates.
+ *
+ * Return SLAPI_PLUGIN_FAILURE for failures, or
+ * SLAPI_PLUGIN_SUCCESS for success (no memberships updated), or
+ * MEMBERSHIP_UPDATED for success (memberships updated)
+ */
+static int
+automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileDesc *ldif_fd)
+{
+ PRCList exclusions;
+ PRCList targets;
+ struct automemberDNListItem *dnitem = NULL;
+ int rc = SLAPI_PLUGIN_SUCCESS;
+ int i = 0;
+
+ if (!config || !e) {
+ return SLAPI_PLUGIN_FAILURE;
+ }
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_update_membership - Processing \"%s\" "
+ "definition entry for candidate entry \"%s\".\n",
+ config->dn, slapi_entry_get_dn(e));
+
+ /* Initialize our lists that keep track of targets. */
+ PR_INIT_CLIST(&exclusions);
+ PR_INIT_CLIST(&targets);
+
+ /* get the membership lists */
+ automember_get_membership_lists(config, &exclusions, &targets, e);
/* If no targets, update default groups if set. Otherwise, update
* targets. Use a helper to do the actual updates. We can just pass an
@@ -1526,51 +1589,45 @@ automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileD
if (PR_CLIST_IS_EMPTY(&targets)) {
/* Add to each default group. */
for (i = 0; config->default_groups && config->default_groups[i]; i++) {
- if (automember_add_member_value(e, config->default_groups[i], config->grouping_attr,
- config->grouping_value, ldif_fd)) {
+ if (automember_update_member_value(e, config->default_groups[i], config->grouping_attr,
+ config->grouping_value, ldif_fd, ADD_MEMBER))
+ {
rc = SLAPI_PLUGIN_FAILURE;
goto out;
}
+ rc = MEMBERSHIP_UPDATED;
}
} else {
/* Update the target groups. */
dnitem = (struct automemberDNListItem *)PR_LIST_HEAD(&targets);
while ((PRCList *)dnitem != &targets) {
- if (automember_add_member_value(e, slapi_sdn_get_dn(dnitem->dn), config->grouping_attr,
- config->grouping_value, ldif_fd)) {
+ if (automember_update_member_value(e, slapi_sdn_get_dn(dnitem->dn), config->grouping_attr,
+ config->grouping_value, ldif_fd, ADD_MEMBER))
+ {
rc = SLAPI_PLUGIN_FAILURE;
goto out;
}
dnitem = (struct automemberDNListItem *)PR_NEXT_LINK((PRCList *)dnitem);
+ rc = MEMBERSHIP_UPDATED;
}
}
- /* Free the exclusions and targets lists. Remember that the DN's
- * are not ours, so don't free them! */
- while (!PR_CLIST_IS_EMPTY(&exclusions)) {
- dnitem = (struct automemberDNListItem *)PR_LIST_HEAD(&exclusions);
- PR_REMOVE_LINK((PRCList *)dnitem);
- slapi_ch_free((void **)&dnitem);
- }
-
- while (!PR_CLIST_IS_EMPTY(&targets)) {
- dnitem = (struct automemberDNListItem *)PR_LIST_HEAD(&targets);
- PR_REMOVE_LINK((PRCList *)dnitem);
- slapi_ch_free((void **)&dnitem);
- }
+ /* Free the exclusions and targets lists */
+ automember_free_membership_lists(&exclusions, &targets);
out:
return rc;
}
+
/*
- * automember_add_member_value()
+ * automember_update_member_value()
*
* Adds a member entry to a group.
*/
static int
-automember_add_member_value(Slapi_Entry *member_e, const char *group_dn, char *grouping_attr, char *grouping_value, PRFileDesc *ldif_fd)
+automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char *grouping_attr, char *grouping_value, PRFileDesc *ldif_fd, int add)
{
Slapi_PBlock *mod_pb = slapi_pblock_new();
int result = LDAP_SUCCESS;
@@ -1606,34 +1663,57 @@ automember_add_member_value(Slapi_Entry *member_e, const char *group_dn, char *g
/* Set up the operation. */
vals[0] = member_value;
vals[1] = 0;
- mod.mod_op = LDAP_MOD_ADD;
+ if (add) {
+ mod.mod_op = LDAP_MOD_ADD;
+ } else {
+ mod.mod_op = LDAP_MOD_DELETE;
+ }
mod.mod_type = grouping_attr;
mod.mod_values = vals;
mods[0] = &mod;
mods[1] = 0;
/* Perform the modify operation. */
- slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_add_member_value - Adding \"%s\" as "
- "a \"%s\" value to group \"%s\".\n",
- member_value, grouping_attr, group_dn);
+ if (add){
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_update_member_value - Adding \"%s\" as "
+ "a \"%s\" value to group \"%s\".\n",
+ member_value, grouping_attr, group_dn);
+ } else {
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_update_member_value - Deleting \"%s\" as "
+ "a \"%s\" value from group \"%s\".\n",
+ member_value, grouping_attr, group_dn);
+ }
slapi_modify_internal_set_pb(mod_pb, group_dn,
mods, 0, 0, automember_get_plugin_id(), 0);
slapi_modify_internal_pb(mod_pb);
slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
- if ((result != LDAP_SUCCESS) && (result != LDAP_TYPE_OR_VALUE_EXISTS)) {
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_add_member_value - Unable to add \"%s\" as "
- "a \"%s\" value to group \"%s\" (%s).\n",
- member_value, grouping_attr, group_dn,
- ldap_err2string(result));
- rc = result;
+ if(add){
+ if ((result != LDAP_SUCCESS) && (result != LDAP_TYPE_OR_VALUE_EXISTS)) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_update_member_value - Unable to add \"%s\" as "
+ "a \"%s\" value to group \"%s\" (%s).\n",
+ member_value, grouping_attr, group_dn,
+ ldap_err2string(result));
+ rc = result;
+ }
+ } else {
+ /* delete value */
+ if ((result != LDAP_SUCCESS) && (result != LDAP_NO_SUCH_ATTRIBUTE)) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_update_member_value - Unable to delete \"%s\" as "
+ "a \"%s\" value from group \"%s\" (%s).\n",
+ member_value, grouping_attr, group_dn,
+ ldap_err2string(result));
+ rc = result;
+ }
}
} else {
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_add_member_value - Unable to find grouping "
+ "automember_update_member_value - Unable to find grouping "
"value attribute \"%s\" in entry \"%s\".\n",
grouping_value, slapi_entry_get_dn(member_e));
}
@@ -1780,22 +1860,144 @@ automember_mod_pre_op(Slapi_PBlock *pb)
static int
automember_mod_post_op(Slapi_PBlock *pb)
{
+ Slapi_Entry *post_e = NULL;
+ Slapi_Entry *pre_e = NULL;
Slapi_DN *sdn = NULL;
+ struct configEntry *config = NULL;
+ PRCList *list = NULL;
+ int rc = SLAPI_PLUGIN_SUCCESS;
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"--> automember_mod_post_op\n");
if (automember_oktodo(pb) && (sdn = automember_get_sdn(pb))) {
- /* Check if the config is being modified and reload if so. */
if (automember_dn_is_config(sdn)) {
+ /*
+ * The config is being modified, reload it
+ */
automember_load_config();
+ } else if ( !automember_isrepl(pb) && plugin_do_modify) {
+ /*
+ * We might be applying an update that will invoke automembership changes...
+ */
+ slapi_pblock_get(pb, SLAPI_ENTRY_POST_OP, &post_e);
+ slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &pre_e);
+
+ if (post_e) {
+ /*
+ * Check if a config entry applies to the entry being modified
+ */
+ automember_config_read_lock();
+
+ if (!PR_CLIST_IS_EMPTY(g_automember_config)) {
+ list = PR_LIST_HEAD(g_automember_config);
+ while (list != g_automember_config) {
+ config = (struct configEntry *)list;
+
+ /* Does the entry meet scope and filter requirements? */
+ if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
+ slapi_filter_test_simple(post_e, config->filter) == 0)
+ {
+ /* Find out what membership changes are needed and make them. */
+ if ((rc = automember_update_membership(config, post_e, NULL)) == SLAPI_PLUGIN_FAILURE) {
+ /* Failed to update our groups, break out */
+ break;
+ } else if (rc == MEMBERSHIP_UPDATED) {
+ /*
+ * We updated one of our groups, but we might need to do some cleanup in other groups
+ */
+ PRCList exclusions_post, targets_post;
+ PRCList exclusions_pre, targets_pre;
+ struct automemberDNListItem *dn_pre = NULL;
+ struct automemberDNListItem *dn_post = NULL;
+ int i;
+
+ /* reset rc */
+ rc = SLAPI_PLUGIN_SUCCESS;
+
+ /* Get the group lists */
+ automember_get_membership_lists(config, &exclusions_post, &targets_post, post_e);
+ automember_get_membership_lists(config, &exclusions_pre, &targets_pre, pre_e);
+
+ /* Process the before and after lists */
+ if (PR_CLIST_IS_EMPTY(&targets_pre) && !PR_CLIST_IS_EMPTY(&targets_post)) {
+ /*
+ * We were in the default groups, but not anymore
+ */
+ for (i = 0; config->default_groups && config->default_groups[i]; i++) {
+ if (automember_update_member_value(post_e, config->default_groups[i], config->grouping_attr,
+ config->grouping_value, NULL, DEL_MEMBER))
+ {
+ rc = SLAPI_PLUGIN_FAILURE;
+ break;
+ }
+ }
+ } else if (!PR_CLIST_IS_EMPTY(&targets_pre) && PR_CLIST_IS_EMPTY(&targets_post)) {
+ /*
+ * We were in non-default groups, but not anymore
+ */
+ dn_pre = (struct automemberDNListItem *)PR_LIST_HEAD(&targets_pre);
+ while ((PRCList *)dn_pre != &targets_pre) {
+ if (automember_update_member_value(post_e, slapi_sdn_get_dn(dn_pre->dn), config->grouping_attr,
+ config->grouping_value, NULL, DEL_MEMBER))
+ {
+ rc = SLAPI_PLUGIN_FAILURE;
+ break;
+ }
+ dn_pre = (struct automemberDNListItem *)PR_NEXT_LINK((PRCList *)dn_pre);
+ }
+ } else {
+ /*
+ * We were previously in non-default groups, and still in non-default groups.
+ * Compare before and after memberships and cleanup the orphaned memberships
+ */
+ dn_pre = (struct automemberDNListItem *)PR_LIST_HEAD(&targets_pre);
+ while ((PRCList *)dn_pre != &targets_pre) {
+ int found = 0;
+ dn_post = (struct automemberDNListItem *)PR_LIST_HEAD(&targets_post);
+ while ((PRCList *)dn_post != &targets_post) {
+ if (slapi_sdn_compare(dn_pre->dn, dn_post->dn) == 0) {
+ /* found */
+ found = 1;
+ break;
+ }
+ /* Next dn */
+ dn_post = (struct automemberDNListItem *)PR_NEXT_LINK((PRCList *)dn_post);
+ }
+ if (!found){
+ /* Remove user from dn_pre->dn */
+ if (automember_update_member_value(post_e, slapi_sdn_get_dn(dn_pre->dn), config->grouping_attr,
+ config->grouping_value, NULL, DEL_MEMBER))
+ {
+ rc = SLAPI_PLUGIN_FAILURE;
+ break;
+ }
+ }
+ /* Next dn */
+ dn_pre = (struct automemberDNListItem *)PR_NEXT_LINK((PRCList *)dn_pre);
+ }
+ }
+
+ /* All done with this config entry, free the lists */
+ automember_free_membership_lists(&exclusions_post, &targets_post);
+ automember_free_membership_lists(&exclusions_pre, &targets_pre);
+ if (rc == SLAPI_PLUGIN_FAILURE) {
+ break;
+ }
+ }
+ }
+ list = PR_NEXT_LINK(list);
+ }
+ }
+ automember_config_unlock();
+ }
}
}
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "<-- automember_mod_post_op\n");
+ "<-- automember_mod_post_op (%d)\n", rc);
- return SLAPI_PLUGIN_SUCCESS;
+ return rc;
}
static int
@@ -1854,7 +2056,7 @@ automember_add_post_op(Slapi_PBlock *pb)
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
(slapi_filter_test_simple(e, config->filter) == 0)) {
/* Find out what membership changes are needed and make them. */
- if (automember_update_membership(config, e, NULL)) {
+ if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
rc = SLAPI_PLUGIN_FAILURE;
break;
}
@@ -2098,8 +2300,9 @@ automember_rebuild_task_thread(void *arg)
Slapi_Entry **entries = NULL;
task_data *td = NULL;
PRCList *list = NULL;
+ PRCList *include_list = NULL;
int result = 0;
- int i = 0;
+ size_t i = 0, ii = 0;
if (!task) {
return; /* no task */
@@ -2175,9 +2378,55 @@ automember_rebuild_task_thread(void *arg)
config = (struct configEntry *)list;
/* Does the entry meet scope and filter requirements? */
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
- (slapi_filter_test_simple(entries[i], config->filter) == 0)) {
+ (slapi_filter_test_simple(entries[i], config->filter) == 0))
+ {
+ /* First clear out all the defaults groups */
+ for (ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
+ if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
+ config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
+ {
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
+ "member from default group (%s) error (%d)\n",
+ config->default_groups[ii], result);
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
+ "member from default group (%s) error (%d)\n",
+ config->default_groups[ii], result);
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
+ config->default_groups[ii], result);
+ automember_config_unlock();
+ goto out;
+ }
+ }
+
+ /* Then clear out the non-default group */
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
+ while (include_list != (PRCList *)config->inclusive_rules) {
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
+ if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
+ config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
+ {
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
+ "member from group (%s) error (%d)\n",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
+ "member from group (%s) error (%d)\n",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ automember_config_unlock();
+ goto out;
+ }
+ include_list = PR_NEXT_LINK(include_list);
+ }
+ }
+
+ /* Update the memberships for this entries */
if (slapi_is_shutting_down() ||
- automember_update_membership(config, entries[i], NULL)) {
+ automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
+ {
result = SLAPI_PLUGIN_FAILURE;
automember_config_unlock();
goto out;
@@ -2390,7 +2639,7 @@ automember_export_task_thread(void *arg)
if (slapi_dn_issuffix(slapi_sdn_get_dn(td->base_dn), config->scope) &&
(slapi_filter_test_simple(entries[i], config->filter) == 0)) {
if (slapi_is_shutting_down() ||
- automember_update_membership(config, entries[i], ldif_fd)) {
+ automember_update_membership(config, entries[i], ldif_fd) == SLAPI_PLUGIN_FAILURE) {
result = SLAPI_DSE_CALLBACK_ERROR;
automember_config_unlock();
goto out;
@@ -2594,7 +2843,7 @@ automember_map_task_thread(void *arg)
if (slapi_dn_issuffix(slapi_entry_get_dn_const(e), config->scope) &&
(slapi_filter_test_simple(e, config->filter) == 0)) {
if (slapi_is_shutting_down() ||
- automember_update_membership(config, e, ldif_fd_out)) {
+ automember_update_membership(config, e, ldif_fd_out) == SLAPI_PLUGIN_FAILURE) {
result = SLAPI_DSE_CALLBACK_ERROR;
slapi_entry_free(e);
slapi_ch_free_string(&entrystr);
@@ -2702,7 +2951,7 @@ automember_modrdn_post_op(Slapi_PBlock *pb)
if (slapi_dn_issuffix(slapi_sdn_get_dn(new_sdn), config->scope) &&
(slapi_filter_test_simple(post_e, config->filter) == 0)) {
/* Find out what membership changes are needed and make them. */
- if (automember_update_membership(config, post_e, NULL)) {
+ if (automember_update_membership(config, post_e, NULL) == SLAPI_PLUGIN_FAILURE) {
rc = SLAPI_PLUGIN_FAILURE;
break;
}
diff --git a/ldap/servers/plugins/automember/automember.h b/ldap/servers/plugins/automember/automember.h
index 37b88b0b5..4bd379af6 100644
--- a/ldap/servers/plugins/automember/automember.h
+++ b/ldap/servers/plugins/automember/automember.h
@@ -44,6 +44,7 @@
#define AUTOMEMBER_GROUPING_ATTR_TYPE "autoMemberGroupingAttr"
#define AUTOMEMBER_DISABLED_TYPE "autoMemberDisabled"
#define AUTOMEMBER_TARGET_GROUP_TYPE "autoMemberTargetGroup"
+#define AUTOMEMBER_DO_MODIFY "autoMemberProcessModifyOps"
/*
* Config loading filters
@@ -55,6 +56,10 @@
* Helper defines
*/
#define IS_ATTRDESC_CHAR(c) (isalnum(c) || (c == '.') || (c == ';') || (c == '-'))
+#define MEMBERSHIP_UPDATED 1
+#define ADD_MEMBER 1
+#define DEL_MEMBER 0
+
struct automemberRegexRule
{
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 13b62b2fb..adc5eb3d9 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -884,6 +884,24 @@ class AutoMembershipPlugin(Plugin):
return task
+class AutoMembershipRegexRule(DSLdapObject):
+ def __init__(self, instance, dn=None):
+ super(AutoMembershipRegexRule, self).__init__(instance, dn)
+ self._rdn_attribute = 'cn'
+ self._must_attributes = ['cn', 'autoMemberTargetGroup']
+ self._create_objectclasses = ['top', 'autoMemberRegexRule']
+ self._protected = False
+
+
+class AutoMembershipRegexRules(DSLdapObjects):
+ def __init__(self, instance, basedn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
+ super(AutoMembershipRegexRules, self).__init__(instance)
+ self._objectclasses = ['top', 'autoMemberRegexRule']
+ self._filterattrs = ['cn']
+ self._childobject = AutoMembershipRegexRule
+ self._basedn = basedn
+
+
class AutoMembershipDefinition(DSLdapObject):
"""A single instance of Auto Membership Plugin config entry
@@ -918,7 +936,7 @@ class AutoMembershipDefinition(DSLdapObject):
def set_defaultgroup(self, attr):
"""Set autoMemberDefaultGroup attribute"""
- self.set('autoMemberDefaultGroup', attr)
+ self.set('autoMemberDefaultGroup', attr)
def get_scope(self, attr):
"""Get autoMemberScope attributes"""
@@ -940,6 +958,42 @@ class AutoMembershipDefinition(DSLdapObject):
self.set('autoMemberFilter', attr)
+ def add_regex_rule(self, rule_name, target, include_regex=None, exclude_regex=None):
+ """Add a regex rule
+ :param rule_name - Name of the rule - used dfor the "cn" value inthe DN of the rule entry
+ :param target - the target group DN
+ :param include_regex - a List of regex rules used for group inclusion
+ :param exclude_regex - a List of regex rules used for group exclusion
+ """
+ props = {'cn': rule_name,
+ 'autoMemberTargetGroup': target}
+
+ if include_regex is not None:
+ props['autoMemberInclusiveRegex'] = include_regex
+ if exclude_regex is not None:
+ props['autoMemberInclusiveRegex'] = exclude_regex
+
+ rules = AutoMembershipRegexRules(self._instance, basedn=self.dn)
+ rules.create(properties=props)
+
+ def del_regex_rule(self, rule_name):
+ """Delete a regex rule from this definition
+ :param rule_name - The "cn" values of the regex rule entry
+ :raises ValueError - If a regex rule entry can not be found using rule_name
+ """
+ rules = AutoMembershipRegexRules(self._instance, basedn=self.dn)
+ regex = rules.get(selector=rule_name)
+ if regex is not None:
+ regex.delete()
+ else:
+ raise ValueError("No regex rule found with the name ({}) under ({})".format(rule_name, self.dn))
+
+ def list_regex_rules(self):
+ """Return a list of regex rule entries for this definition
+ """
+ rules = AutoMembershipRegexRules(self._instance, basedn=self.dn)
+ return rules.list()
+
class AutoMembershipDefinitions(DSLdapObjects):
"""A DSLdapObjects entity which represents Auto Membership Plugin config entry
| 0 |
9eee3a8e06978a081c302ba8907af9e84d3c3cf2
|
389ds/389-ds-base
|
Ticket 48617 - Server ram checks work in isolation
Bug Description: Previously we would check all server cache allocations in
isolation. We would only know if a single cache would exceed our system RAM.
This meant that if you had many backends, it was possible to configure each of
them with 75% of the system RAM and still have a valid configuration, even
though it increased the risk of OOM.
Fix Description: Now all backends are checked in isolation *and* as a whole,
regardless of manual or auto tuning. This also improves the auto tuning system
to check its request, and to LOWER it if it is in excess of the system RAM,
i.e. auto tuning a server should never be able to create an OOM condition.
https://fedorahosted.org/389/ticket/48617
Author: wibrown
Review by: nhosoi (Thanks!)
|
commit 9eee3a8e06978a081c302ba8907af9e84d3c3cf2
Author: William Brown <[email protected]>
Date: Wed May 11 09:09:29 2016 +1000
Ticket 48617 - Server ram checks work in isolation
Bug Description: Previously we would check all server cache allocations in
isolation. We would only know if a single cache would exceed our system RAM.
This meant that if you had many backends, it was possible to configure each of
them with 75% of the system RAM and still have a valid configuration, even
though it increased the risk of OOM.
Fix Description: Now all backends are checked in isolation *and* as a whole,
regardless of manual or auto tuning. This also improves the auto tuning system
to check its request, and to LOWER it if it is in excess of the system RAM,
i.e. auto tuning a server should never be able to create an OOM condition.
https://fedorahosted.org/389/ticket/48617
Author: wibrown
Review by: nhosoi (Thanks!)
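A rough, standalone C sketch of the sizing arithmetic described above, with invented numbers (the real code works in pages via util_info_sys_pages() and util_is_cachesize_sane()): the autosize percentage carves a zone out of system memory, the split percentage divides it between the DB cache and the per-backend entry caches, and the commit adds a final check that the combined total still fits, reducing an oversized request to roughly 3/4 of what is available:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical host: 8 GiB of RAM, 6 GiB currently available. */
    uint64_t mem_total = 8ULL * 1024 * 1024 * 1024;
    uint64_t mem_avail = 6ULL * 1024 * 1024 * 1024;

    unsigned autosize_pct = 80;   /* nsslapd-cache-autosize       */
    unsigned split_pct    = 40;   /* nsslapd-cache-autosize-split */
    unsigned backends     = 4;    /* number of ldbm instances     */

    /* Zone requested by the admin, reduced if it is not sane for this host. */
    uint64_t zone = (mem_total * autosize_pct) / 100;
    if (zone > mem_avail) {
        zone = (mem_avail * 3) / 4;   /* mirrors the 0.75 reduction in util_is_cachesize_sane() */
        printf("requested zone reduced to %llu bytes\n", (unsigned long long)zone);
    }

    uint64_t db_cache   = (zone * split_pct) / 100;
    uint64_t entry_each = (zone - db_cache) / backends;

    printf("db cache: %llu bytes, entry cache per backend: %llu bytes\n",
           (unsigned long long)db_cache, (unsigned long long)entry_each);

    /* The commit's final safety net: the sum of every cache must also fit in memory. */
    uint64_t total = db_cache + (uint64_t)backends * entry_each;
    if (total > mem_avail) {
        printf("CRITICAL: combined caches (%llu bytes) exceed available memory\n",
               (unsigned long long)total);
    }
    return 0;
}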
diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c
index 5058942ae..17d9228a0 100644
--- a/ldap/servers/slapd/back-ldbm/start.c
+++ b/ldap/servers/slapd/back-ldbm/start.c
@@ -35,6 +35,10 @@ ldbm_back_start( Slapi_PBlock *pb )
char *home_dir;
int action;
int retval;
+ int issane = 0;
+ PRUint64 total_cache_size = 0;
+ size_t pagesize, pages, procpages, availpages;
+ char *msg; /* This will be set by one of the two cache sizing paths below. */
LDAPDebug( LDAP_DEBUG_TRACE, "ldbm backend starting\n", 0, 0, 0 );
@@ -116,8 +120,6 @@ ldbm_back_start( Slapi_PBlock *pb )
LDAPDebug( LDAP_DEBUG_ANY, "cache autosizing: bad settings, "
"value or sum of values can not larger than 100.\n", 0, 0, 0 );
} else {
- size_t pagesize, pages, procpages, availpages;
-
if (util_info_sys_pages(&pagesize, &pages, &procpages, &availpages) != 0) {
LDAPDebug( LDAP_DEBUG_ANY, "start: Unable to determine system page limits\n",
0, 0, 0 );
@@ -130,47 +132,73 @@ ldbm_back_start( Slapi_PBlock *pb )
Object *inst_obj;
ldbm_instance *inst;
PRUint64 cache_size;
+ PRUint64 dncache_size;
PRUint64 db_size;
- PRUint64 total_cache_size = 0;
+#ifndef LINUX
PRUint64 memsize = pages * pagesize;
- PRUint64 extra = 0; /* e.g., dncache size */
-
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
- inst = (ldbm_instance *)object_get_data(inst_obj);
- cache_size = (PRUint64)cache_get_max_size(&(inst->inst_cache));
- db_size = dblayer_get_id2entry_size(inst);
- if (cache_size < db_size) {
- LDAPDebug(LDAP_DEBUG_ANY,
- "WARNING: %s: entry cache size %" NSPRIu64 "B is "
- "less than db size %" NSPRIu64 "B; "
- "We recommend to increase the entry cache size "
- "nsslapd-cachememsize.\n",
- inst->inst_name, cache_size, db_size);
- } else {
+#endif
+ if (li->li_cache_autosize == 0) {
+ /* First, set our message. */
+ msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize\n";
+
+ for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+ inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+ inst = (ldbm_instance *)object_get_data(inst_obj);
+ cache_size = (PRUint64)cache_get_max_size(&(inst->inst_cache));
+ db_size = dblayer_get_id2entry_size(inst);
+ if (cache_size < db_size) {
+ LDAPDebug(LDAP_DEBUG_ANY,
+ "WARNING: %s: entry cache size %llu B is "
+ "less than db size %llu B; "
+ "We recommend to increase the entry cache size "
+ "nsslapd-cachememsize.\n",
+ inst->inst_name, cache_size, db_size);
+ } else {
+ LDAPDebug(LDAP_DEBUG_BACKLDBM,
+ "%s: entry cache size: %llu B; db size: %llu B\n",
+ inst->inst_name, cache_size, db_size);
+ }
+ /* Get the dn_cachesize */
+ dncache_size = (PRUint64)cache_get_max_size(&(inst->inst_dncache));
+ total_cache_size += cache_size + dncache_size;
LDAPDebug(LDAP_DEBUG_BACKLDBM,
- "%s: entry cache size: %" NSPRIu64 "B; db size: %" NSPRIu64 "B\n",
- inst->inst_name, cache_size, db_size);
+ "total cache size: %llu B; \n",
+ total_cache_size, 0 ,0 );
}
- total_cache_size += cache_size;
- /* estimated overhead: dncache size * 2 */
- extra += (PRUint64)cache_get_max_size(&(inst->inst_dncache)) * 2;
- }
- LDAPDebug(LDAP_DEBUG_BACKLDBM,
- "Total entry cache size: %" NSPRIu64 "B; "
- "dbcache size: %" NSPRIu64 "B; "
- "available memory size: %" NSPRIu64 "B\n",
- total_cache_size, (PRUint32)li->li_dbcachesize, memsize - extra);
+ LDAPDebug(LDAP_DEBUG_BACKLDBM,
+ "Total entry cache size: %llu B; "
+ "dbcache size: %llu B; "
+ "available memory size: %llu B; \n",
+#ifdef LINUX
+ (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, availpages * pagesize
+#else
+ (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, memsize
+#endif
+ );
+
/* autosizing dbCache and entryCache */
- if (li->li_cache_autosize > 0) {
+ } else if (li->li_cache_autosize > 0) {
+ msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize\n";
zone_pages = (li->li_cache_autosize * pages) / 100;
- /* now split it according to user prefs */
+ size_t zone_size = zone_pages * pagesize;
+ /* This is how much we "might" use, lets check it's sane. */
+ /* In the case it is not, this will *reduce* the allocation */
+ issane = util_is_cachesize_sane(&zone_size);
+ if (!issane) {
+ LDAPDebug(LDAP_DEBUG_ANY, "Your autosized cache values have been reduced. Likely your nsslapd-cache-autosize percentage is too high.\n", 0,0,0);
+ LDAPDebug(LDAP_DEBUG_ANY, msg, 0,0,0);
+ }
+ /* It's valid, lets divide it up and set according to user prefs */
+ zone_pages = zone_size / pagesize;
db_pages = (li->li_cache_autosize_split * zone_pages) / 100;
- /* fudge an extra instance into our calculations... */
- entry_pages = (zone_pages - db_pages) /
- (objset_size(li->li_instance_set) + 1);
+ entry_pages = (zone_pages - db_pages) / objset_size(li->li_instance_set);
+ /* We update this for the is-sane check below. */
+ total_cache_size = (zone_pages - db_pages) * pagesize;
+
LDAPDebug(LDAP_DEBUG_ANY, "cache autosizing. found %dk physical memory\n",
pages*(pagesize/1024), 0, 0);
+ LDAPDebug(LDAP_DEBUG_ANY, "cache autosizing. found %dk avaliable\n",
+ zone_pages*(pagesize/1024), 0, 0);
LDAPDebug(LDAP_DEBUG_ANY, "cache autosizing: db cache: %dk, "
"each entry cache (%d total): %dk\n",
db_pages*(pagesize/1024), objset_size(li->li_instance_set),
@@ -193,6 +221,10 @@ ldbm_back_start( Slapi_PBlock *pb )
cache_set_max_entries(&(inst->inst_cache), -1);
cache_set_max_size(&(inst->inst_cache),
li->li_cache_autosize_ec, CACHE_TYPE_ENTRY);
+ /* We need to get each instances dncache size to add to the total */
+ /* Else we can't properly check the cache allocations below */
+ /* Trac 48831 exists to allow this to be auto-sized too ... */
+ total_cache_size += (PRUint64)cache_get_max_size(&(inst->inst_dncache));
}
}
/* autosizing importCache */
@@ -202,6 +234,10 @@ ldbm_back_start( Slapi_PBlock *pb )
li->li_import_cache_autosize = 50;
}
import_pages = (li->li_import_cache_autosize * pages) / 100;
+ size_t import_size = import_pages * pagesize;
+ issane = util_is_cachesize_sane(&import_size);
+ /* We just accept the reduced allocation here. */
+ import_pages = import_size / pagesize;
LDAPDebug(LDAP_DEBUG_ANY, "cache autosizing: import cache: %dk \n",
import_pages*(pagesize/1024), NULL, NULL);
@@ -211,6 +247,29 @@ ldbm_back_start( Slapi_PBlock *pb )
}
}
+ /* Finally, lets check that the total result is sane. */
+
+ size_t total_size = total_cache_size + (PRUint64)li->li_dbcachesize;
+ issane = util_is_cachesize_sane(&total_size);
+ if (!issane) {
+ /* Right, it's time to panic */
+ LDAPDebug( LDAP_DEBUG_ANY, "CRITICAL: It is highly likely your memory configuration will EXCEED your systems memory.\n", 0, 0, 0 );
+ LDAPDebug(LDAP_DEBUG_ANY,
+ "Total entry cache size: %llu B; "
+ "dbcache size: %llu B; "
+ "available memory size: %llu B; \n",
+#ifdef LINUX
+ (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, availpages * pagesize
+#else
+ (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, memsize
+#endif
+ );
+ LDAPDebug(LDAP_DEBUG_ANY, msg, 0,0,0);
+ return SLAPI_FAIL_GENERAL;
+ }
+
+
+
retval = check_db_version(li, &action);
if (0 != retval)
{
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index f2971763b..b0fd73bc4 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -1784,10 +1784,10 @@ int util_is_cachesize_sane(size_t *cachesize)
(unsigned long)cachepages,(unsigned long)availpages,0);
if (!issane) {
- /* Since we are ask for more than what's available, we give half of
+ /* Since we are ask for more than what's available, we give 3/4 of the remaining.
* the remaining system mem to the cachesize instead, and log a warning
*/
- *cachesize = (size_t)((availpages / 2) * pagesize);
+ *cachesize = (size_t)((availpages * 0.75 ) * pagesize);
slapi_log_error(SLAPI_LOG_FATAL, "util_is_cachesize_sane", "WARNING adjusted cachesize to %lu\n", (unsigned long)*cachesize);
}
#else
| 0 |
471dda07da584b389ee0b2c78f9ecf44defd835d
|
389ds/389-ds-base
|
Added testpreop_search to verify bug 147585
Plus fixed branding/version number.
|
commit 471dda07da584b389ee0b2c78f9ecf44defd835d
Author: Noriko Hosoi <[email protected]>
Date: Thu Jun 9 17:42:14 2005 +0000
Added testpreop_search to verify bug 147585
Plus fixed branding/version number.
diff --git a/ldap/servers/slapd/test-plugins/testbind.c b/ldap/servers/slapd/test-plugins/testbind.c
index d2fadc7dc..08e12a0c8 100644
--- a/ldap/servers/slapd/test-plugins/testbind.c
+++ b/ldap/servers/slapd/test-plugins/testbind.c
@@ -69,7 +69,7 @@
#include <string.h>
#include "slapi-plugin.h"
-Slapi_PluginDesc bindpdesc = { "test-bind", "Netscape", "0.5",
+Slapi_PluginDesc bindpdesc = { "test-bind", "Fedora Project", "7.1",
"sample bind pre-operation plugin" };
static Slapi_ComponentId *plugin_id = NULL;
diff --git a/ldap/servers/slapd/test-plugins/testdatainterop.c b/ldap/servers/slapd/test-plugins/testdatainterop.c
index 728a146d2..2c871838f 100644
--- a/ldap/servers/slapd/test-plugins/testdatainterop.c
+++ b/ldap/servers/slapd/test-plugins/testdatainterop.c
@@ -88,8 +88,8 @@
nsslapd-pluginType: preoperation
nsslapd-pluginEnabled: on
nsslapd-pluginId: nullsuffix-preop
- nsslapd-pluginVersion: 6.2
- nsslapd-pluginVendor: Netscape
+ nsslapd-pluginVersion: 7.1
+ nsslapd-pluginVendor: Fedora Project
nsslapd-pluginDescription: sample pre-operation null suffix plugin
******************************************/
@@ -113,7 +113,7 @@
/*
* Static variables.
*/
-static Slapi_PluginDesc plugindesc = { PLUGIN_NAME, "Netscape", "0.5",
+static Slapi_PluginDesc plugindesc = { PLUGIN_NAME, "Fedora Project", "7.1",
"sample pre-operation null suffix plugin" };
static Slapi_ComponentId *plugin_id = NULL;
diff --git a/ldap/servers/slapd/test-plugins/testentry.c b/ldap/servers/slapd/test-plugins/testentry.c
index 72464d2b4..fabb1a6ec 100644
--- a/ldap/servers/slapd/test-plugins/testentry.c
+++ b/ldap/servers/slapd/test-plugins/testentry.c
@@ -90,7 +90,7 @@ nsslapd-pluginId: test-entry
#include <string.h>
#include "slapi-plugin.h"
-Slapi_PluginDesc entrypdesc = { "test-entry", "Netscape", "0.5",
+Slapi_PluginDesc entrypdesc = { "test-entry", "Fedora Project", "7.1",
"sample entry modification plugin" };
/* Entry store plug-in function */
diff --git a/ldap/servers/slapd/test-plugins/testextendedop.c b/ldap/servers/slapd/test-plugins/testextendedop.c
index d6e0841de..5f5b638a8 100644
--- a/ldap/servers/slapd/test-plugins/testextendedop.c
+++ b/ldap/servers/slapd/test-plugins/testextendedop.c
@@ -83,7 +83,7 @@
/* OID of the extended operation handled by this plug-in */
#define MY_OID "1.2.3.4"
-Slapi_PluginDesc expdesc = { "test-extendedop", "Netscape", "0.5",
+Slapi_PluginDesc expdesc = { "test-extendedop", "Fedora Project", "7.1",
"sample extended operation plugin" };
diff --git a/ldap/servers/slapd/test-plugins/testgetip.c b/ldap/servers/slapd/test-plugins/testgetip.c
index 8e781ee4a..8867ee297 100644
--- a/ldap/servers/slapd/test-plugins/testgetip.c
+++ b/ldap/servers/slapd/test-plugins/testgetip.c
@@ -74,7 +74,7 @@
#include "slapi-plugin.h"
#include "nspr.h"
-Slapi_PluginDesc getippdesc = { "test-getip", "Netscape", "0.5",
+Slapi_PluginDesc getippdesc = { "test-getip", "Fedora Project", "7.1",
"sample pre-operation plugin" };
static char *netaddr2str( PRNetAddr *addrp, char *buf, size_t buflen );
diff --git a/ldap/servers/slapd/test-plugins/testpostop.c b/ldap/servers/slapd/test-plugins/testpostop.c
index f59825924..7c82bf7fd 100644
--- a/ldap/servers/slapd/test-plugins/testpostop.c
+++ b/ldap/servers/slapd/test-plugins/testpostop.c
@@ -93,7 +93,7 @@ static char changelogfile[MAX_PATH+1];
static char *changelogfile = "/tmp/changelog";
#endif
-Slapi_PluginDesc postoppdesc = { "test-postop", "Netscape", "0.5",
+Slapi_PluginDesc postoppdesc = { "test-postop", "Fedora Project", "7.1",
"sample post-operation plugin" };
static void write_changelog( int optype, char *dn, void *change, int flag );
diff --git a/ldap/servers/slapd/test-plugins/testpreop.c b/ldap/servers/slapd/test-plugins/testpreop.c
index af3ca1f43..c71fba299 100644
--- a/ldap/servers/slapd/test-plugins/testpreop.c
+++ b/ldap/servers/slapd/test-plugins/testpreop.c
@@ -45,6 +45,7 @@
* testpreop_bind (called before an LDAP bind operation)
* testpreop_add (called before an LDAP add operation)
+ * testpreop_search (called before an LDAP search operation)
* testpreop_abandon (called before an LDAP abandon operation)
testpreop_bind logs information about the LDAP bind operation
@@ -78,7 +79,7 @@
#include <string.h>
#include "slapi-plugin.h"
-Slapi_PluginDesc preoppdesc = { "test-preop", "Netscape", "0.5",
+Slapi_PluginDesc preoppdesc = { "test-preop", "Fedora Project", "7.1",
"sample pre-operation plugin" };
/* Pre-operation plug-in function */
@@ -159,6 +160,24 @@ testpreop_add( Slapi_PBlock *pb )
return( 0 ); /* allow the operation to continue */
}
+/* Pre-operation plug-in function */
+int
+testpreop_search( Slapi_PBlock *pb )
+{
+ char *base;
+ /* Log a message to indicate when the plug-in function starts */
+ slapi_log_error( SLAPI_LOG_FATAL, "testpreop_search",
+ "*** PREOPERATION SEARCH PLUGIN ***\n");
+ /* Get and log the base DN of the search criteria */
+ if ( slapi_pblock_get( pb, SLAPI_SEARCH_TARGET, &base ) == 0 )
+ slapi_log_error( SLAPI_LOG_FATAL, "SLAPI_SEARCH_TARGET",
+ "%s\n", base );
+ /* Get and log the original base DN */
+ if ( slapi_pblock_get( pb, SLAPI_ORIGINAL_TARGET_DN, &base ) == 0 )
+ slapi_log_error( SLAPI_LOG_FATAL, "SLAPI_ORIGINAL_TARGET_DN",
+ "%s\n", base );
+}
+
/* Pre-operation plug-in function */
int
@@ -236,9 +255,11 @@ testpreop_init( Slapi_PBlock *pb )
(void *) testpreop_bind ) != 0 ||
slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_ADD_FN,
(void *) testpreop_add ) != 0 ||
+ slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_SEARCH_FN,
+ (void *) testpreop_search ) != 0 ||
slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_ABANDON_FN,
(void *) testpreop_abandon ) != 0 ) {
- slapi_log_error( SLAPI_LOG_PLUGIN, "testpreop_init",
+ slapi_log_error( SLAPI_LOG_FATAL, "testpreop_init",
"Failed to set version and function\n" );
return( -1 );
}
diff --git a/ldap/servers/slapd/test-plugins/testsaslbind.c b/ldap/servers/slapd/test-plugins/testsaslbind.c
index 295e5d318..b259a80d9 100644
--- a/ldap/servers/slapd/test-plugins/testsaslbind.c
+++ b/ldap/servers/slapd/test-plugins/testsaslbind.c
@@ -65,7 +65,7 @@ objectclass: top
objectclass: nsSlapdPlugin
objectclass: extensibleObject
cn: test-saslbind
-nsslapd-pluginpath: /usr/fedora/servers/plugins/slapd/slapi/examples/libtest-plugin.so
+nsslapd-pluginpath: <serverroot>/plugins/slapd/slapi/examples/libtest-plugin.so
nsslapd-plugininitfunc: testsasl_init
nsslapd-plugintype: preoperation
nsslapd-pluginenabled: on
| 0 |
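The new testpreop_search callback logs the search base (SLAPI_SEARCH_TARGET) and the original target DN before every search. Assuming the sample plugin is already configured as described in the file's header comment, one way to exercise it from a client is sketched below; the URL, credentials and error-log path are placeholders for illustration, not values taken from the commit.

# Hypothetical client-side check for the testpreop_search plugin: issue a
# search, then look for the plugin's messages in the server error log.
import ldap

URL = "ldap://localhost:389"                              # assumed
ERRLOG = "/var/log/dirsrv/slapd-example/errors"           # assumed path

conn = ldap.initialize(URL)
conn.simple_bind_s("cn=Directory Manager", "password")    # assumed credentials
conn.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=*)", ["cn"])
conn.unbind_s()

# The plugin logs at SLAPI_LOG_FATAL, so its messages end up in the errors log.
with open(ERRLOG) as f:
    hits = [line for line in f if "PREOPERATION SEARCH PLUGIN" in line]
print("plugin logged %d search preops" % len(hits))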
2971ca1c3671b4685d347ecdcb20c950f4d03d4c
|
389ds/389-ds-base
|
Revert "Ticket 47599 - Reduce lock scope in retro changelog plug-in"
Wrong branch was pushed.
This reverts commit d708fb290cf5d9be5efe6fd1096f63014ed265d3.
|
commit 2971ca1c3671b4685d347ecdcb20c950f4d03d4c
Author: Mark Reynolds <[email protected]>
Date: Wed Nov 20 11:14:32 2013 -0500
Revert "Ticket 47599 - Reduce lock scope in retro changelog plug-in"
Wrong branch was pushed.
This reverts commit d708fb290cf5d9be5efe6fd1096f63014ed265d3.
diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c
index b0d0dfde9..141cfd22f 100644
--- a/ldap/servers/plugins/retrocl/retrocl.c
+++ b/ldap/servers/plugins/retrocl/retrocl.c
@@ -482,8 +482,6 @@ retrocl_plugin_init(Slapi_PBlock *pb)
rc= slapi_register_plugin_ext("internalpostoperation", 1 /* Enabled */, "retrocl_internalpostop_init", retrocl_internalpostop_init, "Retrocl internal postoperation plugin", NULL, identity, precedence);
}
- retrocl_internal_cn = slapi_counter_new();
- retrocl_first_cn = slapi_counter_new();
retrocl_internal_lock = PR_NewLock();
if (retrocl_internal_lock == NULL) return -1;
}
diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h
index e93a454a8..214f3afb1 100644
--- a/ldap/servers/plugins/retrocl/retrocl.h
+++ b/ldap/servers/plugins/retrocl/retrocl.h
@@ -57,7 +57,7 @@
/* max len of a long (2^64), represented as a string, including null byte */
#define CNUMSTR_LEN 21
-typedef PRUint64 changeNumber;
+typedef unsigned long changeNumber;
typedef struct _cnum_result_t {
int crt_nentries; /* number of entries returned from search */
@@ -131,8 +131,6 @@ extern const char *attr_nsuniqueid;
extern const char *attr_isreplicated;
extern PRLock *retrocl_internal_lock;
-extern Slapi_Counter *retrocl_internal_cn;
-extern Slapi_Counter *retrocl_first_cn;
/* Functions */
diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c
index 6923856e7..d2b15a4b9 100644
--- a/ldap/servers/plugins/retrocl/retrocl_cn.c
+++ b/ldap/servers/plugins/retrocl/retrocl_cn.c
@@ -43,8 +43,8 @@
#include "retrocl.h"
-Slapi_Counter *retrocl_internal_cn;
-Slapi_Counter *retrocl_first_cn;
+static changeNumber retrocl_internal_cn = 0;
+static changeNumber retrocl_first_cn = 0;
/*
* Function: a2changeNumber
@@ -86,31 +86,36 @@ handle_cnum_entry( Slapi_Entry *e, void *callback_data )
cr->cr_time = NULL;
if ( NULL != e ) {
- Slapi_Attr *chattr = NULL;
- sval = NULL;
- value = NULL;
- if ( slapi_entry_attr_find( e, attr_changenumber, &chattr ) == 0 ) {
- slapi_attr_first_value( chattr,&sval );
- if ( NULL != sval ) {
- value = slapi_value_get_berval ( sval );
- if( NULL != value && NULL != value->bv_val && '\0' != value->bv_val[0]) {
- cr->cr_cnum = a2changeNumber( value->bv_val );
- }
- }
- }
- chattr = NULL;
- sval = NULL;
- value = NULL;
-
- if ( slapi_entry_attr_find( e, attr_changetime, &chattr ) == 0 ) {
- slapi_attr_first_value( chattr,&sval );
- if ( NULL != sval) {
- value = slapi_value_get_berval ( sval );
- if (NULL != value && NULL != value->bv_val && '\0' != value->bv_val[0]) {
- cr->cr_time = slapi_ch_strdup( value->bv_val );
- }
- }
- }
+ Slapi_Attr *chattr = NULL;
+ sval = NULL;
+ value = NULL;
+ if ( slapi_entry_attr_find( e, attr_changenumber, &chattr ) == 0 ) {
+ slapi_attr_first_value( chattr,&sval );
+ if ( NULL != sval ) {
+ value = slapi_value_get_berval ( sval );
+ if( NULL != value && NULL != value->bv_val &&
+ '\0' != value->bv_val[0]) {
+ cr->cr_cnum = a2changeNumber( value->bv_val );
+ }
+ }
+ }
+ chattr = NULL;
+ sval = NULL;
+ value = NULL;
+
+ chattr = NULL;
+ sval = NULL;
+ value = NULL;
+ if ( slapi_entry_attr_find( e, attr_changetime, &chattr ) == 0 ) {
+ slapi_attr_first_value( chattr,&sval );
+ if ( NULL != sval) {
+ value = slapi_value_get_berval ( sval );
+ if (NULL != value && NULL != value->bv_val &&
+ '\0' != value->bv_val[0]) {
+ cr->cr_time = slapi_ch_strdup( value->bv_val );
+ }
+ }
+ }
}
return 0;
}
@@ -158,7 +163,7 @@ int retrocl_get_changenumbers(void)
NULL,NULL,0,&cr,NULL,handle_cnum_result,
handle_cnum_entry, NULL);
- slapi_counter_set_value(retrocl_first_cn, cr.cr_cnum);
+ retrocl_first_cn = cr.cr_cnum;
slapi_ch_free(( void **) &cr.cr_time );
@@ -167,11 +172,11 @@ int retrocl_get_changenumbers(void)
NULL,NULL,0,&cr,NULL,handle_cnum_result,
handle_cnum_entry, NULL);
- slapi_counter_set_value(retrocl_internal_cn,cr.cr_cnum);
+ retrocl_internal_cn = cr.cr_cnum;
slapi_log_error(SLAPI_LOG_PLUGIN,"retrocl","Got changenumbers %lu and %lu\n",
- slapi_counter_get_value(retrocl_first_cn),
- slapi_counter_get_value(retrocl_internal_cn));
+ retrocl_first_cn,
+ retrocl_internal_cn);
slapi_ch_free(( void **) &cr.cr_time );
@@ -195,10 +200,10 @@ time_t retrocl_getchangetime( int type, int *err )
time_t ret;
if ( type != SLAPI_SEQ_FIRST && type != SLAPI_SEQ_LAST ) {
- if ( err != NULL ) {
- *err = -1;
- }
- return NO_TIME;
+ if ( err != NULL ) {
+ *err = -1;
+ }
+ return NO_TIME;
}
memset( &cr, '\0', sizeof( cnumRet ));
slapi_seq_callback( RETROCL_CHANGELOG_DN, type,
@@ -208,13 +213,13 @@ time_t retrocl_getchangetime( int type, int *err )
handle_cnum_result, handle_cnum_entry, NULL );
if ( err != NULL ) {
- *err = cr.cr_lderr;
+ *err = cr.cr_lderr;
}
if ( NULL == cr.cr_time ) {
- ret = NO_TIME;
+ ret = NO_TIME;
} else {
- ret = parse_localTime( cr.cr_time );
+ ret = parse_localTime( cr.cr_time );
}
slapi_ch_free(( void **) &cr.cr_time );
return ret;
@@ -233,8 +238,10 @@ time_t retrocl_getchangetime( int type, int *err )
void retrocl_forget_changenumbers(void)
{
- slapi_counter_set_value(retrocl_first_cn, 0);
- slapi_counter_set_value(retrocl_internal_cn, 0);
+ PR_Lock(retrocl_internal_lock);
+ retrocl_first_cn = 0;
+ retrocl_internal_cn = 0;
+ PR_Unlock(retrocl_internal_lock);
}
/*
@@ -250,7 +257,11 @@ void retrocl_forget_changenumbers(void)
changeNumber retrocl_get_first_changenumber(void)
{
- return (changeNumber)slapi_counter_get_value(retrocl_first_cn);
+ changeNumber cn;
+ PR_Lock(retrocl_internal_lock);
+ cn = retrocl_first_cn;
+ PR_Unlock(retrocl_internal_lock);
+ return cn;
}
/*
@@ -266,7 +277,9 @@ changeNumber retrocl_get_first_changenumber(void)
void retrocl_set_first_changenumber(changeNumber cn)
{
- slapi_counter_set_value(retrocl_first_cn, cn);
+ PR_Lock(retrocl_internal_lock);
+ retrocl_first_cn = cn;
+ PR_Unlock(retrocl_internal_lock);
}
@@ -282,8 +295,12 @@ void retrocl_set_first_changenumber(changeNumber cn)
*/
changeNumber retrocl_get_last_changenumber(void)
-{
- return (changeNumber)slapi_counter_get_value(retrocl_internal_cn);
+{
+ changeNumber cn;
+ PR_Lock(retrocl_internal_lock);
+ cn = retrocl_internal_cn;
+ PR_Unlock(retrocl_internal_lock);
+ return cn;
}
/*
@@ -291,7 +308,7 @@ changeNumber retrocl_get_last_changenumber(void)
*
* Returns: none
*
- * Arguments: none
+ * Arguments: none, lock must be held
*
* Description: NOTE! MUST BE PRECEEDED BY retrocl_assign_changenumber
*
@@ -299,8 +316,8 @@ changeNumber retrocl_get_last_changenumber(void)
void retrocl_commit_changenumber(void)
{
- if ( slapi_counter_get_value(retrocl_first_cn) == 0) {
- slapi_counter_set_value(retrocl_first_cn, slapi_counter_get_value(retrocl_internal_cn));
+ if ( retrocl_first_cn == 0) {
+ retrocl_first_cn = retrocl_internal_cn;
}
}
@@ -309,7 +326,7 @@ void retrocl_commit_changenumber(void)
*
* Returns: none
*
- * Arguments: none
+ * Arguments: none, lock must be held
*
* Description: NOTE! MUST BE PRECEEDED BY retrocl_assign_changenumber
*
@@ -317,7 +334,7 @@ void retrocl_commit_changenumber(void)
void retrocl_release_changenumber(void)
{
- slapi_counter_decrement(retrocl_internal_cn);
+ retrocl_internal_cn--;
}
/*
@@ -345,8 +362,10 @@ int retrocl_update_lastchangenumber(void)
NULL,NULL,0,&cr,NULL,handle_cnum_result,
handle_cnum_entry, NULL);
- slapi_counter_set_value(retrocl_internal_cn, cr.cr_cnum);
- slapi_log_error(SLAPI_LOG_PLUGIN,"retrocl","Refetched last changenumber = %lu \n", cr.cr_cnum);
+
+ retrocl_internal_cn = cr.cr_cnum;
+ slapi_log_error(SLAPI_LOG_PLUGIN,"retrocl","Refetched last changenumber = %lu \n",
+ retrocl_internal_cn);
slapi_ch_free(( void **) &cr.cr_time );
@@ -375,7 +394,7 @@ changeNumber retrocl_assign_changenumber(void)
* validity of the internal assignment of retrocl_internal_cn
* we had from the startup */
- if(slapi_counter_get_value(retrocl_internal_cn) <= slapi_counter_get_value(retrocl_first_cn)){
+ if(retrocl_internal_cn <= retrocl_first_cn){
/* the numbers have become out of sync - retrocl_get_changenumbers
* gets called only once during startup and it may have had a problem
* getting the last changenumber.
@@ -386,8 +405,7 @@ changeNumber retrocl_assign_changenumber(void)
retrocl_update_lastchangenumber();
}
- slapi_counter_increment(retrocl_internal_cn);
- cn = slapi_counter_get_value(retrocl_internal_cn);
-
+ retrocl_internal_cn++;
+ cn = retrocl_internal_cn;
return cn;
}
| 0 |
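The revert above goes back to plain change-number integers guarded by retrocl_internal_lock, instead of Slapi_Counter objects that are safe without an external lock. The restored pattern, reduced to a small self-contained Python sketch (purely illustrative, not the plugin's code): every read and update takes the same lock, so assignment and commit of change numbers stay consistent under concurrency.

# Sketch of the lock-protected change-number pattern the revert restores.
import threading

class ChangeNumbers:
    def __init__(self):
        self._lock = threading.Lock()
        self.first = 0
        self.last = 0

    def assign(self):
        # like retrocl_assign_changenumber(): bump and return the last number
        with self._lock:
            self.last += 1
            return self.last

    def commit(self):
        # like retrocl_commit_changenumber(): remember the first assigned number
        with self._lock:
            if self.first == 0:
                self.first = self.last

    def forget(self):
        # like retrocl_forget_changenumbers()
        with self._lock:
            self.first = 0
            self.last = 0

if __name__ == "__main__":
    cn = ChangeNumbers()
    threads = [threading.Thread(target=lambda: [cn.assign() for _ in range(1000)])
               for _ in range(4)]
    for t in threads: t.start()
    for t in threads: t.join()
    print(cn.last)   # always 4000, because assign() is serialized by the lock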
baefaadbb182c3b282a9d186267797578a39d5d9
|
389ds/389-ds-base
|
Ticket 48309 - Fix lib389 lib imports
Description: Remove redundant imports, such as
"from lib389.properties import SER_PORT" when
"from lib389.properties import *" is presented.
Group imports by standard library, related third party,
lib389 library.
https://fedorahosted.org/389/ticket/48309
Reviewed by: mreynolds (Thanks!)
|
commit baefaadbb182c3b282a9d186267797578a39d5d9
Author: Simon Pichugin <[email protected]>
Date: Tue Oct 13 13:28:27 2015 +0200
Ticket 48309 - Fix lib389 lib imports
Description: Remove redundant imports, such as
"from lib389.properties import SER_PORT" when
"from lib389.properties import *" is presented.
Group imports by standard library, related third party,
lib389 library.
https://fedorahosted.org/389/ticket/48309
Reviewed by: mreynolds (Thanks!)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 9890e256f..2763af5e1 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -25,9 +25,6 @@ import pwd
import grp
import os.path
import base64
-import six.moves.urllib.request
-import six.moves.urllib.parse
-import six.moves.urllib.error
import socket
import ldif
import re
@@ -42,18 +39,22 @@ import glob
import tarfile
import subprocess
import collections
+import six.moves.urllib.request
+import six.moves.urllib.parse
+import six.moves.urllib.error
import six
-
-from ldap.ldapobject import SimpleLDAPObject
from ldapurl import LDAPUrl
+from ldap.ldapobject import SimpleLDAPObject
from ldap.cidict import cidict
from ldap import LDAPError
# file in this package
from lib389._constants import *
+from lib389.properties import *
from lib389._entry import Entry
from lib389._replication import CSN, RUV
from lib389._ldifconn import LDIFConn
+from lib389.tools import DirSrvTools
from lib389.utils import (
isLocalHost,
is_a_dn,
@@ -63,10 +64,7 @@ from lib389.utils import (
update_newhost_with_fqdn,
formatInfData,
get_sbin_dir,
- get_bin_dir
- )
-from lib389.properties import *
-from lib389.tools import DirSrvTools
+ get_bin_dir)
# mixin
#from lib389.tools import DirSrvTools
diff --git a/src/lib389/lib389/_controls.py b/src/lib389/lib389/_controls.py
index b09f61e93..39f965eec 100644
--- a/src/lib389/lib389/_controls.py
+++ b/src/lib389/lib389/_controls.py
@@ -4,15 +4,12 @@ Lib389 python ldap request controls.
These should be upstreamed into python ldap when possible.
"""
-from lib389._constants import *
-
from ldap.controls import LDAPControl
-
-from pyasn1.type import namedtype,univ
-from pyasn1.codec.ber import encoder,decoder
-from pyasn1.type import tag
-from pyasn1_modules.rfc2251 import AttributeDescription, LDAPDN, AttributeValue
+from pyasn1.type import namedtype, univ, tag
+from pyasn1.codec.ber import encoder, decoder
from pyasn1 import debug
+from pyasn1_modules.rfc2251 import AttributeDescription, LDAPDN, AttributeValue
+from lib389._constants import *
# Could use AttributeDescriptionList
diff --git a/src/lib389/lib389/_entry.py b/src/lib389/lib389/_entry.py
index 482ef9e2f..def5c4c0c 100644
--- a/src/lib389/lib389/_entry.py
+++ b/src/lib389/lib389/_entry.py
@@ -1,12 +1,13 @@
-import ldif
import re
-from ldap.cidict import cidict
import six
-
import logging
+import ldif
import ldap
-from lib389 import *
+from ldap.cidict import cidict
+
+from lib389._constants import *
from lib389.properties import *
+
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
diff --git a/src/lib389/lib389/agreement.py b/src/lib389/lib389/agreement.py
index c65e1f710..675f3caec 100644
--- a/src/lib389/lib389/agreement.py
+++ b/src/lib389/lib389/agreement.py
@@ -10,10 +10,10 @@ import time
import six
from lib389._constants import *
+from lib389.properties import *
from lib389._entry import FormatDict
from lib389.utils import normalizeDN
from lib389 import Entry, DirSrv, NoSuchEntryError, InvalidArgumentError
-from lib389.properties import *
class Agreement(object):
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 5d2a19775..67a7d950c 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -5,15 +5,13 @@ Created on Dec 13, 2013
'''
import ldap
-from lib389 import DirSrv, Entry, NoSuchEntryError, InvalidArgumentError
-from lib389._constants import *
-from lib389.utils import (
- normalizeDN,
- suffixfilt
- )
from compiler.ast import Not
+
+from lib389._constants import *
from lib389.properties import *
-from .__init__ import UnwillingToPerformError
+from lib389.utils import normalizeDN, suffixfilt
+from lib389 import DirSrv, Entry
+from lib389 import NoSuchEntryError, InvalidArgumentError, UnwillingToPerformError
class Backend(object):
diff --git a/src/lib389/lib389/brooker.py b/src/lib389/lib389/brooker.py
index 03c62d18e..00f9c646d 100644
--- a/src/lib389/lib389/brooker.py
+++ b/src/lib389/lib389/brooker.py
@@ -7,23 +7,18 @@
You will access this from:
DirSrv.backend.methodName()
"""
-import ldap
import os
import re
import time
import glob
-
+import ldap
from lib389._constants import *
-from lib389 import Entry, DirSrv, InvalidArgumentError
-from lib389.utils import normalizeDN, escapeDNValue, suffixfilt
-from lib389 import (
- NoSuchEntryError
-)
-
-
from lib389._replication import RUV
from lib389._entry import FormatDict
+from lib389.utils import normalizeDN, escapeDNValue, suffixfilt
+from lib389 import Entry, DirSrv
+from lib389 import NoSuchEntryError, InvalidArgumentError
class Config(object):
diff --git a/src/lib389/lib389/changelog.py b/src/lib389/lib389/changelog.py
index 471089900..cea25e7f7 100644
--- a/src/lib389/lib389/changelog.py
+++ b/src/lib389/lib389/changelog.py
@@ -4,11 +4,12 @@ Created on Jan 6, 2014
@author: tbordaz
'''
-import ldap
import os
-from lib389 import DirSrv, Entry, InvalidArgumentError
+import ldap
+
from lib389._constants import *
from lib389.properties import *
+from lib389 import DirSrv, Entry, InvalidArgumentError
class Changelog(object):
@@ -100,4 +101,4 @@ class Changelog(object):
self.conn.modify_s(ents[0].dn, mods)
def getProperties(self, changelogdn=None, properties=None):
- raise NotImplemented
\ No newline at end of file
+ raise NotImplemented
diff --git a/src/lib389/lib389/mappingTree.py b/src/lib389/lib389/mappingTree.py
index 72aacdc34..8d4d589e1 100644
--- a/src/lib389/lib389/mappingTree.py
+++ b/src/lib389/lib389/mappingTree.py
@@ -7,13 +7,11 @@ Created on Dec 13, 2013
import ldap
import six
-from lib389 import DirSrv, Entry, NoSuchEntryError, UnwillingToPerformError, InvalidArgumentError
from lib389._constants import *
-from lib389.utils import (
- suffixfilt,
- normalizeDN
- )
from lib389.properties import *
+from lib389.utils import suffixfilt, normalizeDN
+from lib389 import DirSrv, Entry
+from lib389 import NoSuchEntryError, UnwillingToPerformError, InvalidArgumentError
class MappingTree(object):
diff --git a/src/lib389/lib389/schema.py b/src/lib389/lib389/schema.py
index e4821a6ad..7932398c1 100644
--- a/src/lib389/lib389/schema.py
+++ b/src/lib389/lib389/schema.py
@@ -2,17 +2,17 @@
You will access this from:
DirSrv.schema.methodName()
"""
-import ldap
-from ldap.schema.models import AttributeType, ObjectClass
import os
import re
import time
import glob
import six
+import ldap
+from ldap.schema.models import AttributeType, ObjectClass
from lib389._constants import *
-from lib389 import Entry
from lib389.utils import normalizeDN, escapeDNValue, suffixfilt
+from lib389 import Entry
class Schema(object):
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 0ec61ba34..be82b21b6 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -3,12 +3,13 @@ Created on Feb 10, 2014
@author: tbordaz
'''
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
-import ldap
import time
import os.path
+import ldap
+
+from lib389._constants import *
+from lib389.properties import *
+from lib389 import DirSrv, Entry
class Tasks(object):
diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py
index a61d5d52f..af72a61f3 100644
--- a/src/lib389/lib389/tools.py
+++ b/src/lib389/lib389/tools.py
@@ -14,10 +14,6 @@ import sys
import os
import os.path
import base64
-import six.moves.urllib.request
-import six.moves.urllib.parse
-import six.moves.urllib.error
-import ldap
import operator
import select
import time
@@ -28,24 +24,31 @@ import re
import glob
import pwd
import grp
+import logging
+import six.moves.urllib.request
+import six.moves.urllib.parse
+import six.moves.urllib.error
+import ldap
-import lib389
-from lib389 import *
+from lib389._constants import *
+from lib389._ldifconn import LDIFConn
from lib389.properties import *
-
from lib389.utils import (
getcfgdsuserdn,
getcfgdsinfo,
getcfgdsuserdn,
update_newhost_with_fqdn,
- get_sbin_dir, get_server_user, getdomainname,
- isLocalHost, formatInfData, getserverroot,
- update_admin_domain, getadminport, getdefaultsuffix,
- )
-from lib389._ldifconn import LDIFConn
-from lib389._constants import DN_DM
+ get_sbin_dir,
+ get_server_user,
+ getdomainname,
+ isLocalHost,
+ formatInfData,
+ getserverroot,
+ update_admin_domain,
+ getadminport,
+ getdefaultsuffix)
+
-import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index cde48f714..924a812ab 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -2,8 +2,6 @@
TODO put them in a module!
"""
-from lib389.properties import SER_PORT, SER_ROOT_PW, SER_SERVERID_PROP,\
- SER_ROOT_DN
try:
from subprocess import Popen as my_popen, PIPE
except ImportError:
@@ -19,23 +17,20 @@ except ImportError:
import re
import os
-import socket
import logging
import shutil
import time
-logging.basicConfig(level=logging.DEBUG)
-log = logging.getLogger(__name__)
-
+import ldap
import socket
from socket import getfqdn
-
from ldapurl import LDAPUrl
-import ldap
-import lib389
-from lib389 import DN_CONFIG
+
from lib389._constants import *
from lib389.properties import *
+
+logging.basicConfig(level=logging.DEBUG)
+log = logging.getLogger(__name__)
#
# Decorator
#
@@ -614,7 +609,7 @@ def formatInfData(args):
"""
args = args.copy()
- args['CFGSUFFIX'] = lib389.CFGSUFFIX
+ args['CFGSUFFIX'] = CFGSUFFIX
content = ("[General]" "\n")
content += ("FullMachineName= %s\n" % args[SER_HOST])
| 0 |
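The convention applied throughout this commit is three import groups: standard library first, related third party second, lib389 last. The wildcard imports make narrower ones such as "from lib389.properties import SER_PORT" redundant, which is exactly what gets removed. A sketch of the resulting module layout is below; the docstring and the exact module list are illustrative, not copied from any one file in the diff.

"""Example module layout following the import grouping used above:
standard library first, third-party second, lib389 last."""

# standard library
import os
import logging
import time

# related third party
import ldap
from ldapurl import LDAPUrl

# lib389 library (the wildcard imports cover the individual constants,
# so narrower lib389 imports of the same names are redundant)
from lib389._constants import *
from lib389.properties import *
from lib389 import DirSrv, Entry

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)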
ce7378990855abe1a5b52cd4fbe78ffc33365dcb
|
389ds/389-ds-base
|
Ticket 47880 - CI test: added test cases for ticket 47880
Description: Ticket #47880 - provide enabled ciphers as search result
https://fedorahosted.org/389/ticket/47880
|
commit ce7378990855abe1a5b52cd4fbe78ffc33365dcb
Author: Noriko Hosoi <[email protected]>
Date: Thu Sep 25 14:43:16 2014 -0700
Ticket 47880 - CI test: added test cases for ticket 47880
Description: Ticket #47880 - provide enabled ciphers as search result
https://fedorahosted.org/389/ticket/47880
diff --git a/dirsrvtests/tickets/ticket47838_test.py b/dirsrvtests/tickets/ticket47838_test.py
index 15753768d..c98c36ee2 100644
--- a/dirsrvtests/tickets/ticket47838_test.py
+++ b/dirsrvtests/tickets/ticket47838_test.py
@@ -216,6 +216,24 @@ def test_ticket47838_init(topology):
'nsSSLToken': 'internal (software)',
'nsSSLActivation': 'on'})))
+def comp_nsSSLEnableCipherCount(topology, ecount):
+ """
+ Check nsSSLEnabledCipher count with ecount
+ """
+ log.info("Checking nsSSLEnabledCiphers...")
+ msgid = topology.standalone.search_ext(ENCRYPTION_DN, ldap.SCOPE_BASE, 'cn=*', ['nsSSLEnabledCiphers'])
+ enabledciphercnt = 0
+ rtype, rdata, rmsgid = topology.standalone.result2(msgid)
+ topology.standalone.log.info("%d results" % len(rdata))
+
+ topology.standalone.log.info("Results:")
+ for dn, attrs in rdata:
+ topology.standalone.log.info("dn: %s" % dn)
+ if attrs.has_key('nsSSLEnabledCiphers'):
+ enabledciphercnt = len(attrs['nsSSLEnabledCiphers'])
+ topology.standalone.log.info("enabledCipherCount: %d" % enabledciphercnt)
+ assert ecount == enabledciphercnt
+
def test_ticket47838_run_0(topology):
"""
Check nsSSL3Ciphers: +all
@@ -248,6 +266,8 @@ def test_ticket47838_run_0(topology):
log.info("Weak ciphers: %d" % wcount)
assert wcount <= 29
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_1(topology):
"""
Check nsSSL3Ciphers: +all
@@ -287,6 +307,8 @@ def test_ticket47838_run_1(topology):
log.info("Weak ciphers: %d" % wcount)
assert wcount <= 29
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_2(topology):
"""
Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
@@ -316,6 +338,8 @@ def test_ticket47838_run_2(topology):
assert ecount == 2
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_3(topology):
"""
Check nsSSL3Ciphers: -all
@@ -344,6 +368,8 @@ def test_ticket47838_run_3(topology):
log.info("Disabling SSL message?: %s" % disabledmsg.readline())
assert disabledmsg != ''
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_4(topology):
"""
Check no nsSSL3Ciphers
@@ -377,6 +403,8 @@ def test_ticket47838_run_4(topology):
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 0
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_5(topology):
"""
Check nsSSL3Ciphers: default
@@ -410,6 +438,8 @@ def test_ticket47838_run_5(topology):
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 0
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_6(topology):
"""
Check nssSSL3Chiphers: +all,-rsa_rc4_128_md5
@@ -441,6 +471,8 @@ def test_ticket47838_run_6(topology):
assert ecount == (plus_all_ecount_noweak - 1)
assert dcount == (plus_all_dcount_noweak + 1)
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_7(topology):
"""
Check nssSSL3Chiphers: -all,+rsa_rc4_128_md5
@@ -470,6 +502,8 @@ def test_ticket47838_run_7(topology):
assert ecount == 1
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_8(topology):
"""
Check nsSSL3Ciphers: default + allowWeakCipher: off
@@ -503,6 +537,8 @@ def test_ticket47838_run_8(topology):
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 0
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_9(topology):
"""
Check no nsSSL3Ciphers
@@ -537,6 +573,8 @@ def test_ticket47838_run_9(topology):
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 11
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_10(topology):
"""
Check nssSSL3Chiphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
@@ -579,6 +617,8 @@ def test_ticket47838_run_10(topology):
topology.standalone.log.info("ticket47838 was successfully verified.");
+ comp_nsSSLEnableCipherCount(topology, ecount)
+
def test_ticket47838_run_11(topology):
"""
Check nssSSL3Chiphers: +fortezza
@@ -603,6 +643,8 @@ def test_ticket47838_run_11(topology):
log.info("Expected error message was not found")
assert False
+ comp_nsSSLEnableCipherCount(topology, 0)
+
def test_ticket47838_run_last(topology):
"""
Check nssSSL3Chiphers: all <== invalid value
@@ -627,7 +669,9 @@ def test_ticket47838_run_last(topology):
log.info("Expected error message was not found")
assert False
- topology.standalone.log.info("ticket47838 was successfully verified.");
+ comp_nsSSLEnableCipherCount(topology, 0)
+
+ topology.standalone.log.info("ticket47838, 47880, 47908 were successfully verified.");
def test_ticket47838_final(topology):
topology.standalone.simple_bind_s(DN_DM, PASSWORD)
| 0 |
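The helper added here reads nsSSLEnabledCiphers from cn=encryption,cn=config and compares the number of returned values against the expected enabled-cipher count. The same check written as a standalone python-ldap sketch follows; the URL and credentials are placeholders, and the attribute may be absent when every cipher is disabled, which is treated as a count of zero.

# Count nsSSLEnabledCiphers values under cn=encryption,cn=config and compare
# with an expected number, as comp_nsSSLEnableCipherCount() does in the test.
import ldap

ENCRYPTION_DN = "cn=encryption,cn=config"

def enabled_cipher_count(url="ldap://localhost:389",
                         binddn="cn=Directory Manager", password="password"):
    conn = ldap.initialize(url)
    conn.simple_bind_s(binddn, password)
    try:
        results = conn.search_s(ENCRYPTION_DN, ldap.SCOPE_BASE,
                                "(cn=*)", ["nsSSLEnabledCiphers"])
        _dn, attrs = results[0]
        # Attribute is missing when no cipher is enabled (e.g. nsSSL3Ciphers: -all)
        return len(attrs.get("nsSSLEnabledCiphers", []))
    finally:
        conn.unbind_s()

if __name__ == "__main__":
    expected = 12  # whatever the test step expects for the current cipher config
    assert enabled_cipher_count() == expected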
fc2008c497542970168d988a3b1cfadd59b5bcb4
|
389ds/389-ds-base
|
Ticket 49926 - Fix replication tests on 1.3.x
Description:
nsAccount is not supported by the 1.3.x branch.
Fix description:
Remove nsAccount objectClass from Replication Manager
if DS is older than 1.4.x.
https://pagure.io/389-ds-base/issue/49926
Reviewed by: mreynolds, firstyear (Thanks!)
|
commit fc2008c497542970168d988a3b1cfadd59b5bcb4
Author: Viktor Ashirov <[email protected]>
Date: Fri Sep 14 21:35:41 2018 +0200
Ticket 49926 - Fix replication tests on 1.3.x
Description:
nsAccount is not supported by the 1.3.x branch.
Fix description:
Remove nsAccount objectClass from Replication Manager
if DS is older than 1.4.x.
https://pagure.io/389-ds-base/issue/49926
Reviewed by: mreynolds, firstyear (Thanks!)
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 348a7b403..4542a420d 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1292,6 +1292,8 @@ class BootstrapReplicationManager(DSLdapObject):
'netscapeServer',
'nsAccount'
]
+ if ds_is_older('1.4.0'):
+ self._create_objectclasses.remove('nsAccount')
self._protected = False
self.common_name = 'replication manager'
| 0 |
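The fix keeps a single objectclass list and drops nsAccount when the target server predates 1.4.0, using ds_is_older from lib389.utils. A minimal stand-in class showing that pattern in isolation is below; it is not BootstrapReplicationManager itself, and it assumes lib389 plus a local 389-ds install so the version check can resolve.

# Minimal stand-in for the version-conditional objectclass list in the fix.
from lib389.utils import ds_is_older

class ReplMgrSketch(object):
    def __init__(self):
        self._create_objectclasses = [
            'netscapeServer',
            'nsAccount',
        ]
        # nsAccount only exists in 1.4.x and newer schemas, so strip it
        # when the installed server is older.
        if ds_is_older('1.4.0'):
            self._create_objectclasses.remove('nsAccount')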
7c36748c034d9c1ccf236211f18f498974f322db
|
389ds/389-ds-base
|
Issue 5697 - Obsolete nsslapd-ldapimaprootdn attribute (#5698)
Description: LDAPI code uses nsslapd-ldapimaprootdn to map Unix root entry
to a Root DN entry. It usually has the same value as nsslapd-rootdn.
Changing one attribute but not changing the other leads to a non-functional
autobind configuration that breaks dsconf and WebUI.
LDAPI code should use nsslapd-rootdn value instead of having two separate entries
that should be kept in sync. This should make changing Root DN simpler and without
fear that it will break dsconf or WebUI access.
Fixes: https://github.com/389ds/389-ds-base/issues/5697
Reviewed by: @mreynolds389 (Thanks!)
|
commit 7c36748c034d9c1ccf236211f18f498974f322db
Author: Simon Pichugin <[email protected]>
Date: Wed Mar 22 18:28:37 2023 -0700
Issue 5697 - Obsolete nsslapd-ldapimaprootdn attribute (#5698)
Description: LDAPI code uses nsslapd-ldapimaprootdn to map Unix root entry
to a Root DN entry. It usually has the same value as nsslapd-rootdn.
Changing one attribute but not changing the other leads to a non-functional
autobind configuration that breaks dsconf and WebUI.
LDAPI code should use nsslapd-rootdn value instead of having two separate entries
that should be kept in sync. This should make changing Root DN simpler and without
fear that it will break dsconf or WebUI access.
Fixes: https://github.com/389ds/389-ds-base/issues/5697
Reviewed by: @mreynolds389 (Thanks!)
diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
index a1700a2da..916e38652 100644
--- a/ldap/ldif/template-dse-minimal.ldif.in
+++ b/ldap/ldif/template-dse-minimal.ldif.in
@@ -23,7 +23,6 @@ nsslapd-rootpw: %ds_passwd%
nsslapd-ldapilisten: %ldapi_enabled%
nsslapd-ldapifilepath: %ldapi%
nsslapd-ldapiautobind: %ldapi_autobind%
-nsslapd-ldapimaprootdn: %rootdn%
dn: cn=auto_bind,cn=config
objectclass: top
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index a793f91c2..846947a35 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -24,7 +24,6 @@ nsslapd-rootpw: %ds_passwd%
nsslapd-ldapilisten: %ldapi_enabled%
nsslapd-ldapifilepath: %ldapi%
nsslapd-ldapiautobind: %ldapi_autobind%
-nsslapd-ldapimaprootdn: %rootdn%
dn: cn=encryption,cn=config
objectClass: top
diff --git a/ldap/ldif/template-ldapi-autobind.ldif.in b/ldap/ldif/template-ldapi-autobind.ldif.in
index 314e6bc2a..6ce428804 100644
--- a/ldap/ldif/template-ldapi-autobind.ldif.in
+++ b/ldap/ldif/template-ldapi-autobind.ldif.in
@@ -3,9 +3,6 @@ changetype: modify
replace: nsslapd-ldapiautobind
nsslapd-ldapiautobind: on
-
-replace: nsslapd-ldapimaprootdn
-nsslapd-ldapimaprootdn: %rootdn%
--
replace: nsslapd-ldapimaptoentries
nsslapd-ldapimaptoentries: off
-
diff --git a/ldap/servers/slapd/ldapi.c b/ldap/servers/slapd/ldapi.c
index 7bec5ba77..31573412f 100644
--- a/ldap/servers/slapd/ldapi.c
+++ b/ldap/servers/slapd/ldapi.c
@@ -321,7 +321,7 @@ slapd_bind_local_user(Connection *conn)
if (ret && (0 == uid || proc_uid == uid || proc_gid == gid)) {
/* map unix root (uidNumber:0)? */
- char *root_dn = config_get_ldapi_root_dn();
+ char *root_dn = config_get_rootdn();
if (root_dn) {
Slapi_PBlock *entry_pb = NULL;
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index b2442db9e..34c964283 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -2775,6 +2775,9 @@ config_set_ldapi_root_dn(const char *attrname, char *value, char *errorbuf, int
}
if (apply) {
+ slapi_log_err(SLAPI_LOG_WARNING, "config_set_ldapi_root_dn",
+ "The \"nsslapd-ldapimaprootdn\" setting is obsolete and kept for compatibility reasons. "
+ "For LDAPI configuration, \"nsslapd-rootdn\" is used instead.\n");
CFG_LOCK_WRITE(slapdFrontendConfig);
slapi_ch_free((void **)&(slapdFrontendConfig->ldapi_root_dn));
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 76546338f..a966e9dc8 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2628,7 +2628,7 @@ typedef struct _slapdFrontendConfig
char *ldapi_filename; /* filename for ldapi socket */
slapi_onoff_t ldapi_switch; /* switch to turn ldapi on/off */
slapi_onoff_t ldapi_bind_switch; /* switch to turn ldapi auto binding on/off */
- char *ldapi_root_dn; /* DN to map root to over LDAPI */
+ char *ldapi_root_dn; /* DN to map root to over LDAPI. Obsolete setting. rootds is used instead */
slapi_onoff_t ldapi_map_entries; /* turns ldapi entry bind mapping on/off */
char *ldapi_uidnumber_type; /* type that contains uid number */
char *ldapi_gidnumber_type; /* type that contains gid number */
diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
index ca1c4d0eb..53ab4718a 100644
--- a/src/cockpit/389-console/src/ds.jsx
+++ b/src/cockpit/389-console/src/ds.jsx
@@ -73,7 +73,7 @@ const staticStates = {
<TextContent>
<Text className="ds-margin-top-xlg ds-indent-md" component={TextVariants.h2}>
Problem accessing required server configuration. Check LDAPI is properly
- configured for the current Root DN (nsslapd-rootdn & nsslapd-ldapimaprootdn).
+ configured on this instance.
</Text>
</TextContent>
)
diff --git a/src/cockpit/389-console/src/lib/server/ldapi.jsx b/src/cockpit/389-console/src/lib/server/ldapi.jsx
index 09f441ee9..f4b94bdca 100644
--- a/src/cockpit/389-console/src/lib/server/ldapi.jsx
+++ b/src/cockpit/389-console/src/lib/server/ldapi.jsx
@@ -340,7 +340,7 @@ export class ServerLDAPI extends React.Component {
/>
</GridItem>
</Grid>
- <Grid title="Map the Unix root entry to this Directory Manager DN (nsslapd-ldapimaprootdn). The UI requires this to be set to the current root DN so it is a read-only setting.">
+ <Grid title="Map the Unix root entry to this Directory Manager DN (nsslapd-ldapimaprootdn). The UI requires this to be set to the current root DN so it is a read-only setting. The attribute is obsolete. Current root DN is used.">
<GridItem className="ds-label" span={3}>
LDAPI Map To Root DN
</GridItem>
diff --git a/src/lib389/lib389/cli_conf/config.py b/src/lib389/lib389/cli_conf/config.py
index ce38bc1eb..293851514 100644
--- a/src/lib389/lib389/cli_conf/config.py
+++ b/src/lib389/lib389/cli_conf/config.py
@@ -16,6 +16,15 @@ from lib389.cli_base import (
)
+def _config_display_ldapimaprootdn_warning(log, args):
+ """If we update the rootdn we need to update the ldapi settings too"""
+
+ for attr in args.attr:
+ if attr.lower().startswith('nsslapd-ldapimaprootdn='):
+ log.warning("The \"nsslapd-ldapimaprootdn\" setting is obsolete and kept for compatibility reasons. "
+ "For LDAPI configuration, \"nsslapd-rootdn\" is used instead.")
+
+
def config_get(inst, basedn, log, args):
if args and args.attrs:
_generic_get_attr(inst, basedn, log.getChild('config_get'), Config, args)
@@ -27,17 +36,14 @@ def config_get(inst, basedn, log, args):
def config_add_attr(inst, basedn, log, args):
_generic_add_attr(inst, basedn, log.getChild('config_add_attr'), Config, args)
+ _config_display_ldapimaprootdn_warning(log, args)
+
def config_replace_attr(inst, basedn, log, args):
_generic_replace_attr(inst, basedn, log.getChild('config_replace_attr'), Config, args)
- # If we update the rootdn we need to update the ldapi settings too
- for attr in args.attr:
- if attr.startswith('nsslapd-rootdn='):
- [rootdn_attr, rootdn_val] = attr.split("=", 1)
- args.attr = ['nsslapd-ldapimaprootdn=' + rootdn_val]
- _generic_replace_attr(inst, basedn, log.getChild('config_get'),
- Config, args)
+ _config_display_ldapimaprootdn_warning(log, args)
+
def config_del_attr(inst, basedn, log, args):
_generic_del_attr(inst, basedn, log.getChild('config_del_attr'), Config, args)
| 0 |
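On the dsconf side the change stops rewriting nsslapd-ldapimaprootdn whenever nsslapd-rootdn changes and instead only warns when the obsolete attribute is still supplied. The detection is a case-insensitive prefix check over the "attribute=value" arguments; a self-contained sketch follows, with a stub logger standing in for the real CLI logger.

# Sketch of the obsolete-attribute warning added to dsconf's config commands.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("config")

OBSOLETE_MSG = ("The \"nsslapd-ldapimaprootdn\" setting is obsolete and kept "
                "for compatibility reasons. For LDAPI configuration, "
                "\"nsslapd-rootdn\" is used instead.")

def warn_if_ldapimaprootdn(attrs):
    """attrs is a list of 'attribute=value' strings, as passed to
    dsconf config add/replace."""
    for attr in attrs:
        if attr.lower().startswith('nsslapd-ldapimaprootdn='):
            log.warning(OBSOLETE_MSG)

warn_if_ldapimaprootdn(['nsslapd-ldapimaprootdn=cn=Directory Manager'])  # warns
warn_if_ldapimaprootdn(['nsslapd-rootdn=cn=Directory Manager'])          # silent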
986cb5c7c0c9c027a12c56d011c42af94dd2ebd1
|
389ds/389-ds-base
|
Issue 6485 - Fix double free in USN cleanup task
Description:
ASAN report shows double free of bind dn in the USN cleanup task data. The bind
dn was passed as a reference so it should never have to be freed by the cleanup
task.
Relates: https://github.com/389ds/389-ds-base/issues/6485
Reviewed by: tbordaz(Thanks!)
|
commit 986cb5c7c0c9c027a12c56d011c42af94dd2ebd1
Author: Mark Reynolds <[email protected]>
Date: Wed Jan 8 12:57:52 2025 -0500
Issue 6485 - Fix double free in USN cleanup task
Description:
ASAN report shows double free of bind dn in the USN cleanup task data. The bind
dn was passed as a reference so it should never have to be freed by the cleanup
task.
Relates: https://github.com/389ds/389-ds-base/issues/6485
Reviewed by: tbordaz(Thanks!)
diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
index 8b5938350..fcc6ebfb4 100644
--- a/ldap/servers/plugins/usn/usn_cleanup.c
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
@@ -240,7 +240,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
char *suffix = NULL;
char *backend_str = NULL;
char *maxusn = NULL;
- char *bind_dn;
+ char *bind_dn = NULL;
struct usn_cleanup_data *cleanup_data = NULL;
int rv = SLAPI_DSE_CALLBACK_OK;
Slapi_Task *task = NULL;
@@ -323,8 +323,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
suffix = NULL; /* don't free in this function */
cleanup_data->maxusn_to_delete = maxusn;
maxusn = NULL; /* don't free in this function */
- cleanup_data->bind_dn = bind_dn;
- bind_dn = NULL; /* don't free in this function */
+ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn);
slapi_task_set_data(task, cleanup_data);
/* start the USN tombstone cleanup task as a separate thread */
@@ -363,7 +362,6 @@ usn_cleanup_task_destructor(Slapi_Task *task)
slapi_ch_free_string(&mydata->suffix);
slapi_ch_free_string(&mydata->maxusn_to_delete);
slapi_ch_free_string(&mydata->bind_dn);
- /* Need to cast to avoid a compiler warning */
slapi_ch_free((void **)&mydata);
}
}
| 0 |
42b7146fa220f45b1ede6f8d4543344ca08ba74b
|
389ds/389-ds-base
|
RPM packaging - the ldapserver.spec file and the builddsrpm.sh script to do the prep work
|
commit 42b7146fa220f45b1ede6f8d4543344ca08ba74b
Author: Rich Megginson <[email protected]>
Date: Fri Feb 4 00:08:51 2005 +0000
RPM packaging - the ldapserver.spec file and the builddsrpm.sh script to do the prep work
diff --git a/builddsrpm.sh b/builddsrpm.sh
new file mode 100755
index 000000000..6ce63bc61
--- /dev/null
+++ b/builddsrpm.sh
@@ -0,0 +1,40 @@
+#!/bin/sh -v
+
+mkdirs() {
+ for d in "$@" ; do
+ if [ -d $d ]; then
+ mv $d $d.deleted
+ rm -rf $d.deleted &
+ fi
+ mkdir -p $d
+ done
+}
+
+if [ ! -f $HOME/.rpmmacros ]; then
+ echo "This script assumes you want to build as a non-root"
+ echo "user and in a non-default place (e.g. your home dir)"
+ echo "You must have a $HOME/.rpmmacros file that redefines"
+ echo "_topdir e.g."
+ echo "%_topdir /home/rmeggins/ds71"
+ echo "Please create that file with the above contents and"
+ echo "rerun this script."
+ exit 1
+fi
+
+NAME=ldapserver
+VERSION=7.1
+# change HEAD to a real static tag when available
+CVSTAG=HEAD
+
+mkdirs SOURCES BUILD SRPMS RPMS
+cd SOURCES
+rm -rf $NAME-$VERSION $NAME-$VERSION.tar.gz
+echo "Checking out source code . . ."
+cvs export -r $CVSTAG -d $NAME-$VERSION $NAME > /dev/null 2>&1
+echo "Building tarball . . ."
+tar cf - $NAME-$VERSION | gzip > $NAME-$VERSION.tar.gz
+rm -rf $NAME-$VERSION
+cd ..
+echo "Executing rpmbuild . . ."
+rpmbuild -ba $NAME.spec
+echo "Finished doing rpmbuild $NAME.spec"
diff --git a/ldapserver.spec b/ldapserver.spec
new file mode 100644
index 000000000..d32267841
--- /dev/null
+++ b/ldapserver.spec
@@ -0,0 +1,78 @@
+Summary: Directory Server
+Name: ldapserver
+Version: 7.1
+Release: 1
+License: GPL
+Group: System Environment/Daemons
+URL: http://www.redhat.com
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_builddir}/%{name}-root
+BuildPreReq: perl, fileutils, make
+# Without Autoreq: 0, rpmbuild finds all sorts of crazy
+# dependencies that we don't care about, and refuses to install
+Autoreq: 0
+# Without Requires: something, rpmbuild will abort!
+Requires: perl
+Prefix: /opt/ldapserver
+
+%description
+ldapserver is an LDAPv3 compliant server.
+
+# prep and setup expect there to be a Source0 file
+# in the SOURCES directory - it will be unpacked
+# in the _builddir (not BuildRoot)
+%prep
+%setup
+
+%build
+# This will do a regular make build and make pkg
+# including grabbing the admin server, setup, etc.
+# The resultant zip files and setup program will
+# be in ldapserver/pkg
+# INSTDIR is relative to ldap/cm
+# build the file structure to package under ldapserver/pkg
+# instead of MM.DD/platform
+# remove BUILD_DEBUG=optimize to build the debug version
+make BUILD_DEBUG=optimize NO_INSTALLER_TAR_FILES=1 INSTDIR=../../pkg
+
+%install
+# all we do here is run setup -b to unpack the binaries
+# into the BuildRoot
+rm -rf $RPM_BUILD_ROOT
+cd pkg
+# hack hack hack
+# hack for unbundled jre - please fix!!!!!!
+export NSJRE=/share/builds/components/jdk/1.4.2/Linux/jre
+mkdir tmp
+cd tmp
+mkdir -p bin/base/jre
+cp -r $NSJRE/bin bin/base/jre
+cp -r $NSJRE/lib bin/base/jre
+zip -q -r ../base/nsjre.zip bin
+cd ..
+rm -rf tmp
+echo yes | ./setup -b $RPM_BUILD_ROOT/opt/ldapserver
+# this is our setup script that sets up the initial
+# server instances after installation
+cd ..
+cp ldap/cm/newinst/setup $RPM_BUILD_ROOT/opt/ldapserver
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+# rather than listing individual files, we just package (and own)
+# the entire ldapserver directory - if we change this to put
+# files in different places, we don't be able to do this anymore
+%defattr(-,root,root,-)
+/opt/ldapserver
+
+%post
+echo ""
+echo "Please cd /opt/ldapserver and run ./setup"
+
+%changelog
+* Thu Jan 20 2005 Richard Megginson <[email protected]>
+- Initial build.
+
+
| 0 |
c73cd26dd9c828024c8451493699917cda185207
|
389ds/389-ds-base
|
Issue 49963 - ASan build fails on F28
Bug Description:
When building with gcc, we need to link with libasan.
Fix Description:
Add -lasan to compiler flags.
https://pagure.io/389-ds-base/issue/49963
Reviewed by: tbordaz (Thanks!)
|
commit c73cd26dd9c828024c8451493699917cda185207
Author: Viktor Ashirov <[email protected]>
Date: Wed Sep 26 13:27:20 2018 +0200
Issue 49963 - ASan build fails on F28
Bug Description:
When building with gcc, we need to link with libasan.
Fix Description:
Add -lasan to compiler flags.
https://pagure.io/389-ds-base/issue/49963
Reviewed by: tbordaz (Thanks!)
diff --git a/configure.ac b/configure.ac
index eab6d4c5b..7b67e0db2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -135,7 +135,7 @@ AC_MSG_CHECKING(for --enable-asan)
AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc/clang address sanitizer options (default: no)]),
[
AC_MSG_RESULT(yes)
- asan_cflags="-fsanitize=address -fno-omit-frame-pointer"
+ asan_cflags="-fsanitize=address -fno-omit-frame-pointer -lasan"
asan_rust_defs="-Z sanitizer=address"
],
[
diff --git a/src/svrcore/configure.ac b/src/svrcore/configure.ac
index 32f5ed82f..44c7f0914 100644
--- a/src/svrcore/configure.ac
+++ b/src/svrcore/configure.ac
@@ -46,7 +46,7 @@ AC_MSG_CHECKING(for --enable-asan)
AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc address sanitizer options (default: no)]),
[
AC_MSG_RESULT(yes)
- asan_cflags="-fsanitize=address -fno-omit-frame-pointer"
+ asan_cflags="-fsanitize=address -fno-omit-frame-pointer -lasan"
],
[
AC_MSG_RESULT(no)
| 0 |
ab61eff1dfae54e84419fd4c059a3bae902fdf95
|
389ds/389-ds-base
|
Ticket 49495 - cos stress test and improvements.
Bug Description: We previously had no way to test the cos plugin.
Fix Description: Add cos types, and a stress test for the
template system to demonstrate the issue with 49495
https://pagure.io/389-ds-base/issue/49495
Author: wibrown
Review by: spichugi (Thanks!)
|
commit ab61eff1dfae54e84419fd4c059a3bae902fdf95
Author: William Brown <[email protected]>
Date: Mon Dec 11 13:16:33 2017 +0100
Ticket 49495 - cos stress test and improvements.
Bug Description: We previously had no way to test the cos plugin.
Fix Description: Add cos types, and a stress test for the
template system to demonstrate the issue with 49495
https://pagure.io/389-ds-base/issue/49495
Author: wibrown
Review by: spichugi (Thanks!)
diff --git a/dirsrvtests/tests/stress/cos/cos_scale_template_test.py b/dirsrvtests/tests/stress/cos/cos_scale_template_test.py
new file mode 100644
index 000000000..c01aac5ea
--- /dev/null
+++ b/dirsrvtests/tests/stress/cos/cos_scale_template_test.py
@@ -0,0 +1,148 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+
+import pytest
+
+from lib389.topologies import topology_st
+
+from lib389.plugins import ClassOfServicePlugin
+from lib389.cos import CosIndirectDefinitions, CosTemplates, CosTemplate
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
+from lib389.idm.organisationalunit import OrganisationalUnits
+
+from lib389._constants import DEFAULT_SUFFIX
+
+import time
+
+# Given this should complete is about 0.005, this is generous.
+# For the final test with 20 templates, about 0.02 is an acceptable time.
+THRESHOLD = 0.05
+
+class OUCosTemplate(CosTemplate):
+ def __init__(self, instance, dn=None):
+ """Create a OU specific cos template to replicate a specific user setup.
+ This template provides ou attrs onto the target entry.
+
+ :param instance: A dirsrv instance
+ :type instance: DirSrv
+ :param dn: The dn of the template
+ :type dn: str
+ """
+ super(OUCosTemplate, self).__init__(instance, dn)
+ self._rdn_attribute = 'ou'
+ self._must_attributes = ['ou']
+ self._create_objectclasses = [
+ 'top',
+ 'cosTemplate',
+ 'organizationalUnit',
+ ]
+
+class OUCosTemplates(CosTemplates):
+ def __init__(self, instance, basedn, rdn=None):
+ """Create an OU specific cos templates to replicate a specific use setup.
+ This costemplates object allows access to the OUCosTemplate types.
+
+ :param instance: A dirsrv instance
+ :type instance: DirSrv
+ :param basedn: The basedn of the templates
+ :type basedn: str
+ :param rdn: The rdn of the templates
+ :type rdn: str
+ """
+ super(OUCosTemplates, self).__init__(instance, basedn, rdn)
+ self._objectclasses = [
+ 'cosTemplate',
+ 'organizationalUnit',
+ ]
+ self._filterattrs = ['ou']
+ self._childobject = OUCosTemplate
+
+def test_indirect_template_scale(topology_st):
+ """Test that cos templates can be added at a reasonable scale
+
+ :id: 7cbcdf22-1f9c-4222-9e76-685fe374fc20
+ :steps:
+ 1. Enable COS plugin
+ 2. Create the test user
+ 3. Add an indirect cos template
+ 4. Add a cos template
+ 5. Add the user to the cos template and assert it works.
+ 6. Add 25,000 templates to the database
+ 7. Search the user. It should not exceed THRESHOLD.
+ :expected results:
+ 1. It is enabled.
+ 2. It is created.
+ 3. Is is created.
+ 4. It is created.
+ 5. It is valid.
+ 6. They are created.
+ 7. It is fast.
+ """
+
+ cos_plugin = ClassOfServicePlugin(topology_st.standalone)
+ cos_plugin.enable()
+
+ topology_st.standalone.restart()
+
+ # Now create, the indirect specifier, and a user to template onto.
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+ user = users.create(properties=TEST_USER_PROPERTIES)
+
+ cos_inds = CosIndirectDefinitions(topology_st.standalone, DEFAULT_SUFFIX)
+ cos_ind = cos_inds.create(properties={
+ 'cn' : 'cosIndirectDef',
+ 'cosIndirectSpecifier': 'seeAlso',
+ 'cosAttribute': [
+ 'ou merge-schemes',
+ 'description merge-schemes',
+ 'postalCode merge-schemes',
+ ],
+ })
+
+ ous = OrganisationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
+ ou_temp = ous.create(properties={'ou': 'templates'})
+ cos_temps = OUCosTemplates(topology_st.standalone, ou_temp.dn)
+
+ cos_temp_u = cos_temps.create(properties={
+ 'ou' : 'ou_temp_u',
+ 'description' : 'desc_temp_u',
+ 'postalCode': '0'
+ })
+ # Edit the user to add the seeAlso ...
+ user.set('seeAlso', cos_temp_u.dn)
+
+ # Now create 25,0000 templates, they *don't* need to apply to the user though!
+ for i in range(1, 25001):
+ cos_temp_u = cos_temps.create(properties={
+ 'ou' : 'ou_temp_%s' % i,
+ 'description' : 'desc_temp_%s' % i,
+ 'postalCode': '%s' % i
+ })
+
+ if i % 500 == 0:
+ start_time = time.monotonic()
+ u_search = users.get('testuser')
+ attrs = u_search.get_attr_vals_utf8('postalCode')
+ end_time = time.monotonic()
+ diff_time = end_time - start_time
+ assert diff_time < THRESHOLD
+
+ if i == 10000:
+ # Now add our user to this template also.
+ user.add('seeAlso', cos_temp_u.dn)
+
+ start_time = time.monotonic()
+ attrs_after = u_search.get_attr_vals_utf8('postalCode')
+ end_time = time.monotonic()
+ diff_time = end_time - start_time
+ assert(set(attrs) < set(attrs_after))
+ assert diff_time < THRESHOLD
+
+
+
diff --git a/src/lib389/lib389/cos.py b/src/lib389/lib389/cos.py
new file mode 100644
index 000000000..783e5e5b4
--- /dev/null
+++ b/src/lib389/lib389/cos.py
@@ -0,0 +1,146 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+
+# Implement types for COS handling with lib389 and DS.
+
+
+from lib389._mapped_object import DSLdapObject, DSLdapObjects
+
+from lib389.utils import ensure_str
+
+class CosTemplate(DSLdapObject):
+ def __init__(self, instance, dn=None):
+ """A Cos Template defining the values to override on a target.
+
+ :param instance: DirSrv instance
+ :type instance: DirSrv
+ :param dn: The dn of the template
+ :type dn: str
+ """
+ super(CosTemplate, self).__init__(instance, dn)
+ self._rdn_attribute = 'cn'
+ self._must_attributes = ['cn']
+ # This is the ONLY TIME i'll allow extensible object ...
+ # You have been warned ...
+ self._create_objectclasses = [
+ 'top',
+ 'cosTemplate',
+ 'extensibleObject',
+ ]
+ self._protected = False
+
+class CosTemplates(DSLdapObjects):
+ def __init__(self, instance, basedn, rdn=None):
+ """The set of costemplates that exist for direct and indirect
+ implementations.
+
+ :param instance: A dirsrv instance
+ :type instance: DirSrv
+ :param basedn: The basedn of the templates
+ :type basedn: str
+ :param rdn: The rdn of the templates
+ :type rdn: str
+ """
+ super(CosTemplates, self).__init__(instance)
+ self._objectclasses = [
+ 'cosTemplate'
+ ]
+ self._filterattrs = ['cn']
+ self._childobject = CosTemplate
+ self._basedn = basedn
+ if rdn is not None:
+ self._basedn = '{},{}'.format(ensure_str(rdn), ensure_str(basedn))
+
+
+class CosIndirectDefinition(DSLdapObject):
+ def __init__(self, instance, dn=None):
+ """A Cos Indirect Definition associating an attr:value pair as a link
+ attr to a template type.
+
+ :param instance: DirSrv instance
+ :type instance: DirSrv
+ :param dn: The dn of the template
+ :type dn: str
+ """
+ super(CosIndirectDefinition, self).__init__(instance, dn)
+ self._rdn_attribute = 'cn'
+ self._must_attributes = ['cn', 'cosIndirectSpecifier', 'cosAttribute']
+ self._create_objectclasses = [
+ 'top',
+ 'cosSuperDefinition',
+ 'cosIndirectDefinition',
+ ]
+ self._protected = False
+
+class CosIndirectDefinitions(DSLdapObjects):
+ def __init__(self, instance, basedn, rdn=None):
+ """The set of cos indirect definitions that exist.
+
+ :param instance: A dirsrv instance
+ :type instance: DirSrv
+ :param basedn: The basedn of the templates
+ :type basedn: str
+ :param rdn: The rdn of the templates
+ :type rdn: str
+ """
+ super(CosIndirectDefinitions, self).__init__(instance)
+ self._objectclasses = [
+ 'cosSuperDefinition',
+ 'cosIndirectDefinition',
+ ]
+ self._filterattrs = ['cn']
+ self._childobject = CosIndirectDefinition
+ self._basedn = basedn
+ if rdn is not None:
+ self._basedn = '{},{}'.format(ensure_str(rdn), ensure_str(basedn))
+
+
+class CosPointerDefinition(DSLdapObject):
+ def __init__(self, instance, dn=None):
+ """A Cos Pointer Definition associating a dn syntax type as a link
+ attr to a template type.
+
+ :param instance: DirSrv instance
+ :type instance: DirSrv
+ :param dn: The dn of the template
+ :type dn: str
+ """
+ super(CosPointerDefinition, self).__init__(instance, dn)
+ self._rdn_attribute = 'cn'
+ self._must_attributes = ['cn', 'cosTemplateDn', 'cosAttribute']
+ self._create_objectclasses = [
+ 'top',
+ 'cosSuperDefinition',
+ 'cosPointerDefinition',
+ ]
+ self._protected = False
+
+class CosPointerDefinitions(DSLdapObjects):
+ def __init__(self, instance, basedn, rdn=None):
+ """The set of cos pointer definitions that exist.
+
+ :param instance: A dirsrv instance
+ :type instance: DirSrv
+ :param basedn: The basedn of the templates
+ :type basedn: str
+ :param rdn: The rdn of the templates
+ :type rdn: str
+ """
+ super(CosPointerDefinitions, self).__init__(instance)
+ self._objectclasses = [
+ 'cosSuperDefinition',
+ 'cosPointerDefinition',
+ ]
+ self._filterattrs = ['cn']
+ self._childobject = CosPointerDefinition
+ self._basedn = basedn
+ if rdn is not None:
+ self._basedn = '{},{}'.format(ensure_str(rdn), ensure_str(basedn))
+
+
| 0 |
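The core of the stress test above is a timing assertion: after each batch of templates it re-reads the cos-generated attribute and requires the lookup to stay under THRESHOLD, measured with time.monotonic(). That pattern, reduced to its essentials with a stand-in workload instead of a real LDAP search, looks like this (the threshold and workload are arbitrary illustrations):

# Timing-assertion pattern used by the COS stress test, with a fake workload.
import time

THRESHOLD = 0.05  # seconds; generous bound for the measured operation

def measure(operation, *args, **kwargs):
    """Run operation and return (result, elapsed_seconds) using a
    monotonic clock, as the test does around the attribute lookups."""
    start = time.monotonic()
    result = operation(*args, **kwargs)
    return result, time.monotonic() - start

def fake_search(n):
    return sum(range(n))   # stand-in for the cos-generated attribute lookup

for batch in range(1, 6):
    _result, elapsed = measure(fake_search, 100_000)
    assert elapsed < THRESHOLD, "batch %d took %.4fs" % (batch, elapsed)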
a98a49328cf470d3b13cebebfc1229a843549121
|
389ds/389-ds-base
|
Bug 619122 - fix Coverity Defect Type: Resource leaks issues CID 11975 - 12053
https://bugzilla.redhat.com/show_bug.cgi?id=619122
Resolves: bug 619122
Bug description: fix Coverity Defect Type: Resource leaks issues CID 12051.
description: The legacy_preop_compare() has been modified to release referral before it returns.
|
commit a98a49328cf470d3b13cebebfc1229a843549121
Author: Endi S. Dewata <[email protected]>
Date: Fri Jul 30 15:02:32 2010 -0500
Bug 619122 - fix Coverity Defect Type: Resource leaks issues CID 11975 - 12053
https://bugzilla.redhat.com/show_bug.cgi?id=619122
Resolves: bug 619122
Bug description: fix Coverity Defect Type: Resource leaks issues CID 12051.
description: The legacy_preop_compare() has been modified to release the referral before it returns.
diff --git a/ldap/servers/plugins/replication/repl_compare.c b/ldap/servers/plugins/replication/repl_compare.c
index 0771511c2..acb0d692c 100644
--- a/ldap/servers/plugins/replication/repl_compare.c
+++ b/ldap/servers/plugins/replication/repl_compare.c
@@ -67,5 +67,6 @@ legacy_preop_compare( Slapi_PBlock *pb )
slapi_send_ldap_result(pb, LDAP_REFERRAL, NULL, NULL, 0, referral);
return_code = 1; /* return 1 to prevent further search processing */
}
+ slapi_ch_free((void**)&referral);
return return_code;
}
| 0 |
7f16473ac08fbbe23368dfcd738db187bf43e511
|
389ds/389-ds-base
|
Issue 4758 - Add tests for WebUI
Description:
Added WebUI visibility tests for server tab, database tab, replication tab, schema tab, monitoring tab.
Relates: https://github.com/389ds/389-ds-base/issues/4758
Reviewed by: https://github.com/bsimonova (Thank you!)
|
commit 7f16473ac08fbbe23368dfcd738db187bf43e511
Author: Vladimir Cech <[email protected]>
Date: Tue Mar 14 16:44:48 2023 +0100
Issue 4758 - Add tests for WebUI
Description:
Added WebUI visibility tests for server tab, database tab, replication tab, schema tab, monitoring tab.
Relates: https://github.com/389ds/389-ds-base/issues/4758
Reviewed by: https://github.com/bsimonova (Thank you!)
diff --git a/dirsrvtests/tests/suites/webui/database/__init__.py b/dirsrvtests/tests/suites/webui/database/__init__.py
new file mode 100644
index 000000000..15d3b6c8d
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/database/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: WebUI: Database
+"""
diff --git a/dirsrvtests/tests/suites/webui/database/database_test.py b/dirsrvtests/tests/suites/webui/database/database_test.py
new file mode 100644
index 000000000..afffe45a9
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/database/database_test.py
@@ -0,0 +1,301 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2023 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import time
+import subprocess
+import pytest
+
+from lib389.cli_idm.account import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from .. import setup_page, check_frame_assignment, setup_login
+
+pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
+pytest.importorskip('playwright')
+
+SERVER_ID = 'standalone1'
+
+
+def test_database_tab_availability(topology_st, page, browser_name):
+ """ Test Database tab visibility
+
+ :id: 863863e0-4ba7-4309-8f56-e6719cdf2bbe
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab.
+ 2. Check if Limits tab under Global Database Configuration is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Check if database tab contents are loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.get_by_role('tab', name='Limits').wait_for()
+ assert frame.get_by_role('tab', name='Limits').is_visible()
+
+
+def test_global_database_configuration_availability(topology_st, page, browser_name):
+ """ Test Global Database Configuration tabs visibility
+
+ :id: d0efda45-4e8e-4703-b9c0-ab53249dafc3
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab and check if ID List Scan Limit label is visible.
+ 2. Click on Database Cache tab and check if Automatic Cache Tuning checkbox is visible.
+ 3. Click on Import Cache tab and check if Automatic Import Cache Tuning checkbox is visible.
+ 4. Click on NDN Cache tab and check if Normalized DN Cache Max Size label is visible.
+ 5. Click on Database Locks tab and check if Enable DB Lock Monitoring checkbox is visible.
+ 6. Click on Advanced Settings and check if Transaction Logs Directory input field is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ 4. Element is visible
+ 5. Element is visible
+ 6. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Check if element on Limits tab is loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.get_by_text('ID List Scan Limit', exact=True).wait_for()
+ assert frame.get_by_text('ID List Scan Limit', exact=True).is_visible()
+
+ log.info('Click on Database Cache tab and check if element is loaded')
+ frame.get_by_role('tab', name='Database Cache', exact=True).click()
+ assert frame.locator('#db_cache_auto').is_visible()
+
+ log.info('Click on Import Cache tab and check if element is loaded')
+ frame.get_by_role('tab', name='Import Cache', exact=True).click()
+ assert frame.locator('#import_cache_auto').is_visible()
+
+ log.info('Click on NDN Cache tab and check if element is loaded')
+ frame.get_by_role('tab', name='NDN Cache', exact=True).click()
+ assert frame.get_by_text('Normalized DN Cache Max Size').is_visible()
+
+ log.info('Click on Database Locks tab and check if element is loaded')
+ frame.get_by_role('tab', name='Database Locks', exact=True).click()
+ assert frame.locator('#dblocksMonitoring').is_visible()
+
+ log.info('Click on Advanced Settings tab and check if element is loaded')
+ frame.get_by_role('tab', name='Advanced Settings', exact=True).click()
+ assert frame.locator('#txnlogdir').is_visible()
+
+
+def test_chaining_configuration_availability(topology_st, page, browser_name):
+ """ Test Chaining Configuration settings visibility
+
+ :id: 1f936968-d2fc-4fee-beeb-caeeb5df8c3f
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab, click on Chaining Configuration button on the side panel.
+ 2. Check if Size Limit input field is visible.
+ 3. Click on Controls & Components tab and check if Forwarded LDAP Controls heading is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Chaining Configuration and check if element is loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.locator('#chaining-config').click()
+ frame.locator('#defSizeLimit').wait_for()
+ assert frame.locator('#defSizeLimit').is_visible()
+
+ log.info('Click on Controls & Components tab and check if element is loaded')
+ frame.get_by_role('tab', name='Controls & Components').click()
+ assert frame.get_by_role('heading', name='Forwarded LDAP Controls').is_visible()
+
+
+def test_backups_and_ldifs_availability(topology_st, page, browser_name):
+ """ Test Backups & LDIFs settings visibility.
+
+ :id: 90571e96-f3c9-4bec-83d6-04c61e8a0e78
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab, click on Backups & LDIFs button on the side panel.
+ 2. Check if Create Backup button is visible.
+ 3. Click on LDIFs tab and check if Create LDIF button is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Backups & LDIFs button and check if element is loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.locator('#backups').click()
+ assert frame.get_by_role('button', name='Create Backup').is_visible()
+
+ log.info('Click on LDIFs tab and check if element is loaded.')
+ frame.get_by_role('tab', name='LDIFs').click()
+ assert frame.get_by_role('button', name='Create LDIF').is_visible()
+
+
+def test_global_policy_availability(topology_st, page, browser_name):
+ """ Check if Global Policy settings is visible
+
+ :id: 2bdd219d-c28d-411d-9758-18386f472ad2
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab, click on Global Policy button on the side panel.
+ 2. Check if Password Minimum Age input field is visible.
+ 3. Click on Expiration tab and click on Enforce Password Expiration checkbox.
+ 4. Check if Allowed Logins After Password Expires input field is visible.
+ 5. Click on Account Lockout tab and click on Enable Account Lockout checkbox.
+ 6. Check if Number of Failed Logins That Locks Out Account input field is visible.
+ 7. Click on Syntax Checking tab and click on Enable Password Syntax Checking checkbox.
+ 8. Check if Minimum Length input field is visible.
+ 9. Click on Temporary Password Rules tab and check if Password Max Use input field is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Success
+ 4. Element is visible
+ 5. Success
+ 6. Element is visible
+ 7. Success
+ 8. Element is visible
+ 9. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Global Policy button and check if element is loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.locator('#pwpolicy').click()
+ frame.locator('#passwordminage').wait_for()
+ assert frame.locator('#passwordminage').is_visible()
+
+ log.info('Click on Expiration tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Expiration').click()
+ frame.get_by_text('Enforce Password Expiration').click()
+ assert frame.locator('#passwordgracelimit').is_visible()
+
+ log.info('Click on Account Lockout tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Account Lockout').click()
+ frame.get_by_text('Enable Account Lockout').click()
+ assert frame.locator('#passwordmaxfailure').is_visible()
+
+ log.info('Click on Syntax Checking tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Syntax Checking').click()
+ frame.get_by_text('Enable Password Syntax Checking').click()
+ assert frame.locator('#passwordminlength').is_visible()
+
+ log.info('Click on Temporary Password Rules tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Temporary Password Rules').click()
+ assert frame.locator('#passwordtprmaxuse').is_visible()
+
+
+def test_local_policy_availability(topology_st, page, browser_name):
+ """ Test Local Policies settings visibility
+
+ :id: f540e0fa-a4c6-4c88-b97a-d21ada68f627
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab, click on Local Policies button on side panel.
+ 2. Check if Local Password Policies columnheader is visible.
+ 3. Click on Edit Policy tab and check if Please choose a policy from the Local Policy Table heading is visible.
+ 4. Click on Create A Policy tab and check if Target DN input field is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Element is visible
+ 4. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Local Policies button and check if element is loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.locator('#localpwpolicy').click()
+ frame.get_by_role('columnheader', name='Local Password Policies').wait_for()
+ assert frame.get_by_role('columnheader', name='Local Password Policies').is_visible()
+
+ log.info('Click on Edit Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Edit Policy').click()
+ assert frame.get_by_role('heading', name='Please choose a policy from the Local Policy Table.').is_visible()
+
+ log.info('Click on Create A Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Create A Policy').click()
+ assert frame.locator('#policyDN').is_visible()
+
+
+def test_suffixes_policy_availability(topology_st, page, browser_name):
+ """ Test Suffixes settings visibility
+
+ :id: b8399229-3b98-46d7-af15-f5ff0bcc6be9
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Database tab, click on dc=example,dc=com button.
+ 2. Check if Entry Cache Size input field is visible.
+ 3. Click on Referrals tab and check if Referrals columnheader is visible.
+ 4. Click on Indexes tab and check if Database Indexes sub-tab is visible.
+ 5. Click on VLV Indexes and check if VLV Indexes columnheader is visible.
+ 6. Click on Encrypted Attributes and check if Encrypted Attribute columnheader is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Element is visible
+ 4. Element is visible
+ 5. Element is visible
+ 6. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Suffixes and check if element is loaded.')
+ frame.get_by_role('tab', name='Database', exact=True).click()
+ frame.locator(r'#dc\=example\,dc\=com').click()
+ frame.locator('#cachememsize').wait_for()
+ assert frame.locator('#cachememsize').is_visible()
+
+ log.info('Click on Referrals tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Referrals').click()
+ frame.get_by_role('columnheader', name='Referrals').wait_for()
+ assert frame.get_by_role('columnheader', name='Referrals').is_visible()
+
+ log.info('Click on Indexes tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Indexes', exact=True).click()
+ frame.get_by_role('tab', name='Database Indexes').wait_for()
+ assert frame.get_by_role('tab', name='Database Indexes').is_visible()
+
+ log.info('Click on VLV Indexes tab and check if element is loaded.')
+ frame.get_by_role('tab', name='VLV Indexes').click()
+ frame.get_by_role('columnheader', name='VLV Indexes').wait_for()
+ assert frame.get_by_role('columnheader', name='VLV Indexes').is_visible()
+
+ log.info('Click on Encrypted Attributes tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Encrypted Attributes').click()
+ frame.get_by_role('columnheader', name='Encrypted Attribute').wait_for()
+ assert frame.get_by_role('columnheader', name='Encrypted Attribute').is_visible()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
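Each check above follows the same shape: click a tab or side-panel button, wait for a marker element, then assert it is visible. A minimal sketch of that pattern factored into a helper is shown below; the helper name is hypothetical and the Playwright sync locator API is assumed, so this is an illustration rather than part of the patch.

# Hypothetical helper illustrating the click-then-verify pattern used in
# the tests above (Playwright sync API assumed).
def assert_tab_shows(frame, tab_name, marker_selector):
    """Click a tab and assert that a marker element becomes visible."""
    frame.get_by_role('tab', name=tab_name, exact=True).click()
    marker = frame.locator(marker_selector)
    marker.wait_for()            # Playwright waits for the 'visible' state by default
    assert marker.is_visible()

# For example, the Database Cache check above could be expressed as:
# assert_tab_shows(frame, 'Database Cache', '#db_cache_auto')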
diff --git a/dirsrvtests/tests/suites/webui/monitoring/__init__.py b/dirsrvtests/tests/suites/webui/monitoring/__init__.py
new file mode 100644
index 000000000..ae9b15246
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/monitoring/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: WebUI: Monitoring
+"""
diff --git a/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py b/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py
new file mode 100644
index 000000000..173e503cb
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py
@@ -0,0 +1,244 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2023 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import subprocess
+import pytest
+
+from lib389.cli_idm.account import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from .. import setup_page, check_frame_assignment, setup_login
+
+pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
+pytest.importorskip('playwright')
+
+SERVER_ID = 'standalone1'
+
+
+def test_monitoring_tab_visibility(topology_st, page, browser_name):
+ """ Test Monitoring tab visibility
+
+ :id: e16be05a-4465-4a2b-bfe2-7c5aafb55c91
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Monitoring tab.
+ 2. Check if Resource Charts tab is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Check if Monitoring tab is loaded.')
+ frame.get_by_role('tab', name='Monitoring', exact=True).click()
+ frame.get_by_role('tab', name='Resource Charts').wait_for()
+ assert frame.get_by_role('tab', name='Resource Charts').is_visible()
+
+
+def test_server_statistics_visibility(topology_st, page, browser_name):
+ """ Test Server Statistics monitoring visibility
+
+ :id: 90e964e8-99d7-45e5-ad20-520099db054e
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Monitoring tab and check if Connections heading is visible.
+ 2. Click on Server Stats tab and check if Server Instance label is visible.
+ 3. Click on Connection Table tab and check if Client Connections heading is visible.
+ 4. Click on Disk Space tab and check if Refresh button is visible.
+ 5. Click on SNMP Counters and check if Bytes Sent label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ 4. Element is visible
+ 5. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Monitoring tab and check if element in Server Statistics is loaded.')
+ frame.get_by_role('tab', name='Monitoring', exact=True).click()
+ frame.get_by_role('heading', name='Connections').wait_for()
+ assert frame.get_by_role('heading', name='Connections').is_visible()
+
+ log.info('Click on Server Stats tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Server Stats').click()
+ assert frame.get_by_text('Server Instance').is_visible()
+
+ log.info('Click on Connection Table tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Connection Table').click()
+ assert frame.get_by_role('heading', name='Client Connections').is_visible()
+
+ log.info('Click on Disk Space tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Disk Space').click()
+ assert frame.get_by_role('button', name='Refresh').is_visible()
+
+ log.info('Click on SNMP Counters tab and check if element is loaded.')
+ frame.get_by_role('tab', name='SNMP Counters').click()
+ assert frame.get_by_text('Bytes Sent', exact=True).is_visible()
+
+
+def test_replication_visibility(topology_st, page, browser_name):
+ """ Test Replication monitoring visibility
+
+ :id: 65b271e5-a172-461b-ad36-605706d68780
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Replication Tab, Click on Enable Replication.
+ 2. Fill Password and Confirm password.
+ 3. Click on Enable Replication button and wait until Add Replication Manager is visible.
+ 4. Click on Monitoring tab, click on Replication button on the side panel.
+ 5. Check if Generate Report button is visible.
+ 6. Click on Agreements tab and check if Replication Agreements columnheader is visible.
+ 7. Click on Winsync tab and check if Winsync Agreements columnheader is visible.
+ 8. Click on Tasks tab and check if CleanAllRUV Tasks columnheader is visible.
+ 9. Click on Conflict Entries tab and check if Replication Conflict Entries columnheader is visible.
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Element is visible
+ 6. Element is visible
+ 7. Element is visible
+ 8. Element is visible
+ 9. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Enable replication in order to proceed with replication visibility testing.')
+ frame.get_by_role('tab', name='Replication').click()
+ frame.get_by_role('button', name='Enable Replication').click()
+ frame.fill('#enableBindPW', 'redhat')
+ frame.fill('#enableBindPWConfirm', 'redhat')
+ frame.get_by_role("dialog", name="Enable Replication").get_by_role("button", name="Enable Replication").click()
+ frame.get_by_role('button', name='Add Replication Manager').wait_for()
+
+ log.info('Click on Monitoring tab and then on Replication in the menu and check if element is loaded.')
+ frame.get_by_role('tab', name='Monitoring', exact=True).click()
+ frame.locator('#replication-monitor').click()
+ frame.get_by_role('button', name='Generate Report').wait_for()
+ assert frame.get_by_role('button', name='Generate Report').is_visible()
+
+ log.info('Click on Agreements tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Agreements').click()
+ assert frame.get_by_role('columnheader', name='Replication Agreements').is_visible()
+
+ log.info('Click on Winsync tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Winsync').click()
+ assert frame.get_by_role('columnheader', name='Winsync Agreements').is_visible()
+
+ log.info('Click on Tasks tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Tasks').click()
+ assert frame.get_by_role('columnheader', name='CleanAllRUV Tasks').is_visible()
+
+ log.info('Click on Conflict Entries tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Conflict Entries').click()
+ assert frame.get_by_role('columnheader', name='Replication Conflict Entries').is_visible()
+
+
+def test_database_visibility(topology_st, page, browser_name):
+ """ Test Database monitoring visibility
+
+ :id: bf3f3e42-e748-41b8-bda2-a1856343a995
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Monitoring tab, click on dc=example,dc=com button on the side panel.
+ 2. Check if Entry Cache Hit Ratio label is visible.
+ 3. Click on DN Cache tab and check if DN Cache Hit Ratio label is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Monitoring tab, then click on database button and check if element is loaded.')
+ frame.get_by_role('tab', name='Monitoring', exact=True).click()
+ frame.locator(r'#dc\=example\,dc\=com').click()
+ frame.get_by_text('Entry Cache Hit Ratio').wait_for()
+ assert frame.get_by_text('Entry Cache Hit Ratio').is_visible()
+
+ log.info('Click on DN Cache tab and check if element is loaded.')
+ frame.get_by_role('tab', name='DN Cache').click()
+ assert frame.get_by_text('DN Cache Hit Ratio').is_visible()
+
+
+def test_logging_visibility(topology_st, page, browser_name):
+ """ Test Logging monitoring visibility
+
+ :id: c3e91cd4-569e-45e2-adc7-cbffb4ee7b6c
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Monitoring tab, click on Access Log button on side panel.
+ 2. Check if Access Log text field is visible.
+ 3. Click on Audit Log button on side panel.
+ 4. Check if Audit Log text field is visible.
+ 5. Click on Audit Failure Log button on side panel.
+ 6. Check if Audit Failure Log text field is visible.
+ 7. Click on Errors Log button on side panel.
+ 8. Check if Errors Log text field is visible.
+ 9. Click on Security Log button on side panel.
+ 10. Check if Security Log text field is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ 3. Success
+ 4. Element is visible
+ 5. Success
+ 6. Element is visible
+ 7. Success
+ 8. Element is visible
+ 9. Success
+ 10. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Monitoring tab, then click on Access Log button and check if element is loaded.')
+ frame.get_by_role('tab', name='Monitoring', exact=True).click()
+ frame.locator('#access-log-monitor').click()
+ frame.locator('#accesslog-area').wait_for()
+ assert frame.locator('#accesslog-area').is_visible()
+
+ log.info('Click on Audit Log button and check if element is loaded.')
+ frame.locator('#audit-log-monitor').click()
+ frame.locator('#auditlog-area').wait_for()
+ assert frame.locator('#auditlog-area').is_visible()
+
+ log.info('Click on Audit Failure Log button and check if element is loaded.')
+ frame.locator('#auditfail-log-monitor').click()
+ frame.locator('#auditfaillog-area').wait_for()
+ assert frame.locator('#auditfaillog-area').is_visible()
+
+ log.info('Click on Errors Log button and check if element is loaded.')
+ frame.locator('#error-log-monitor').click()
+ frame.locator('#errorslog-area').wait_for()
+ assert frame.locator('#errorslog-area').is_visible()
+
+ log.info('Click on Security Log button and check if element is loaded.')
+ frame.locator('#security-log-monitor').click()
+ frame.locator('#securitylog-area').wait_for()
+ assert frame.locator('#securitylog-area').is_visible()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
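The fixed time.sleep(1) pauses after login work, but they add a constant delay and can still race on slow machines. Where that matters, Playwright's assertion helpers poll with a timeout instead; a minimal sketch, assuming the sync expect API is available in the installed Playwright version:

# Hypothetical alternative to a fixed sleep: expect() retries the check
# until it passes or the timeout expires (Playwright sync API assumed).
from playwright.sync_api import expect

def open_monitoring_tab(frame):
    tab = frame.get_by_role('tab', name='Monitoring', exact=True)
    expect(tab).to_be_visible(timeout=10000)   # poll for up to 10 seconds
    tab.click()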
diff --git a/dirsrvtests/tests/suites/webui/replication/replication_test.py b/dirsrvtests/tests/suites/webui/replication/replication_test.py
index f30e31d8c..97df56afa 100644
--- a/dirsrvtests/tests/suites/webui/replication/replication_test.py
+++ b/dirsrvtests/tests/suites/webui/replication/replication_test.py
@@ -95,6 +95,50 @@ def test_enable_replication(topology_st, page, browser_name):
assert frame.get_by_role('button', name='Add Replication Manager').is_visible()
+def test_suffixes_visibility(topology_st, page, browser_name):
+ """ Test visibility of created suffixes in replication tab
+
+ :id: 47141eaa-a506-4a60-a3ae-8e960f692faa
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Replication tab and check if Add Replication Manager button is visible.
+ 2. Click on Agreements tab and check if column header with Replication Agreements text is visible.
+ 3. Click on Winsync Agreements tab and check if column header with Winsync Agreements text is visible.
+ 4. Click on Change Log tab and check if label with Changelog Maximum Entries text is visible.
+ 5. Click on RUV's & Tasks tab and check if Export Changelog button is visible.
+ :expectedresults:
+ 1. Element is visible.
+ 2. Element is visible.
+ 3. Element is visible.
+ 4. Element is visible.
+ 5. Element is visible.
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Replication tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Replication').click()
+ frame.get_by_role('button', name='Add Replication Manager').wait_for()
+ assert frame.get_by_role('button', name='Add Replication Manager').is_visible()
+
+ log.info('Click on Agreements tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Agreements (0)', exact=True).click()
+ assert frame.get_by_role('columnheader', name='Replication Agreements').is_visible()
+
+ log.info('Click on Winsync Agreements tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Winsync Agreements').click()
+ assert frame.get_by_role('columnheader', name='Winsync Agreements').is_visible()
+
+ log.info('Click on Change Log tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Change Log').click()
+ assert frame.get_by_text('Changelog Maximum Entries').is_visible()
+
+ log.info("Click on RUV'S & Tasks tab and check if element is loaded.")
+ frame.get_by_role('tab', name="RUV'S & Tasks").click()
+ assert frame.get_by_role('button', name='Export Changelog').is_visible()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/webui/schema/__init__.py b/dirsrvtests/tests/suites/webui/schema/__init__.py
new file mode 100644
index 000000000..f00925faf
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/schema/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: WebUI: Schema
+"""
diff --git a/dirsrvtests/tests/suites/webui/schema/schema_test.py b/dirsrvtests/tests/suites/webui/schema/schema_test.py
new file mode 100644
index 000000000..fe80f9ec7
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/schema/schema_test.py
@@ -0,0 +1,64 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2023 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import time
+import subprocess
+import pytest
+
+from lib389.cli_idm.account import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from .. import setup_page, check_frame_assignment, setup_login
+
+pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
+pytest.importorskip('playwright')
+
+SERVER_ID = 'standalone1'
+
+
+def test_schema_tab_visibility(topology_st, page, browser_name):
+ """ Test Schema tab visibility
+
+ :id: 4cbca624-b7be-49db-93f6-f9a9df79a9b2
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Schema tab and check if Add ObjectClass button is visible.
+ 2. Click on Attributes tab and check if Add Attribute button is visible.
+ 3. Click on Matching Rules tab and check if Matching Rule columnheader is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Schema tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Schema', exact=True).click()
+ frame.get_by_role('button', name='Add ObjectClass').wait_for()
+ assert frame.get_by_role('button', name='Add ObjectClass').is_visible()
+
+ log.info('Click on Attributes tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Attributes').click()
+ frame.get_by_role('button', name='Add Attribute').wait_for()
+ assert frame.get_by_role('button', name='Add Attribute').is_visible()
+
+ log.info('Click on Matching Rules tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Matching Rules').click()
+ frame.get_by_role('columnheader', name='Matching Rule').wait_for()
+ assert frame.get_by_role('columnheader', name='Matching Rule').is_visible()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/webui/server/__init__.py b/dirsrvtests/tests/suites/webui/server/__init__.py
new file mode 100644
index 000000000..2dfbaeba0
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/server/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: WebUI: Server
+"""
diff --git a/dirsrvtests/tests/suites/webui/server/server_test.py b/dirsrvtests/tests/suites/webui/server/server_test.py
new file mode 100644
index 000000000..9732d3d66
--- /dev/null
+++ b/dirsrvtests/tests/suites/webui/server/server_test.py
@@ -0,0 +1,351 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2023 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import subprocess
+import pytest
+
+from lib389.cli_idm.account import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from .. import setup_page, check_frame_assignment, setup_login
+
+pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
+pytest.importorskip('playwright')
+
+SERVER_ID = 'standalone1'
+
+
+def test_server_settings_availability(topology_st, page, browser_name):
+ """ Test visibility of Server Settings in server tab
+
+ :id: e87a3c6f-3fda-49fa-91c4-a8ca418f32c2
+ :setup: Standalone instance
+ :steps:
+ 1. Check if General Settings tab is visible.
+ :expectedresults:
+ 1. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Check if server settings tabs are loaded.')
+ frame.get_by_role('tab', name='General Settings', exact=True).wait_for()
+ assert frame.get_by_role('tab', name='General Settings').is_visible()
+
+
+def test_server_settings_tabs_availability(topology_st, page, browser_name):
+ """ Test visibility of individual tabs under Server Settings
+
+ :id: 08cd0f84-e233-4a94-8230-a0cc54636595
+ :setup: Standalone instance
+ :steps:
+ 1. Check if Server Hostname is visible
+ 2. Click on Directory manager tab and check if Directory Manager DN is visible.
+ 3. Click on Disk Monitoring tab, click on checkbox and check if Disk Monitoring Threshold label is visible.
+ 4. Click on Advanced Settings tab and check if Anonymous Resource Limits DN text input is visible.
+ :expectedresults:
+ 1. Element is visible.
+ 2. Element is visible.
+ 3. Element is visible.
+ 4. Element is visible.
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Check if General Settings tab is loaded.')
+ frame.locator('#nsslapd-localhost').wait_for()
+ assert frame.locator('#nsslapd-localhost').is_visible()
+
+ log.info('Click on Directory Manager tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Directory Manager').click()
+ assert frame.locator('#nsslapd-rootdn').is_visible()
+
+ log.info('Click on Disk Monitoring tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Disk Monitoring').click()
+ frame.locator('#nsslapd-disk-monitoring').click()
+ assert frame.get_by_text('Disk Monitoring Threshold').is_visible()
+
+ log.info('Click on Advanced Settings tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Advanced Settings').click()
+ assert frame.locator('#nsslapd-anonlimitsdn').is_visible()
+
+
+def test_tuning_and_limits_availability(topology_st, page, browser_name):
+ """ Test visibility of Tuning & Limits settings
+
+ :id: c09af833-0359-46ad-a701-52b67f315f70
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Tuning & Limits button on the side panel and check if Number Of Worker Threads is visible.
+ 2. Click on Show Advanced Settings button.
+ 3. Check if Outbound IO Timeout label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Success
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Tuning & Limits button and check if element is loaded.')
+ frame.locator('#tuning-config').click()
+ frame.get_by_text("Number Of Worker Threads").wait_for()
+ assert frame.get_by_text("Number Of Worker Threads").is_visible()
+
+ log.info('Open expandable section and check if element is loaded.')
+ frame.get_by_role('button', name='Show Advanced Settings').click()
+ frame.get_by_text('Outbound IO Timeout').wait_for()
+ assert frame.get_by_text('Outbound IO Timeout').is_visible()
+
+
+def test_security_availability(topology_st, page, browser_name):
+ """ Test Security Settings tabs visibility
+
+ :id: 6cd72564-798c-4524-89d3-aa2691535905
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Security button on the side panel and check if Security Configuration tab is visible.
+ 2. Click on Certificate Management tab and check if Add CA Certificate button is visible.
+ 3. Click on Cipher Preferences and check if Enabled Ciphers heading is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Security button and check if element is loaded.')
+ frame.locator('#security-config').click()
+ frame.get_by_role('tab', name='Security Configuration').wait_for()
+ assert frame.get_by_role('tab', name='Security Configuration').is_visible()
+
+ log.info('Click on Certificate Management tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Certificate Management').click()
+ assert frame.get_by_role('button', name='Add CA Certificate').is_visible()
+
+ log.info('Click on Cipher Preferences tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Cipher Preferences').click()
+ assert frame.get_by_role('heading', name='Enabled Ciphers').is_visible()
+
+
+def test_sasl_settings_and_mappings_availability(topology_st, page, browser_name):
+ """ Test SASL Settings & Mappings visibility
+
+ :id: 88954828-7533-4ac9-bfc0-e9c68f95278f
+ :setup: Standalone instance
+ :steps:
+ 1. Click on SASL Settings & Mappings button on the side panel.
+ 2. Check if Max SASL Buffer size text input field is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on SASL Settings & Mappings and check if element is loaded.')
+ frame.locator('#sasl-config').click()
+ frame.locator('#maxBufSize').wait_for()
+ assert frame.locator('#maxBufSize').is_visible()
+
+
+def test_ldapi_and_autobind_availability(topology_st, page, browser_name):
+ """ Test LDAPI & AutoBind settings visibility
+
+ :id: 505f1e3b-5d84-4734-8c64-fbb8b2805d6b
+ :setup: Standalone instance
+ :steps:
+ 1. Click on LDAPI & Autobind button on the side panel.
+ 2. Check if LDAPI Socket File Path is visible.
+ :expectedresults:
+ 1. Success
+ 2. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on LDAPI & Autobind and check if element is loaded.')
+ frame.locator('#ldapi-config').click()
+ frame.locator('#nsslapd-ldapifilepath').wait_for()
+ assert frame.locator('#nsslapd-ldapifilepath').is_visible()
+
+
+def test_access_log_availability(topology_st, page, browser_name):
+ """ Test Access Log tabs visibility
+
+ :id: 48f8e778-b28b-45e1-8946-29456a53cf58
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Access Log button on the side panel and check if Access Log Location input field is visible.
+ 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible.
+ 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Access Log button and check if element is loaded.')
+ frame.locator('#access-log-config').click()
+ frame.locator('#nsslapd-accesslog').wait_for()
+ assert frame.locator('#nsslapd-accesslog').is_visible()
+
+ log.info('Click on Rotation Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Rotation Policy').click()
+ assert frame.get_by_text('Maximum Number Of Logs').is_visible()
+
+ log.info('Click on Deletion Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Deletion Policy').click()
+ assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible()
+
+
+def test_audit_log_availability(topology_st, page, browser_name):
+ """ Test Audit Log tabs visibility
+
+ :id: a1539010-22b8-4e6b-b377-666a10c20573
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Audit Log button on the side panel and check if Audit Log Location input field is visible.
+ 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible.
+ 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Audit Log button and check if element is loaded.')
+ frame.locator('#audit-log-config').click()
+ frame.locator('#nsslapd-auditlog').wait_for()
+ assert frame.locator('#nsslapd-auditlog').is_visible()
+
+ log.info('Click on Rotation Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Rotation Policy').click()
+ assert frame.get_by_text('Maximum Number Of Logs').is_visible()
+
+ log.info('Click on Deletion Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Deletion Policy').click()
+ assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible()
+
+
+def test_audit_failure_log_availability(topology_st, page, browser_name):
+ """ Test Audit Failure Log tabs visibility
+
+ :id: 0adcd31f-98a0-4b70-9efa-e810bc971f77
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Audit Failure Log button on the side panel and check if Audit Log Location input field is visible.
+ 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible.
+ 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Audit Failure Log button and check if element is loaded.')
+ frame.locator('#auditfail-log-config').click()
+ frame.locator('#nsslapd-auditfaillog').wait_for()
+ assert frame.locator('#nsslapd-auditfaillog').is_visible()
+
+ log.info('Click on Rotation Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Rotation Policy').click()
+ assert frame.get_by_text('Maximum Number Of Logs').is_visible()
+
+ log.info('Click on Deletion Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Deletion Policy').click()
+ assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible()
+
+
+def test_errors_log_availability(topology_st, page, browser_name):
+ """ Test Errors Log tabs visibility
+
+ :id: 52cac1fd-a0cd-4c6e-8963-16d764955b86
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Errors Log button in the side panel and check if Errors Log Location input field is visible.
+ 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible.
+ 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Errors Log button and check if element is loaded.')
+ frame.locator('#error-log-config').click()
+ frame.locator('#nsslapd-errorlog').wait_for()
+ assert frame.locator('#nsslapd-errorlog').is_visible()
+
+ log.info('Click on Rotation Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Rotation Policy').click()
+ assert frame.get_by_text('Maximum Number Of Logs').is_visible()
+
+ log.info('Click on Deletion Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Deletion Policy').click()
+ assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible()
+
+
+def test_security_log_availability(topology_st, page, browser_name):
+ """ Test Security Log tabs visibility
+
+ :id: 1b851fa2-38c9-4865-9e24-f762ef80825f
+ :setup: Standalone instance
+ :steps:
+ 1. Click on Security Log button in the side panel and check if Security Log Location input field is visible.
+ 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible.
+ 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible.
+ :expectedresults:
+ 1. Element is visible
+ 2. Element is visible
+ 3. Element is visible
+ """
+ setup_login(page)
+ time.sleep(1)
+ frame = check_frame_assignment(page, browser_name)
+
+ log.info('Click on Security Log button and check if element is loaded.')
+ frame.locator('#security-log-config').click()
+ frame.locator('#nsslapd-securitylog').wait_for()
+ assert frame.locator('#nsslapd-securitylog').is_visible()
+
+ log.info('Click on Rotation Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Rotation Policy').click()
+ assert frame.get_by_text('Maximum Number Of Logs').is_visible()
+
+ log.info('Click on Deletion Policy tab and check if element is loaded.')
+ frame.get_by_role('tab', name='Deletion Policy').click()
+ assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
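The five log tests above (access, audit, audit failure, errors, security) differ only in the side-panel button id and the log-path field id, so they could also be written as one parametrized test. A minimal sketch under that assumption, reusing the module's setup_login and check_frame_assignment helpers and the ids from the tests above:

# Hypothetical parametrized form of the log-availability tests above.
LOG_PANELS = [
    ('#access-log-config', '#nsslapd-accesslog'),
    ('#audit-log-config', '#nsslapd-auditlog'),
    ('#auditfail-log-config', '#nsslapd-auditfaillog'),
    ('#error-log-config', '#nsslapd-errorlog'),
    ('#security-log-config', '#nsslapd-securitylog'),
]

@pytest.mark.parametrize('panel_id,log_field_id', LOG_PANELS)
def test_log_availability(topology_st, page, browser_name, panel_id, log_field_id):
    setup_login(page)
    frame = check_frame_assignment(page, browser_name)

    # Open the log panel and verify the log-path field is shown.
    frame.locator(panel_id).click()
    frame.locator(log_field_id).wait_for()
    assert frame.locator(log_field_id).is_visible()

    # Both policy tabs expose the same marker labels across all log types.
    for tab, marker in [('Rotation Policy', 'Maximum Number Of Logs'),
                        ('Deletion Policy', 'Log Archive Exceeds (in MB)')]:
        frame.get_by_role('tab', name=tab).click()
        assert frame.get_by_text(marker).is_visible()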
| 0 |
a007a6f59afcff5a81a2873dbbef44ee942adda7
|
389ds/389-ds-base
|
Issue 5046 - BUG - update concread (#5047)
Bug Description: an update to concread changed how the cache was
constructed and how stats are used.
Fix Description: Update to adapt to these changes. Additionally
this update has a number of performance improvements.
fixes: https://github.com/389ds/389-ds-base/issues/5046
Author: William Brown <[email protected]>
Review by: @vashirov, @droideck
|
commit a007a6f59afcff5a81a2873dbbef44ee942adda7
Author: Firstyear <[email protected]>
Date: Fri Dec 10 08:06:37 2021 +1000
Issue 5046 - BUG - update concread (#5047)
Bug Description: an update to concread changed how the cache was
constructed and how stats are used.
Fix Description: Update to adapt to these changes. Additionally
this update has a number of performance improvements.
fixes: https://github.com/389ds/389-ds-base/issues/5046
Author: William Brown <[email protected]>
Review by: @vashirov, @droideck
diff --git a/src/Cargo.lock b/src/Cargo.lock
index 45c534033..2cc343975 100644
--- a/src/Cargo.lock
+++ b/src/Cargo.lock
@@ -4,9 +4,9 @@ version = 3
[[package]]
name = "ahash"
-version = "0.7.4"
+version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
"getrandom",
"once_cell",
@@ -15,9 +15,9 @@ dependencies = [
[[package]]
name = "ansi_term"
-version = "0.11.0"
+version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
@@ -76,9 +76,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.70"
+version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0"
+checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee"
dependencies = [
"jobserver",
]
@@ -91,9 +91,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
-version = "2.33.3"
+version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"ansi_term",
"atty",
@@ -106,14 +106,15 @@ dependencies = [
[[package]]
name = "concread"
-version = "0.2.18"
+version = "0.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c874e11ba0c799d6d586ee8b0c5ad0f7444c0b8ec4e50c351a4f6417ae5bfb26"
+checksum = "dcaf6932721214c194881341e90099265f304bd010ed84dec84d96dc0e6765e5"
dependencies = [
"ahash",
"crossbeam",
"crossbeam-epoch",
"crossbeam-utils",
+ "lru",
"parking_lot",
"rand",
"smallvec",
@@ -249,6 +250,15 @@ dependencies = [
"wasi",
]
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+dependencies = [
+ "ahash",
+]
+
[[package]]
name = "hermit-abi"
version = "0.1.19"
@@ -260,9 +270,9 @@ dependencies = [
[[package]]
name = "instant"
-version = "0.1.10"
+version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d"
+checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if",
]
@@ -290,9 +300,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
-version = "0.2.102"
+version = "0.2.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2a5ac8f984bfcf3a823267e5fde638acc3325f6496633a5da6bb6eb2171e103"
+checksum = "f98a04dce437184842841303488f70d0188c5f51437d2a834dc097eafa909a01"
[[package]]
name = "librnsslapd"
@@ -331,11 +341,20 @@ dependencies = [
"cfg-if",
]
+[[package]]
+name = "lru"
+version = "0.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91"
+dependencies = [
+ "hashbrown",
+]
+
[[package]]
name = "memoffset"
-version = "0.6.4"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
@@ -348,9 +367,9 @@ checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
[[package]]
name = "openssl"
-version = "0.10.36"
+version = "0.10.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a"
+checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95"
dependencies = [
"bitflags",
"cfg-if",
@@ -362,9 +381,9 @@ dependencies = [
[[package]]
name = "openssl-sys"
-version = "0.9.67"
+version = "0.9.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058"
+checksum = "7df13d165e607909b363a4757a6f133f8a818a74e9d3a98d09c6128e15fa4c73"
dependencies = [
"autocfg",
"cc",
@@ -425,15 +444,15 @@ checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443"
[[package]]
name = "pkg-config"
-version = "0.3.19"
+version = "0.3.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
+checksum = "d1a3ea4f0dd7f1f3e512cf97bf100819aa547f36a6eccac8dbaae839eb92363e"
[[package]]
name = "ppv-lite86"
-version = "0.2.10"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
+checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba"
[[package]]
name = "proc-macro-hack"
@@ -443,9 +462,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
-version = "1.0.29"
+version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d"
+checksum = "fb37d2df5df740e582f28f8560cf425f52bb267d872fe58358eadb554909f07a"
dependencies = [
"unicode-xid",
]
@@ -465,9 +484,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.9"
+version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
+checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05"
dependencies = [
"proc-macro2",
]
@@ -532,9 +551,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.5"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
+checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568"
[[package]]
name = "scopeguard"
@@ -564,9 +583,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.68"
+version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
+checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527"
dependencies = [
"itoa",
"ryu",
@@ -591,9 +610,9 @@ dependencies = [
[[package]]
name = "smallvec"
-version = "1.6.1"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
+checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309"
[[package]]
name = "strsim"
@@ -603,9 +622,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
-version = "1.0.76"
+version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6f107db402c2c2055242dbf4d2af0e69197202e9faacbef9571bbe47f5a1b84"
+checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59"
dependencies = [
"proc-macro2",
"quote",
@@ -614,9 +633,9 @@ dependencies = [
[[package]]
name = "synstructure"
-version = "0.12.5"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa"
+checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
dependencies = [
"proc-macro2",
"quote",
@@ -649,9 +668,9 @@ dependencies = [
[[package]]
name = "tokio"
-version = "1.12.0"
+version = "1.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2c2416fdedca8443ae44b4527de1ea633af61d8f7169ffa6e72c5b53d24efcc"
+checksum = "70e992e41e0d2fb9f755b37446f20900f64446ef54874f40a60c78f021ac6144"
dependencies = [
"autocfg",
"pin-project-lite",
@@ -660,9 +679,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
-version = "1.3.0"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110"
+checksum = "c9efc1aba077437943f7515666aa2b882dfabfbfdf89c819ea75a8d6e9eaba5e"
dependencies = [
"proc-macro2",
"quote",
@@ -747,18 +766,18 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "zeroize"
-version = "1.4.2"
+version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970"
+checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619"
dependencies = [
"zeroize_derive",
]
[[package]]
name = "zeroize_derive"
-version = "1.2.0"
+version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7"
+checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73"
dependencies = [
"proc-macro2",
"quote",
diff --git a/src/librslapd/Cargo.toml b/src/librslapd/Cargo.toml
index 2312a6ba1..15c00a47b 100644
--- a/src/librslapd/Cargo.toml
+++ b/src/librslapd/Cargo.toml
@@ -15,7 +15,7 @@ crate-type = ["staticlib", "lib"]
[dependencies]
slapd = { path = "../slapd" }
libc = "0.2"
-concread = "^0.2.9"
+concread = "^0.2.20"
[build-dependencies]
cbindgen = "0.9"
diff --git a/src/librslapd/src/cache.rs b/src/librslapd/src/cache.rs
index e46c8fff2..092c81d83 100644
--- a/src/librslapd/src/cache.rs
+++ b/src/librslapd/src/cache.rs
@@ -1,5 +1,5 @@
// This exposes C-FFI capable bindings for the concread concurrently readable cache.
-use concread::arcache::{ARCache, ARCacheReadTxn, ARCacheWriteTxn};
+use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn, ARCacheWriteTxn};
use std::borrow::Borrow;
use std::convert::TryInto;
use std::ffi::{CStr, CString};
@@ -19,9 +19,12 @@ pub struct ARCacheCharWrite<'a> {
#[no_mangle]
pub extern "C" fn cache_char_create(max: usize, read_max: usize) -> *mut ARCacheChar {
- let cache: Box<ARCacheChar> = Box::new(ARCacheChar {
- inner: ARCache::new_size(max, read_max),
- });
+ let inner = if let Some(cache) = ARCacheBuilder::new().set_size(max, read_max).build() {
+ cache
+ } else {
+ return std::ptr::null_mut();
+ };
+ let cache: Box<ARCacheChar> = Box::new(ARCacheChar { inner });
Box::into_raw(cache)
}
@@ -58,7 +61,9 @@ pub extern "C" fn cache_char_stats(
*reader_hits = stats.reader_hits.try_into().unwrap();
*reader_includes = stats.reader_includes.try_into().unwrap();
*write_hits = stats.write_hits.try_into().unwrap();
- *write_inc_or_mod = stats.write_inc_or_mod.try_into().unwrap();
+ *write_inc_or_mod = (stats.write_includes + stats.write_modifies)
+ .try_into()
+ .unwrap();
*shared_max = stats.shared_max.try_into().unwrap();
*freq = stats.freq.try_into().unwrap();
*recent = stats.recent.try_into().unwrap();
| 0 |
c06a8faa9140668836dcf02722908319ec2e6e1e
|
389ds/389-ds-base
|
Keep unhashed password pseudo-attribute in the adding entry
Description:
The unhashed password pseudo-attribute is necessary for plugins that
handle passwords. The plugin could be any one, including pre/post
plugins and be-txn-pre/post plugins. On the other hand, the pseudo-
attribute should not be stored in the database.
This patch declares the unhashed password pseudo-attribute as an
operational attribute and allows the pseudo-attribute in an in-memory
entry. When the entry2str_ function creates a flat entry to store in
the database, it omits the pseudo-attribute.
Reviewed by [email protected] and [email protected] (Thank you!!)
|
commit c06a8faa9140668836dcf02722908319ec2e6e1e
Author: Noriko Hosoi <[email protected]>
Date: Tue Oct 18 14:16:44 2011 -0700
Keep unhashed password pseudo-attribute in the adding entry
Description:
unhashed password pseudo-attribute is necessary for plugins that
handle passwords. The plugin could be any one including pre/post-
plugin and be-txn-pre/post-plugin. On the other hand, the pseudo-
attribute should not be in the database.
This patch declares the unhashed password pseudo-attribute as an
operational attribute and allows the pseudo-attribute in the
in-memory entry. When creating a flat entry with the entry2str_
functions to store in the database, the pseudo-attribute is omitted.
Reviewed by [email protected] and [email protected] (Thank you!!)
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index 279469402..657c5ec4e 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -1386,7 +1386,9 @@ acl_check_mods(
}
if (lastmod &&
(strcmp (mod->mod_type, "modifiersname")== 0 ||
- strcmp (mod->mod_type, "modifytimestamp")== 0)) {
+ strcmp (mod->mod_type, "modifytimestamp")== 0 ||
+ strcmp (mod->mod_type, PSEUDO_ATTR_UNHASHEDUSERPASSWORD)== 0)
+ ) {
continue;
}
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index 0bf6ef021..19a7690e5 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -668,15 +668,6 @@ static void op_shared_add (Slapi_PBlock *pb)
slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
set_db_default_result_handlers(pb);
-
- /* Remove the unhashed password pseudo-attribute
- from the entry before duplicating the entry */
-
- if (unhashed_password_vals)
- {
- slapi_entry_delete_values(e, pwdtype, NULL);
- }
-
/* because be_add frees the entry */
ec = slapi_entry_dup(e);
add_target_dn= slapi_ch_strdup(slapi_sdn_get_ndn(slapi_entry_get_sdn_const(ec)));
@@ -722,14 +713,6 @@ static void op_shared_add (Slapi_PBlock *pb)
send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL,
"Function not implemented", 0, NULL);
}
-
- /* Reattach the unhashed password pseudo-attribute
- to the entry copy (ec), before calling the postop plugin */
- if(unhashed_password_vals)
- {
- slapi_entry_add_values_sv(ec, pwdtype, unhashed_password_vals);
- }
-
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
plugin_call_plugins(pb, internal_op ? SLAPI_PLUGIN_INTERNAL_POST_ADD_FN :
SLAPI_PLUGIN_POST_ADD_FN);
diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c
index dac3cc1a4..62dfea17f 100644
--- a/ldap/servers/slapd/attrsyntax.c
+++ b/ldap/servers/slapd/attrsyntax.c
@@ -1074,3 +1074,39 @@ slapi_attr_syntax_exists(const char *attr_name)
{
return attr_syntax_exists(attr_name);
}
+
+/*
+ * Add an attribute syntax using some default flags, etc.
+ * Returns an LDAP error code (LDAP_SUCCESS if all goes well)
+ */
+int
+slapi_add_internal_attr_syntax( const char *name, const char *oid,
+ const char *syntax, const char *mr_equality, unsigned long extraflags )
+{
+ int rc = LDAP_SUCCESS;
+ struct asyntaxinfo *asip;
+ char *names[2];
+ char *origins[2];
+ unsigned long std_flags = SLAPI_ATTR_FLAG_STD_ATTR | SLAPI_ATTR_FLAG_OPATTR;
+
+ names[0] = (char *)name;
+ names[1] = NULL;
+
+ origins[0] = SLAPD_VERSION_STR;
+ origins[1] = NULL;
+
+ rc = attr_syntax_create( oid, names, 1,
+ "internal server defined attribute type",
+ NULL, /* superior */
+ mr_equality, NULL, NULL, /* matching rules */
+ origins, syntax,
+ SLAPI_SYNTAXLENGTH_NONE,
+ std_flags | extraflags,
+ &asip );
+
+ if ( rc == LDAP_SUCCESS ) {
+ rc = attr_syntax_add( asip );
+ }
+
+ return rc;
+}
diff --git a/ldap/servers/slapd/back-ldbm/id2entry.c b/ldap/servers/slapd/back-ldbm/id2entry.c
index 432f80ad6..12e29514e 100644
--- a/ldap/servers/slapd/back-ldbm/id2entry.c
+++ b/ldap/servers/slapd/back-ldbm/id2entry.c
@@ -46,10 +46,6 @@
#define ID2ENTRY "id2entry"
-static char *protected_attrs_all [] = {PSEUDO_ATTR_UNHASHEDUSERPASSWORD,
- LDBM_ENTRYDN_STR,
- NULL};
-
/*
* The caller MUST check for DB_LOCK_DEADLOCK and DB_RUNRECOVERY returned
*/
@@ -64,7 +60,6 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt
int len, rc;
char temp_id[sizeof(ID)];
struct backentry *encrypted_entry = NULL;
- char **paap = NULL;
char *entrydn = NULL;
LDAPDebug( LDAP_DEBUG_TRACE, "=> id2entry_add( %lu, \"%s\" )\n",
@@ -125,16 +120,6 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt
LDAPDebug2Args( LDAP_DEBUG_TRACE,
"=> id2entry_add (dncache) ( %lu, \"%s\" )\n",
(u_long)e->ep_id, slapi_entry_get_dn_const(entry_to_use) );
- /*
- * If protected attributes exist in the entry,
- * we have to remove them before writing the entry to the database.
- */
- for (paap = protected_attrs_all; paap && *paap; paap++) {
- if (0 == slapi_entry_attr_find(entry_to_use, *paap, &eattr)) {
- /* a protected attr exists in the entry. removed it. */
- slapi_entry_delete_values(entry_to_use, *paap, NULL);
- }
- }
}
data.dptr = slapi_entry2str_with_options(entry_to_use, &len, options);
data.dsize = len + 1;
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index b41fabaf0..6859b29c9 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -51,9 +51,6 @@ static void *IDL_api[3];
static Slapi_PluginDesc pdesc = { "ldbm-backend", VENDOR,
DS_PACKAGE_VERSION, "high-performance LDAP backend database plugin" };
-static int add_ldbm_internal_attr_syntax( const char *name, const char *oid,
- const char *syntax, const char *mr_equality, unsigned long extraflags );
-
#ifdef _WIN32
int *module_ldap_debug = 0;
@@ -69,21 +66,21 @@ int
ldbm_back_add_schema( Slapi_PBlock *pb )
{
int rc = 0;
- rc = add_ldbm_internal_attr_syntax( LDBM_ENTRYDN_STR,
+ rc = slapi_add_internal_attr_syntax( LDBM_ENTRYDN_STR,
LDBM_ENTRYDN_OID, DN_SYNTAX_OID, DNMATCH_NAME,
- SLAPI_ATTR_FLAG_SINGLE );
+ SLAPI_ATTR_FLAG_SINGLE|SLAPI_ATTR_FLAG_NOUSERMOD );
- rc |= add_ldbm_internal_attr_syntax( "dncomp",
+ rc |= slapi_add_internal_attr_syntax( "dncomp",
LDBM_DNCOMP_OID, DN_SYNTAX_OID, DNMATCH_NAME,
- 0 );
+ SLAPI_ATTR_FLAG_NOUSERMOD );
- rc |= add_ldbm_internal_attr_syntax( LDBM_PARENTID_STR,
+ rc |= slapi_add_internal_attr_syntax( LDBM_PARENTID_STR,
LDBM_PARENTID_OID, DIRSTRING_SYNTAX_OID, CASEIGNOREMATCH_NAME,
- SLAPI_ATTR_FLAG_SINGLE );
+ SLAPI_ATTR_FLAG_SINGLE|SLAPI_ATTR_FLAG_NOUSERMOD );
- rc |= add_ldbm_internal_attr_syntax( "entryid",
+ rc |= slapi_add_internal_attr_syntax( "entryid",
LDBM_ENTRYID_OID, DIRSTRING_SYNTAX_OID, CASEIGNOREMATCH_NAME,
- SLAPI_ATTR_FLAG_SINGLE );
+ SLAPI_ATTR_FLAG_SINGLE|SLAPI_ATTR_FLAG_NOUSERMOD );
return rc;
}
@@ -280,41 +277,3 @@ fail:
slapi_pblock_set( pb, SLAPI_PLUGIN_PRIVATE, NULL );
return( -1 );
}
-
-
-/*
- * Add an attribute syntax using some default flags, etc.
- * Returns an LDAP error code (LDAP_SUCCESS if all goes well)
- */
-static int
-add_ldbm_internal_attr_syntax( const char *name, const char *oid,
- const char *syntax, const char *mr_equality, unsigned long extraflags )
-{
- int rc = LDAP_SUCCESS;
- struct asyntaxinfo *asip;
- char *names[2];
- char *origins[2];
- unsigned long std_flags = SLAPI_ATTR_FLAG_STD_ATTR | SLAPI_ATTR_FLAG_OPATTR
- | SLAPI_ATTR_FLAG_NOUSERMOD;
-
- names[0] = (char *)name;
- names[1] = NULL;
-
- origins[0] = SLAPD_VERSION_STR;
- origins[1] = NULL;
-
- rc = attr_syntax_create( oid, names, 1,
- "internal server defined attribute type",
- NULL, /* superior */
- mr_equality, NULL, NULL, /* matching rules */
- origins, syntax,
- SLAPI_SYNTAXLENGTH_NONE,
- std_flags | extraflags,
- &asip );
-
- if ( rc == LDAP_SUCCESS ) {
- rc = attr_syntax_add( asip );
- }
-
- return rc;
-}
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index 9e0f0fbd6..8a91e39c2 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -63,6 +63,13 @@
/* a helper function to set special rdn to a tombstone entry */
static int _entry_set_tombstone_rdn(Slapi_Entry *e, const char *normdn);
+static int is_type_protected(const char *type);
+
+/* protected attributes which are not included in the flattened entry,
+ * which will be stored in the db. */
+static char *protected_attrs_all [] = {PSEUDO_ATTR_UNHASHEDUSERPASSWORD,
+ SLAPI_ATTR_ENTRYDN,
+ NULL};
/*
* An attribute name is of the form 'basename[;option]'.
@@ -1431,27 +1438,34 @@ slapi_str2entry_ext( const char *dn, char *s, int flags )
return e;
}
+/*
+ * If the attribute type is in the protected list, it returns size 0.
+ */
static size_t
-entry2str_internal_size_value( const char *attrtype, const Slapi_Value *v, int entry2str_ctrl, int attribute_state, int value_state )
-{
- size_t elen= 0;
- if(attrtype!=NULL)
+entry2str_internal_size_value( const char *attrtype, const Slapi_Value *v,
+ int entry2str_ctrl, int attribute_state,
+ int value_state )
+{
+ size_t elen = 0;
+ size_t attrtypelen;
+ if((NULL == attrtype) || is_type_protected(attrtype)) {
+ goto bail;
+ }
+ attrtypelen = strlen(attrtype);
+ if(entry2str_ctrl & SLAPI_DUMP_STATEINFO)
{
- size_t attrtypelen= strlen(attrtype);
- if(entry2str_ctrl & SLAPI_DUMP_STATEINFO)
+ attrtypelen+= csnset_string_size(v->v_csnset);
+ if (attribute_state==ATTRIBUTE_DELETED)
{
- attrtypelen+= csnset_string_size(v->v_csnset);
- if (attribute_state==ATTRIBUTE_DELETED)
- {
- attrtypelen += DELETED_ATTR_STRSIZE;
- }
- if(value_state==VALUE_DELETED)
- {
- attrtypelen += DELETED_VALUE_STRSIZE;
- }
+ attrtypelen += DELETED_ATTR_STRSIZE;
+ }
+ if(value_state==VALUE_DELETED)
+ {
+ attrtypelen += DELETED_VALUE_STRSIZE;
}
- elen = LDIF_SIZE_NEEDED(attrtypelen, slapi_value_get_berval(v)->bv_len);
}
+ elen = LDIF_SIZE_NEEDED(attrtypelen, slapi_value_get_berval(v)->bv_len);
+bail:
return elen;
}
@@ -1599,6 +1613,18 @@ entry2str_internal_put_valueset( const char *attrtype, const CSN *attrcsn, CSNTy
}
}
+static int
+is_type_protected(const char *type)
+{
+ char **paap = NULL;
+ for (paap = protected_attrs_all; paap && *paap; paap++) {
+ if (0 == strcasecmp(type, *paap)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
static void
entry2str_internal_put_attrlist( const Slapi_Attr *attrlist, int attr_state, int entry2str_ctrl, char **ecur, char **typebuf, size_t *typebuf_len)
{
@@ -1614,7 +1640,8 @@ entry2str_internal_put_attrlist( const Slapi_Attr *attrlist, int attr_state, int
/* don't dump uniqueid if not asked */
if (!(strcasecmp(a->a_type, SLAPI_ATTR_UNIQUEID) == 0 &&
- !(SLAPI_DUMP_UNIQUEID & entry2str_ctrl)))
+ !(SLAPI_DUMP_UNIQUEID & entry2str_ctrl)) &&
+ !is_type_protected(a->a_type))
{
/* Putting present attribute values */
/* put "<type>:[:] <value>" line for each value */
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 219ac7214..e24023767 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -870,19 +870,6 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw)
slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
set_db_default_result_handlers(pb);
-
- /* Remove the unhashed password pseudo-attribute prior */
- /* to db access */
- slapi_mods_init_passin (&smods, mods);
- if (!unhashed_pw_attr) {
- unhashed_pw_attr = slapi_attr_syntax_normalize(PSEUDO_ATTR_UNHASHEDUSERPASSWORD);
- }
- if (slapi_mods_get_num_mods(&smods)) {
- remove_mod (&smods, unhashed_pw_attr, &unhashed_pw_smod);
- slapi_pblock_set (pb, SLAPI_MODIFY_MODS,
- (void*)slapi_mods_get_ldapmods_passout (&smods));
- }
-
if (be->be_modify != NULL)
{
if ((rc = (*be->be_modify)(pb)) == 0)
@@ -920,27 +907,6 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw)
send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL,
"Function not implemented", 0, NULL);
}
- /* Add the pseudo-attribute prior to calling the postop plugins */
- if (pw_change)
- {
- LDAPMod *lc_mod = NULL;
-
- slapi_pblock_get (pb, SLAPI_MODIFY_MODS, &mods);
- slapi_mods_init_passin (&smods, mods);
- for ( lc_mod = slapi_mods_get_first_mod(&unhashed_pw_smod); lc_mod;
- lc_mod = slapi_mods_get_next_mod(&unhashed_pw_smod) )
- {
- Slapi_Mod lc_smod;
- slapi_mod_init_byval(&lc_smod, lc_mod); /* copies lc_mod */
- /* this extracts the copy of lc_mod and finalizes lc_smod too */
- slapi_mods_add_ldapmod(&smods,
- slapi_mod_get_ldapmod_passout(&lc_smod));
- }
- slapi_pblock_set (pb, SLAPI_MODIFY_MODS,
- (void*)slapi_mods_get_ldapmods_passout (&smods));
- }
- slapi_mods_done(&unhashed_pw_smod); /* can finalize now */
-
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
plugin_call_plugins(pb, internal_op ? SLAPI_PLUGIN_INTERNAL_POST_MODIFY_FN :
@@ -973,6 +939,7 @@ free_and_return:
slapi_be_Unlock(be);
slapi_sdn_done(&sdn);
+ slapi_mods_done(&unhashed_pw_smod); /* can finalize now */
if (unhashed_pw_attr)
slapi_ch_free ((void**)&unhashed_pw_attr);
diff --git a/ldap/servers/slapd/pw_mgmt.c b/ldap/servers/slapd/pw_mgmt.c
index 7aca14826..aa76e03a6 100644
--- a/ldap/servers/slapd/pw_mgmt.c
+++ b/ldap/servers/slapd/pw_mgmt.c
@@ -301,6 +301,10 @@ pw_init ( void ) {
slapdFrontendConfig = getFrontendConfig();
pw_mod_allowchange_aci (!slapdFrontendConfig->pw_policy.pw_change &&
!slapdFrontendConfig->pw_policy.pw_must_change);
+
+ slapi_add_internal_attr_syntax( PSEUDO_ATTR_UNHASHEDUSERPASSWORD,
+ PSEUDO_ATTR_UNHASHEDUSERPASSWORD_OID,
+ OCTETSTRING_SYNTAX_OID, 0, 0 );
}
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 98ba79cb6..3a545642d 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2291,6 +2291,8 @@ extern char *attr_dataversion;
#define MTN_CONTROL_USE_ONE_BACKEND_OID "2.16.840.1.113730.3.4.14"
#define MTN_CONTROL_USE_ONE_BACKEND_EXT_OID "2.16.840.1.113730.3.4.20"
+#define PSEUDO_ATTR_UNHASHEDUSERPASSWORD_OID "2.16.840.1.113730.3.1.2110"
+
/* virtualListViewError is a relatively new concept that was added long
* after we implemented VLV. Until added to LDAP SDK, we define
* virtualListViewError here. Once it's added, this define would go away. */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 862a23b08..c63e31279 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -374,6 +374,7 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...)
#define SLAPI_ATTR_NSCP_ENTRYDN "nscpEntryDN"
#define SLAPI_ATTR_ENTRYUSN "entryusn"
#define SLAPI_ATTR_ENTRYUSN_PREV "preventryusn"
+#define SLAPI_ATTR_ENTRYDN "entrydn"
/* opaque structures */
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 6f1e2ca88..bedc9f5d3 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1235,6 +1235,8 @@ int plugin_enabled(const char *plugin_name, void *identity);
*/
int is_slapd_running();
+/* attrsyntax.c */
+int slapi_add_internal_attr_syntax( const char *name, const char *oid, const char *syntax, const char *mr_equality, unsigned long extraflags );
#ifdef __cplusplus
}
| 0 |
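The commit above keeps the unhashed password pseudo-attribute in the in-memory entry and filters it out only when the entry is flattened for the database. A minimal, self-contained C sketch of that filtering idea follows; the protected list and the case-insensitive check mirror protected_attrs_all/is_type_protected() from the diff, while the attribute spellings (including "unhashed#user#password") and the sample entry are assumptions made for illustration only.

    #include <stdio.h>
    #include <strings.h>   /* strcasecmp */

    /* Attribute types that must never reach the flattened on-disk entry.
     * The real list is protected_attrs_all in entry.c; the spellings here
     * (notably "unhashed#user#password") are assumptions for the sketch. */
    static const char *protected_attrs[] = {
        "unhashed#user#password",
        "entrydn",
        NULL
    };

    /* Same shape as is_type_protected() in the diff: a case-insensitive
     * match against the protected list. */
    static int is_type_protected(const char *type)
    {
        for (const char **p = protected_attrs; *p; p++) {
            if (strcasecmp(type, *p) == 0) {
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        const char *entry_attrs[] = { "cn", "userPassword",
                                      "unhashed#user#password", "entrydn", NULL };

        /* Only non-protected attributes would be serialized by entry2str. */
        for (const char **a = entry_attrs; *a; a++) {
            printf("%s: %s\n", is_type_protected(*a) ? "skip " : "write", *a);
        }
        return 0;
    }

The real code applies the same check both when sizing the flattened entry (entry2str_internal_size_value) and when writing the attribute lists (entry2str_internal_put_attrlist).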
f593ae7790e3372c6812bfe59e58e6d709ec171f
|
389ds/389-ds-base
|
Ticket #48918 - Upgrade to 389-ds-base >= 1.3.5.5 doesn't install 389-ds-base-snmp
Bug description:
During an upgrade from a 389-ds-base version < 1.3.5.5, the additional
package 389-ds-base-snmp is not installed.
Fix description:
Move "Obsoletes:" section from %description to %package.
https://fedorahosted.org/389/ticket/48918
Reviewed by: [email protected].
|
commit f593ae7790e3372c6812bfe59e58e6d709ec171f
Author: Viktor Ashirov <[email protected]>
Date: Mon Jul 11 10:10:42 2016 +0200
Ticket #48918 - Upgrade to 389-ds-base >= 1.3.5.5 doesn't install 389-ds-base-snmp
Bug description:
During an upgrade from a 389-ds-base version < 1.3.5.5, the additional
package 389-ds-base-snmp is not installed.
Fix description:
Move "Obsoletes:" section from %description to %package.
https://fedorahosted.org/389/ticket/48918
Reviewed by: [email protected].
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index d08d37962..0924cb512 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -47,6 +47,8 @@ Group: System Environment/Daemons
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
Obsoletes: %{name}-selinux
Conflicts: selinux-policy-base < 3.9.8
+# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
+Obsoletes: %{name} <= 1.3.5.4
Requires: %{name}-libs = %{version}-%{release}
Provides: ldif2ldbm
@@ -152,9 +154,6 @@ isn't what you want. Please contact support immediately.
Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
%endif
-# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
-Obsoletes: %{name} <= 1.3.5.4
-
%package libs
Summary: Core libraries for 389 Directory Server (%{variant})
Group: System Environment/Daemons
@@ -213,13 +212,12 @@ Development Libraries and headers for the 389 Directory Server base package.
Summary: SNMP Agent for 389 Directory Server
Group: System Environment/Daemons
Requires: %{name} = %{version}-%{release}
+# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
+Obsoletes: %{name} <= 1.3.5.4
%description snmp
SNMP Agent for the 389 Directory Server base package.
-# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
-Obsoletes: %{name} <= 1.3.5.4
-
%package tests
Summary: The lib389 Continuous Integration Tests
Group: Development/Libraries
| 0 |
7c9ccb802f153eabcef84bc30fb45e69910b3690
|
389ds/389-ds-base
|
Ticket 47902 - UI - add continuous refresh log feature
Description: Added checkbox to turn on server log continuous refresh
https://pagure.io/389-ds-base/issue/49702
Reviewed by: spichugi(Thanks!)
|
commit 7c9ccb802f153eabcef84bc30fb45e69910b3690
Author: Mark Reynolds <[email protected]>
Date: Wed Jun 6 17:44:50 2018 -0400
Ticket 47902 - UI - add continuous refresh log feature
Description: Added checkbox to turn on server log continuous refresh
https://pagure.io/389-ds-base/issue/49702
Reviewed by: spichugi(Thanks!)
diff --git a/src/cockpit/389-console/css/ds.css b/src/cockpit/389-console/css/ds.css
index 5717bbeae..8b6db11f3 100644
--- a/src/cockpit/389-console/css/ds.css
+++ b/src/cockpit/389-console/css/ds.css
@@ -983,7 +983,6 @@ Zbutton:focus {
.ds-adj-btn {
padding: 0 !important;
margin-left: 10px !important;
- height: 26px !important;
width: 100px !important;
}
@@ -1247,7 +1246,11 @@ option {
}
.ds-left-margin {
- margin-left: 10px;
+ margin-left: 10px !important;
+}
+
+.ds-sm-left-margin {
+ margin-left: 5px !important;
}
.ds-table-btn {
@@ -1354,3 +1357,4 @@ option {
.ds-suffix-cfg-div {
min-width: 500px;
}
+
diff --git a/src/cockpit/389-console/js/monitor.js b/src/cockpit/389-console/js/monitor.js
index 3768f9fd8..16621b746 100644
--- a/src/cockpit/389-console/js/monitor.js
+++ b/src/cockpit/389-console/js/monitor.js
@@ -1,4 +1,7 @@
-
+var accesslog_cont_refresh;
+var auditlog_cont_refresh;
+var auditfaillog_cont_refresh;
+var erorrslog_cont_refresh;
function gen_ratio_chart(ratio, chart ) {
var c3ChartDefaults = patternfly.c3ChartDefaults();
@@ -270,6 +273,38 @@ $(document).ready( function() {
}
});
+ // The continuous log refresh intervals
+ $("#accesslog-cont-refresh").change(function() {
+ if(this.checked) {
+ accesslog_cont_refresh = setInterval(refresh_access_log, 1000);
+ } else {
+ clearInterval(accesslog_cont_refresh);
+ }
+ });
+
+ $("#auditlog-cont-refresh").change(function() {
+ if(this.checked) {
+ auditlog_cont_refresh = setInterval(refresh_audit_log, 1000);
+ } else {
+ clearInterval(auditlog_cont_refresh);
+ }
+ });
+
+ $("#auditfaillog-cont-refresh").change(function() {
+ if(this.checked) {
+ auditfaillog_cont_refresh = setInterval(refresh_auditfail_log, 1000);
+ } else {
+ clearInterval(auditfaillog_cont_refresh);
+ }
+ });
+
+ $("#errorslog-cont-refresh").change(function() {
+ if(this.checked) {
+ errorslog_cont_refresh = setInterval(refresh_errors_log, 1000);
+ } else {
+ clearInterval(errorslog_cont_refresh);
+ }
+ });
$(document).on('click', '.repl-detail-btn', function(e) {
e.preventDefault();
diff --git a/src/cockpit/389-console/monitor.html b/src/cockpit/389-console/monitor.html
index ccb52fc7a..440f97ee7 100644
--- a/src/cockpit/389-console/monitor.html
+++ b/src/cockpit/389-console/monitor.html
@@ -537,7 +537,8 @@
<option>2000</option>
<option>5000</option>
<option>10000</option>
- </select><button id="accesslog-refresh-btn" class="ds-adj-btn">Refresh</button>
+ </select><button id="accesslog-refresh-btn" class="ds-adj-btn">Refresh</button><label
+ class="ds-left-margin">Continuously Refresh<input type="checkbox" class="ds-sm-left-margin" id="accesslog-cont-refresh"></label>
<textarea id="accesslog-area" class="ds-logarea"></textarea>
<p></p>
</div>
@@ -557,7 +558,8 @@
<option>2000</option>
<option>5000</option>
<option>10000</option>
- </select><button id="auditlog-refresh-btn" class="ds-adj-btn">Refresh</button>
+ </select><button id="auditlog-refresh-btn" class="ds-adj-btn">Refresh</button><label
+ class="ds-left-margin">Continuously Refresh<input type="checkbox" class="ds-sm-left-margin" id="auditlog-cont-refresh"></label>
<textarea id="auditlog-area" class="ds-logarea"></textarea>
<p></p>
</div>
@@ -577,7 +579,8 @@
<option>2000</option>
<option>5000</option>
<option>10000</option>
- </select><button id="auditfaillog-refresh-btn" class="ds-adj-btn">Refresh</button>
+ </select><button id="auditfaillog-refresh-btn" class="ds-adj-btn">Refresh</button><label
+ class="ds-left-margin">Continuously Refresh<input type="checkbox" class="ds-sm-left-margin" id="auditfaillog-cont-refresh"></label>
<textarea id="auditfaillog-area" class="ds-logarea"></textarea>
<p></p>
</div>
@@ -597,7 +600,8 @@
<option>2000</option>
<option>5000</option>
<option>10000</option>
- </select><button id="errorslog-refresh-btn" class="ds-adj-btn">Refresh</button>
+ </select><button id="errorslog-refresh-btn" class="ds-adj-btn">Refresh</button><label
+ class="ds-left-margin">Continuously Refresh<input type="checkbox" class="ds-sm-left-margin" id="errorslog-cont-refresh"></label>
<textarea id="errorslog-area" class="ds-logarea"></textarea>
<p></p>
</div>
| 0 |
a942379b69600c4827268357e894b548271d2013
|
389ds/389-ds-base
|
Issue 5465 - Fix dbscan linking (#5466)
Bug Description:
Linking of dbscan fails with:
ld: ./.libs/libback-ldbm.so: undefined reference to symbol 'slapi_ch_free'
ld: ./.libs/libslapd.so.0: error adding symbols: DSO missing from command line
Fix Description:
Add the missing libslapd.so to the command line.
relates: https://github.com/389ds/389-ds-base/issues/5465
Author: Stanislav Levin
Reviewed by: Mark Reynolds (thanks)
|
commit a942379b69600c4827268357e894b548271d2013
Author: Stanislav Levin <[email protected]>
Date: Thu Sep 29 15:04:09 2022 +0300
Issue 5465 - Fix dbscan linking (#5466)
Bug Description:
Linking of dbscan fails with:
ld: ./.libs/libback-ldbm.so: undefined reference to symbol 'slapi_ch_free'
ld: ./.libs/libslapd.so.0: error adding symbols: DSO missing from command line
Fix Description:
Add the missing libslapd.so to the command line.
relates: https://github.com/389ds/389-ds-base/issues/5465
Author: Stanislav Levin
Reviewed by: Mark Reynolds (thanks)
diff --git a/Makefile.am b/Makefile.am
index e89fdb50e..f5ffca314 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1786,7 +1786,7 @@ libwhoami_plugin_la_LDFLAGS = -avoid-version
dbscan_SOURCES = ldap/servers/slapd/tools/dbscan.c
dbscan_CPPFLAGS = $(NSPR_INCLUDES) $(AM_CPPFLAGS)
-dbscan_LDADD = $(NSPR_LINK) $(DB_IMPL)
+dbscan_LDADD = $(NSPR_LINK) $(DB_IMPL) libslapd.la
#------------------------
# ldap-agent
| 0 |
bf49d6a6b7a7865b29de5b1441bc08b4426ba3a2
|
389ds/389-ds-base
|
Allow rpm builds to be run without configure
The previous patch that added support to build RPM packages required
you to run configure before we could even create a SRPM. We had to
do this since Makefile is generated by automake when configure is
run. This isn't useful when you just want to create a SRPM to use
for performing mock builds since the system you are creating a SRPM
on might not even have the proper build dependencies available.
This patch removes the need to run configure when building RPMS. A
new Makefile is added that is only used for RPM related tasks. A
target to build a SRPM was also added to allow one to create a SRPM
for using with mock builds. To build a SRPM or RPMS with this patch,
you can just run one of the following commands with a freshly checked
out source tree:
make -f rpm.mk srpms
make -f rpm.mk rpms
The rpms, srpms, and source tarball will be created in a dist
directory in the build tree.
|
commit bf49d6a6b7a7865b29de5b1441bc08b4426ba3a2
Author: Nathan Kinder <[email protected]>
Date: Sun Apr 21 20:35:48 2013 -0700
Allow rpm builds to be run without configure
The previous patch that added support to build RPM packages required
you to run configure before we could even create a SRPM. We had to
do this since Makefile is generated by automake when configure is
run. This isn't useful when you just want to create a SRPM to use
for performing mock builds since the system you are creating a SRPM
on might not even have the proper build dependencies available.
This patch removes the need to run configure when building RPMS. A
new Makefile is added that is only used for RPM related tasks. A
target to build a SRPM was also added to allow one to create a SRPM
for using with mock builds. To build a SRPM or RPMS with this patch,
you can just run one of the following commands with a freshly checked
out source tree:
make -f rpm.mk srpms
make -f rpm.mk rpms
The rpms, srpms, and source tarball will be created in a dist
directory in the build tree.
diff --git a/Makefile.am b/Makefile.am
index a35f8e48d..7f00b0729 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5,15 +5,6 @@ SPACE := $(NULLSTRING) # the space is between the ) and the #
COLON := $(NULLSTRING):# a colon
QUOTE := $(NULLSTRING)"# a double quote"
-#------------------------
-# RPM Packaging
-#------------------------
-RPMBUILD ?= $(PWD)/rpmbuild
-RPM_VERSION=@rpm_version@
-RPM_RELEASE=@rpm_release@
-RPM_NAME_VERSION=$(PACKAGE)-$(RPM_VERSION)
-TARBALL=$(RPM_NAME_VERSION).tar.bz2
-
#------------------------
# Compiler Flags
#------------------------
@@ -1684,35 +1675,3 @@ git-archive:
fi ; \
git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2
-local-archive:
- -mkdir -p dist/$(RPM_NAME_VERSION)
- rsync -a --exclude=dist --exclude=.git --exclude=rpmbuild $(srcdir)/. dist/$(RPM_NAME_VERSION)
-
-tarballs: local-archive
- -mkdir -p dist/sources
- cd dist; tar cfj sources/$(TARBALL) $(RPM_NAME_VERSION)
- rm -rf dist/$(RPM_NAME_VERSION)
-
-rpmroot:
- rm -rf $(RPMBUILD)
- mkdir -p $(RPMBUILD)/BUILD
- mkdir -p $(RPMBUILD)/RPMS
- mkdir -p $(RPMBUILD)/SOURCES
- mkdir -p $(RPMBUILD)/SPECS
- mkdir -p $(RPMBUILD)/SRPMS
-
-rpmdistdir:
- mkdir -p dist/rpms
- mkdir -p dist/srpms
-
-rpms: rpmroot rpmdistdir tarballs
- cp dist/sources/$(TARBALL) $(RPMBUILD)/SOURCES/
- cp $(srcdir)/rpm/$(PACKAGE)-* $(RPMBUILD)/SOURCES/
- sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \
- $(srcdir)/rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec
- rpmbuild --define "_topdir $(RPMBUILD)" -ba $(RPMBUILD)/SPECS/$(PACKAGE).spec
- cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
- rm -rf $(RPMBUILD)
-
diff --git a/Makefile.in b/Makefile.in
index b34d1f682..c1f207ee9 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1310,8 +1310,6 @@ prefix = @prefix@
program_transform_name = @program_transform_name@
propertydir = $(datadir)@propertydir@
psdir = @psdir@
-rpm_release = @rpm_release@
-rpm_version = @rpm_version@
sampledatadir = $(datadir)@sampledatadir@
sasl_inc = @sasl_inc@
sasl_lib = @sasl_lib@
@@ -1348,10 +1346,6 @@ NULLSTRING :=
SPACE := $(NULLSTRING) # the space is between the ) and the #
COLON := $(NULLSTRING):# a colon
QUOTE := $(NULLSTRING)"# a double quote"
-RPM_VERSION = @rpm_version@
-RPM_RELEASE = @rpm_release@
-RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)
-TARBALL = $(RPM_NAME_VERSION).tar.bz2
#------------------------
# Compiler Flags
@@ -10909,11 +10903,6 @@ uninstall-man: uninstall-man1 uninstall-man8
uninstall-updateDATA uninstall-updateSCRIPTS
-#------------------------
-# RPM Packaging
-#------------------------
-RPMBUILD ?= $(PWD)/rpmbuild
-
clean-local:
-rm -rf dist
-rm -rf selinux-built
@@ -10993,38 +10982,6 @@ git-archive:
fi ; \
git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2
-local-archive:
- -mkdir -p dist/$(RPM_NAME_VERSION)
- rsync -a --exclude=dist --exclude=.git --exclude=rpmbuild $(srcdir)/. dist/$(RPM_NAME_VERSION)
-
-tarballs: local-archive
- -mkdir -p dist/sources
- cd dist; tar cfj sources/$(TARBALL) $(RPM_NAME_VERSION)
- rm -rf dist/$(RPM_NAME_VERSION)
-
-rpmroot:
- rm -rf $(RPMBUILD)
- mkdir -p $(RPMBUILD)/BUILD
- mkdir -p $(RPMBUILD)/RPMS
- mkdir -p $(RPMBUILD)/SOURCES
- mkdir -p $(RPMBUILD)/SPECS
- mkdir -p $(RPMBUILD)/SRPMS
-
-rpmdistdir:
- mkdir -p dist/rpms
- mkdir -p dist/srpms
-
-rpms: rpmroot rpmdistdir tarballs
- cp dist/sources/$(TARBALL) $(RPMBUILD)/SOURCES/
- cp $(srcdir)/rpm/$(PACKAGE)-* $(RPMBUILD)/SOURCES/
- sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \
- $(srcdir)/rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec
- rpmbuild --define "_topdir $(RPMBUILD)" -ba $(RPMBUILD)/SPECS/$(PACKAGE).spec
- cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
- rm -rf $(RPMBUILD)
-
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
diff --git a/configure b/configure
index 5c57f94b4..2e4c7992f 100755
--- a/configure
+++ b/configure
@@ -616,8 +616,6 @@ ac_default_prefix=/opt/$PACKAGE_NAME
ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
-rpm_release
-rpm_version
vendor
capbrand
brand
@@ -3345,10 +3343,6 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
-# Set the version and release for developer RPM builds
-rpm_version=$RPM_VERSION
-rpm_release=$RPM_RELEASE
-
# Checks for programs.
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
@@ -21164,9 +21158,6 @@ fi
-
-
-
diff --git a/configure.ac b/configure.ac
index 7d611494b..c6edbfa01 100644
--- a/configure.ac
+++ b/configure.ac
@@ -21,10 +21,6 @@ AC_SUBST([CONSOLE_VERSION])
AM_MAINTAINER_MODE
AC_CANONICAL_HOST
-# Set the version and release for developer RPM builds
-rpm_version=$RPM_VERSION
-rpm_release=$RPM_RELEASE
-
# Checks for programs.
AC_PROG_CXX
AC_PROG_CC
@@ -690,9 +686,6 @@ AC_SUBST(brand)
AC_SUBST(capbrand)
AC_SUBST(vendor)
-AC_SUBST(rpm_version)
-AC_SUBST(rpm_release)
-
# AC_DEFINE([USE_OLD_UNHASHED], [], [Use old unhashed code])
AC_DEFINE([LDAP_DEBUG], [1], [LDAP debug flag])
AC_DEFINE([LDAP_DONT_USE_SMARTHEAP], [1], [Don't use smartheap])
diff --git a/rpm.mk b/rpm.mk
new file mode 100644
index 000000000..dfbadc6f9
--- /dev/null
+++ b/rpm.mk
@@ -0,0 +1,51 @@
+RPMBUILD ?= $(PWD)/rpmbuild
+RPM_VERSION ?= $(shell $(PWD)/rpm/rpmverrel.sh version)
+RPM_RELEASE ?= $(shell $(PWD)/rpm/rpmverrel.sh release)
+PACKAGE = 389-ds-base
+RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)
+TARBALL = $(RPM_NAME_VERSION).tar.bz2
+
+clean:
+ rm -rf dist
+ rm -rf rpmbuild
+
+local-archive:
+ -mkdir -p dist/$(RPM_NAME_VERSION)
+ rsync -a --exclude=dist --exclude=.git --exclude=rpmbuild . dist/$(RPM_NAME_VERSION)
+
+tarballs: local-archive
+ -mkdir -p dist/sources
+ cd dist; tar cfj sources/$(TARBALL) $(RPM_NAME_VERSION)
+ rm -rf dist/$(RPM_NAME_VERSION)
+
+rpmroot:
+ rm -rf $(RPMBUILD)
+ mkdir -p $(RPMBUILD)/BUILD
+ mkdir -p $(RPMBUILD)/RPMS
+ mkdir -p $(RPMBUILD)/SOURCES
+ mkdir -p $(RPMBUILD)/SPECS
+ mkdir -p $(RPMBUILD)/SRPMS
+
+rpmdistdir:
+ mkdir -p dist/rpms
+
+srpmdistdir:
+ mkdir -p dist/srpms
+
+rpmbuildprep:
+ cp dist/sources/$(TARBALL) $(RPMBUILD)/SOURCES/
+ cp rpm/$(PACKAGE)-* $(RPMBUILD)/SOURCES/
+ sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \
+ rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec
+
+srpms: rpmroot srpmdistdir tarballs rpmbuildprep
+ rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
+ cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
+ rm -rf $(RPMBUILD)
+
+rpms: rpmroot srpmdistdir rpmdistdir tarballs rpmbuildprep
+ rpmbuild --define "_topdir $(RPMBUILD)" -ba $(RPMBUILD)/SPECS/$(PACKAGE).spec
+ cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)-*.rpm dist/rpms/
+ cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)-*.rpm dist/rpms/
+ cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
+ rm -rf $(RPMBUILD)
diff --git a/rpm/rpmverrel.sh b/rpm/rpmverrel.sh
new file mode 100755
index 000000000..064b052ef
--- /dev/null
+++ b/rpm/rpmverrel.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# Source VERSION.sh to set the version
+# and release environment variables.
+source ./VERSION.sh
+
+if [ "$1" = "version" ]; then
+ echo $RPM_VERSION
+elif [ "$1" = "release" ]; then
+ echo $RPM_RELEASE
+fi
| 0 |
cb80c9943cfde17a36108b49a57616cf260b070e
|
389ds/389-ds-base
|
re-enable ppc support
NOTE: We do not have support for atomic 64-bit counters on ppc - ppc platforms use the mutex approach instead
|
commit cb80c9943cfde17a36108b49a57616cf260b070e
Author: Rich Megginson <[email protected]>
Date: Mon Apr 6 18:11:18 2009 +0000
re-enable ppc support
NOTE: We do not have support for atomic 64-bit counters on ppc - ppc platforms use the mutex approach instead
diff --git a/ldap/servers/slapd/slapi_counter.c b/ldap/servers/slapd/slapi_counter.c
index c9930d590..0f56c8ace 100644
--- a/ldap/servers/slapd/slapi_counter.c
+++ b/ldap/servers/slapd/slapi_counter.c
@@ -53,6 +53,7 @@ PRUint64 _sparcv9_AtomicSub_il(PRUint64 *address, PRUint64 val);
#endif
#endif
+#ifdef ATOMIC_64BIT_OPERATIONS
#if defined LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH)
/* On systems that don't have the 64-bit GCC atomic builtins, we need to
* implement our own atomic functions using inline assembly code. */
@@ -69,6 +70,8 @@ PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval);
#define __sync_add_and_fetch __sync_add_and_fetch_8
#define __sync_sub_and_fetch __sync_sub_and_fetch_8
#endif
+#endif /* ATOMIC_64BIT_OPERATIONS */
+
/*
* Counter Structure
@@ -428,6 +431,7 @@ PRUint64 slapi_counter_get_value(Slapi_Counter *counter)
return value;
}
+#ifdef ATOMIC_64BIT_OPERATIONS
#if defined LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH)
/* On systems that don't have the 64-bit GCC atomic builtins, we need to
* implement our own atomic add and subtract functions using inline
@@ -520,3 +524,4 @@ PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval)
return retval;
}
#endif /* LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH) */
+#endif /* ATOMIC_64BIT_OPERATIONS */
| 0 |
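The note above explains that ppc builds fall back to a mutex-protected counter because the 64-bit atomic builtins are unavailable there. Below is a small standalone C sketch of the two code paths the slapi_counter code chooses between; the ATOMIC_64BIT_OPERATIONS guard and __sync_add_and_fetch come from the diff, while the counter type and main() are illustrative only.

    #include <stdio.h>
    #include <stdint.h>
    #include <pthread.h>

    typedef struct {
        uint64_t value;
        pthread_mutex_t lock;   /* only needed where 64-bit atomics are missing */
    } counter_t;

    static uint64_t counter_add(counter_t *c, uint64_t addval)
    {
    #ifdef ATOMIC_64BIT_OPERATIONS
        /* Fast path: GCC atomic builtin, used where the platform supports it. */
        return __sync_add_and_fetch(&c->value, addval);
    #else
        /* ppc-style fallback: serialize updates with a mutex. */
        uint64_t newval;
        pthread_mutex_lock(&c->lock);
        c->value += addval;
        newval = c->value;
        pthread_mutex_unlock(&c->lock);
        return newval;
    #endif
    }

    int main(void)
    {
        counter_t c = { 0, PTHREAD_MUTEX_INITIALIZER };
        printf("counter = %llu\n", (unsigned long long)counter_add(&c, 5));
        return 0;
    }

Built without -DATOMIC_64BIT_OPERATIONS (the ppc case), only the mutex path is compiled; with the define on a platform that provides the builtin, the lock is never touched.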
005000430e3e43c3048e03da50402a2268def1c4
|
389ds/389-ds-base
|
Ticket #48400 - ldclt - segmentation fault error while binding
Description: When "-e randombinddn,randombinddnlow=LOW,randombinddnhigh=HIGH"
is given, spaces for bufBaseDN and bufPasswd in the context were not
allocated. The part of the code was enabled only when NEED_FILTER is
set. This patch loosened the condition.
https://fedorahosted.org/389/ticket/48400
Reviewed by [email protected] (Thank you, William!)
|
commit 005000430e3e43c3048e03da50402a2268def1c4
Author: Noriko Hosoi <[email protected]>
Date: Wed Jan 6 14:31:04 2016 -0800
Ticket #48400 - ldclt - segmentation fault error while binding
Description: When "-e randombinddn,randombinddnlow=LOW,randombinddnhigh=HIGH"
is given, spaces for bufBaseDN and bufPasswd in the context were not
allocated. The part of the code was enabled only when NEED_FILTER is
set. This patch loosened the condition.
https://fedorahosted.org/389/ticket/48400
Reviewed by [email protected] (Thank you, William!)
diff --git a/ldap/servers/slapd/tools/ldclt/threadMain.c b/ldap/servers/slapd/tools/ldclt/threadMain.c
index 88353c6ce..796a8fb84 100644
--- a/ldap/servers/slapd/tools/ldclt/threadMain.c
+++ b/ldap/servers/slapd/tools/ldclt/threadMain.c
@@ -890,78 +890,78 @@ threadMain (
}
}
} /*JLS 23-03-01*/
+ } /*JLS 05-03-01*/
- /*
- * Variable base DN ?
- */
- tttctx->bufBaseDN = (char *) malloc (strlen (mctx.baseDN) + 1);
- if (tttctx->bufBaseDN == NULL) /*JLS 06-03-00*/
- { /*JLS 06-03-00*/
+ /*
+ * Variable base DN ?
+ */
+ tttctx->bufBaseDN = (char *) malloc (strlen (mctx.baseDN) + 1);
+ if (tttctx->bufBaseDN == NULL) /*JLS 06-03-00*/
+ { /*JLS 06-03-00*/
printf ("ldclt[%d]: T%03d: cannot malloc(tttctx->bufBaseDN), error=%d (%s)\n",
mctx.pid, tttctx->thrdNum, errno, strerror (errno));
ldcltExit (EXIT_INIT); /*JLS 18-12-00*/
- } /*JLS 06-03-00*/
- if (!(mctx.mode & RANDOM_BASE))
+ } /*JLS 06-03-00*/
+ if (!(mctx.mode & RANDOM_BASE))
strcpy (tttctx->bufBaseDN, mctx.baseDN);
- else
- {
+ else
+ {
tttctx->startBaseDN = strlen (mctx.baseDNHead);
strcpy (tttctx->bufBaseDN, mctx.baseDNHead);
strcpy (&(tttctx->bufBaseDN[tttctx->startBaseDN+mctx.baseDNNbDigit]),
mctx.baseDNTail);
- }
+ }
- /*
- * Variable bind DN ?
- * Do not forget the random bind password below that is activated
- * at the same time as the random bind DN.
- */
- if (mctx.bindDN != NULL) /*JLS 05-03-01*/
- { /*JLS 05-03-01*/
+ /*
+ * Variable bind DN ?
+ * Do not forget the random bind password below that is activated
+ * at the same time as the random bind DN.
+ */
+ if (mctx.bindDN != NULL) /*JLS 05-03-01*/
+ { /*JLS 05-03-01*/
tttctx->bufBindDN = (char *) malloc (strlen (mctx.bindDN) + 1);
if (tttctx->bufBindDN == NULL)
{
- printf ("ldclt[%d]: T%03d: cannot malloc(tttctx->bufBindDN), error=%d (%s)\n",
- mctx.pid, tttctx->thrdNum, errno, strerror (errno));
- ldcltExit (EXIT_INIT);
+ printf ("ldclt[%d]: T%03d: cannot malloc(tttctx->bufBindDN), error=%d (%s)\n",
+ mctx.pid, tttctx->thrdNum, errno, strerror (errno));
+ ldcltExit (EXIT_INIT);
}
if (!(mctx.mode & RANDOM_BINDDN))
- strcpy (tttctx->bufBindDN, mctx.bindDN);
+ strcpy (tttctx->bufBindDN, mctx.bindDN);
else
{
- tttctx->startBindDN = strlen (mctx.bindDNHead);
- strcpy (tttctx->bufBindDN, mctx.bindDNHead);
- strcpy (&(tttctx->bufBindDN[tttctx->startBindDN+mctx.bindDNNbDigit]),
- mctx.bindDNTail);
+ tttctx->startBindDN = strlen (mctx.bindDNHead);
+ strcpy (tttctx->bufBindDN, mctx.bindDNHead);
+ strcpy (&(tttctx->bufBindDN[tttctx->startBindDN+mctx.bindDNNbDigit]),
+ mctx.bindDNTail);
}
- } /*JLS 05-03-01*/
+ } /*JLS 05-03-01*/
- /*
- * Variable bind password ?
- * Remember that the random bind password feature is activated
- * by the same option as the random bind DN, but has here its own
- * code section for the ease of coding.
- */
- if (mctx.passwd != NULL) /*JLS 05-03-01*/
- { /*JLS 05-03-01*/
+ /*
+ * Variable bind password ?
+ * Remember that the random bind password feature is activated
+ * by the same option as the random bind DN, but has here its own
+ * code section for the ease of coding.
+ */
+ if (mctx.passwd != NULL) /*JLS 05-03-01*/
+ { /*JLS 05-03-01*/
tttctx->bufPasswd = (char *) malloc (strlen (mctx.passwd) + 1);
if (tttctx->bufPasswd == NULL)
{
- printf ("ldclt[%d]: T%03d: cannot malloc(tttctx->bufPasswd), error=%d (%s)\n",
- mctx.pid, tttctx->thrdNum, errno, strerror (errno));
- ldcltExit (EXIT_INIT);
+ printf ("ldclt[%d]: T%03d: cannot malloc(tttctx->bufPasswd), error=%d (%s)\n",
+ mctx.pid, tttctx->thrdNum, errno, strerror (errno));
+ ldcltExit (EXIT_INIT);
}
if (!(mctx.mode & RANDOM_BINDDN))
- strcpy (tttctx->bufPasswd, mctx.passwd);
+ strcpy (tttctx->bufPasswd, mctx.passwd);
else
{
- tttctx->startPasswd = strlen (mctx.passwdHead);
- strcpy (tttctx->bufPasswd, mctx.passwdHead);
- strcpy (&(tttctx->bufPasswd[tttctx->startPasswd+mctx.passwdNbDigit]),
- mctx.passwdTail);
+ tttctx->startPasswd = strlen (mctx.passwdHead);
+ strcpy (tttctx->bufPasswd, mctx.passwdHead);
+ strcpy (&(tttctx->bufPasswd[tttctx->startPasswd+mctx.passwdNbDigit]),
+ mctx.passwdTail);
}
- }
- } /*JLS 05-03-01*/
+ }
/*
* Bind DN from a file ?
| 0 |
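The ldclt fix above moves the bufBaseDN/bufPasswd allocation out of the filter-only branch so it also happens for random bind DNs. The buffers use a head + fixed-width digit window + tail layout that each operation overwrites with a fresh random number; here is a small self-contained C sketch of that layout under assumed names and sizes (it is not the real ldclt context structure).

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        /* A pattern such as "uid=userXXXX,dc=example,dc=com" split the way
         * ldclt splits it: head, number of digits, tail (names/sizes assumed). */
        const char *head = "uid=user";
        const char *tail = ",dc=example,dc=com";
        int ndigits = 4;

        /* One allocation up front -- this is what the fix guarantees also
         * happens for random bind DNs -- then only the digit window is
         * rewritten for every operation. */
        size_t len = strlen(head) + (size_t)ndigits + strlen(tail);
        char *buf = malloc(len + 1);
        if (buf == NULL) {
            perror("malloc");
            return 1;
        }
        strcpy(buf, head);
        strcpy(buf + strlen(head) + ndigits, tail);

        for (int i = 0; i < 3; i++) {
            char digits[16];
            snprintf(digits, sizeof(digits), "%0*d", ndigits, rand() % 10000);
            memcpy(buf + strlen(head), digits, (size_t)ndigits);
            printf("%s\n", buf);
        }
        free(buf);
        return 0;
    }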
f4cdf875354896ffe72622dfa37e41a35d2972ab
|
389ds/389-ds-base
|
Resolves: #241089
Summary: reset db statistics between restarts
Description: cleaning up the db stats when the server is shut down
|
commit f4cdf875354896ffe72622dfa37e41a35d2972ab
Author: Noriko Hosoi <[email protected]>
Date: Tue Sep 25 22:03:19 2007 +0000
Resolves: #241089
Summary: reset db statistics between restarts
Description: cleaning up the db stats when the server is shut down
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 287b64c24..efc9c420f 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -2481,7 +2481,7 @@ int dblayer_post_close(struct ldbminfo *li, int dbmode)
/* Shutdown the performance counter stuff */
if (DBLAYER_NORMAL_MODE & dbmode) {
if (priv->perf_private) {
- perfctrs_terminate(&priv->perf_private);
+ perfctrs_terminate(&priv->perf_private, priv->dblayer_env->dblayer_DB_ENV);
}
}
diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.c b/ldap/servers/slapd/back-ldbm/perfctrs.c
index 13fda0db6..b3307ffe7 100644
--- a/ldap/servers/slapd/back-ldbm/perfctrs.c
+++ b/ldap/servers/slapd/back-ldbm/perfctrs.c
@@ -219,8 +219,21 @@ void perfctrs_init(struct ldbminfo *li, perfctrs_private **ret_priv)
}
/* Terminate perf ctrs */
-void perfctrs_terminate(perfctrs_private **priv)
+void perfctrs_terminate(perfctrs_private **priv, DB_ENV *db_env)
{
+ DB_MPOOL_STAT *mpstat = NULL;
+ DB_TXN_STAT *txnstat = NULL;
+ DB_LOG_STAT *logstat = NULL;
+ DB_LOCK_STATi *lockstat = NULL;
+
+ MEMP_STAT(db_env, &mpstat, NULL, DB_STAT_CLEAR, malloc);
+ slapi_ch_free((void**)&mpstat);
+ TXN_STAT(db_env, &txnstat, DB_STAT_CLEAR, malloc);
+ slapi_ch_free((void**)&txnstat);
+ LOG_STAT(db_env, &logstat, DB_STAT_CLEAR, malloc);
+ slapi_ch_free((void**)&logstat);
+ LOCK_STAT(db_env, &lockstat, DB_STAT_CLEAR, malloc);
+ slapi_ch_free((void**)&lockstat);
#if defined(_WIN32)
if (NULL != (*priv)->memory) {
UnmapViewOfFile((*priv)->memory);
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 29bcb0946..c3ef2ff05 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -382,7 +382,7 @@ int parent_update_on_childchange(modify_context *mc,int op, size_t *numofchildre
*/
void perfctrs_wait(size_t milliseconds,perfctrs_private *priv,DB_ENV *db_env);
void perfctrs_init(struct ldbminfo *li,perfctrs_private **priv);
-void perfctrs_terminate(perfctrs_private **priv);
+void perfctrs_terminate(perfctrs_private **priv, DB_ENV *db_env);
void perfctrs_as_entry( Slapi_Entry *e, perfctrs_private *priv, DB_ENV *db_env );
/*
| 0 |
05d209432571dc64b242ae47113ae4cbb43607d2
|
389ds/389-ds-base
|
Ticket #47349 - DS instance crashes under a high load
https://fedorahosted.org/389/ticket/47349
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: handle_new_connection initializes the connection object,
then calls connection_table_move_connection_on_to_active_list to put it
on the list of active connections, then unlocks the c_mutex, then calls
connection_new_private to allocate c_private. If another thread
interrupts after the conn has been moved to the active list, but before
c_private has been allocated, the new conn will be available via
connection_table_iterate_active_connections where table_iterate_function
will attempt to dereference the NULL c_private.
The fix is to move connection_new_private inside the c_mutex lock, and to
move connection_table_move_connection_on_to_active_list to be the very last
thing before releasing the c_mutex lock. Once the conn is on the active
list it is live and we cannot do anything else to it.
Note: I have still not been able to reproduce the problem in a non-debug
optimized build.
Platforms tested: RHEL6 x86_64
Note: Before patch, server would crash within 5 minutes. After patch, server
has been running for several days in customer environment.
Flag Day: no
Doc impact: no
|
commit 05d209432571dc64b242ae47113ae4cbb43607d2
Author: Rich Megginson <[email protected]>
Date: Wed Apr 24 20:36:37 2013 -0600
Ticket #47349 - DS instance crashes under a high load
https://fedorahosted.org/389/ticket/47349
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: handle_new_connection initializes the connection object,
then calls connection_table_move_connection_on_to_active_list to put it
on the list of active connections, then unlocks the c_mutex, then calls
connection_new_private to allocate c_private. If another thread
interrupts after the conn has been moved to the active list, but before
c_private has been allocated, the new conn will be available via
connection_table_iterate_active_connections where table_iterate_function
will attempt to dereference the NULL c_private.
The fix is to move connection_new_private inside the c_mutex lock, and to
move connection_table_move_connection_on_to_active_list to be the very last
thing before releasing the c_mutex lock. Once the conn is on the active
list it is live and we cannot do anything else to it.
Note: I have still not been able to reproduce the problem in a non-debug
optimized build.
Platforms tested: RHEL6 x86_64
Note: Before patch, server would crash within 5 minutes. After patch, server
has been running for several days in customer environment.
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 962699acb..db6495092 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -2686,16 +2686,6 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
/* Call the plugin extension constructors */
conn->c_extension = factory_create_extension(connection_type,conn,NULL /* Parent */);
-
- /* Add this connection slot to the doubly linked list of active connections. This
- * list is used to find the connections that should be used in the poll call. This
- * connection will be added directly after slot 0 which serves as the head of the list */
- if ( conn != NULL && conn->c_next == NULL && conn->c_prev == NULL )
- {
- /* Now give the new connection to the connection code */
- connection_table_move_connection_on_to_active_list(the_connection_table,conn);
- }
-
#if defined(ENABLE_LDAPI)
#if !defined( XP_WIN32 )
/* ldapi */
@@ -2708,10 +2698,21 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
#endif
#endif /* ENABLE_LDAPI */
- PR_Unlock( conn->c_mutex );
-
connection_new_private(conn);
+ /* Add this connection slot to the doubly linked list of active connections. This
+ * list is used to find the connections that should be used in the poll call. This
+ * connection will be added directly after slot 0 which serves as the head of the list.
+ * This must be done as the very last thing before we unlock the mutex, because once it
+ * is added to the active list, it is live. */
+ if ( conn != NULL && conn->c_next == NULL && conn->c_prev == NULL )
+ {
+ /* Now give the new connection to the connection code */
+ connection_table_move_connection_on_to_active_list(the_connection_table,conn);
+ }
+
+ PR_Unlock( conn->c_mutex );
+
g_increment_current_conn_count();
return 0;
| 0 |
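The crash above is a publish-after-init ordering bug: the connection became visible on the active list before c_private existed. The fix finishes all initialization and only then links the connection, all before dropping the lock. A minimal pthreads sketch of that ordering follows; it simplifies the real code's per-connection c_mutex and connection table to a single lock and list, and every name in it is illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    typedef struct conn {
        void *priv;           /* stands in for c_private */
        struct conn *next;    /* active-list link */
    } conn_t;

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static conn_t *active_head = NULL;

    /* Readers only walk the active list while holding the same lock, so they
     * can never observe a half-initialized connection. */
    static void publish_connection(conn_t *c)
    {
        pthread_mutex_lock(&table_lock);

        /* 1. Finish every piece of initialization first ... */
        c->priv = malloc(64);

        /* 2. ... and only then link the connection onto the active list, as
         *    the very last step before releasing the lock; once it is on the
         *    list it is live. */
        c->next = active_head;
        active_head = c;

        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        conn_t c = { NULL, NULL };
        publish_connection(&c);
        printf("published, priv=%p\n", c.priv);
        free(c.priv);
        return 0;
    }

Readers that walk the list under the same lock can therefore never see priv == NULL, which is exactly what table_iterate_function was tripping over before the fix.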
e033d4b3fff788dcb94957a07fcaf78171499ddf
|
389ds/389-ds-base
|
Ticket 48270: test case
|
commit e033d4b3fff788dcb94957a07fcaf78171499ddf
Author: Thierry Bordaz <[email protected]>
Date: Mon Feb 8 15:04:33 2016 +0100
Ticket 48270: test case
diff --git a/dirsrvtests/tickets/ticket48270_test.py b/dirsrvtests/tickets/ticket48270_test.py
new file mode 100644
index 000000000..a1822f2fa
--- /dev/null
+++ b/dirsrvtests/tickets/ticket48270_test.py
@@ -0,0 +1,177 @@
+import os
+import sys
+import time
+import ldap
+import logging
+#import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
+
+MIXED_VALUE="/home/mYhOmEdIrEcToRy"
+LOWER_VALUE="/home/myhomedirectory"
+HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
+HOMEDIRECTORY_CN="homedirectory"
+MATCHINGRULE = 'nsMatchingRule'
+UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
+UIDNUMBER_CN="uidnumber"
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+#@pytest.fixture(scope="module")
+def topology(request):
+ global installation1_prefix
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+ # Creating standalone instance ...
+ standalone = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ standalone.delete()
+ standalone.create()
+ standalone.open()
+
+ # Delete each instance in the end
+ def fin():
+ standalone.delete()
+ #request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ standalone.clearTmpDir(__file__)
+
+ return TopologyStandalone(standalone)
+
+def test_ticket48270_init(topology):
+ log.info("Initialization: add dummy entries for the tests")
+ for cpt in range(MAX_ACCOUNTS):
+ name = "%s%d" % (NEW_ACCOUNT, cpt)
+ topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top posixAccount".split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidnumber': str(111),
+ 'gidnumber': str(222),
+ 'homedirectory': "/home/tbordaz_%d" % cpt})))
+
+
+def test_ticket48270_homeDirectory_indexed_cis(topology):
+ log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match")
+ try:
+ ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+ except ldap.NO_SUCH_OBJECT:
+ topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+ 'objectclass': "top nsIndex".split(),
+ 'cn': HOMEDIRECTORY_CN,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'eq'})))
+ #log.info("attach debugger")
+ #time.sleep(60)
+
+ IGNORE_MR_NAME='caseIgnoreIA5Match'
+ EXACT_MR_NAME='caseExactIA5Match'
+ mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))]
+ topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+
+ #topology.standalone.stop(timeout=10)
+ log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing")
+ #assert topology.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
+ #topology.standalone.start(timeout=10)
+ args = {TASK_WAIT: True}
+ topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+
+ log.info("Check indexing succeeded with a specified matching rule")
+ file_path = os.path.join(topology.standalone.prefix, "var/log/dirsrv/slapd-%s/errors" % topology.standalone.serverid)
+ file_obj = open(file_path, "r")
+
+ # Check if the MR configuration failure occurs
+ regex = re.compile("unknown or invalid matching rule")
+ while True:
+ line = file_obj.readline()
+ found = regex.search(line)
+ if ((line == '') or (found)):
+ break
+
+ if (found):
+ log.info("The configuration of a specific MR fails")
+ log.info(line)
+ #assert not found
+
+def test_ticket48270_homeDirectory_mixed_value(topology):
+ # Set a homedirectory value with mixed case
+ name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
+ mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)]
+ topology.standalone.modify_s(name, mod)
+
+def test_ticket48270_extensible_search(topology):
+ name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
+
+ # check with the exact stored value
+ log.info("Default: can retrieve an entry filter syntax with exact stored value")
+ ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
+ log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value")
+ ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
+
+ # check with a lower case value that is different from the stored value
+ log.info("Default: can not retrieve an entry filter syntax match with lowered stored value")
+ try:
+ ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
+ assert ent is None
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value")
+ try:
+ ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
+ assert ent is None
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value")
+ ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
+
+def test_ticket48270(topology):
+ """Write your testcase here...
+
+ Also, if you need any testcase initialization,
+ please, write additional fixture for that(include finalizer).
+ """
+
+ log.info('Test complete')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ global installation1_prefix
+ installation1_prefix = '/home/tbordaz/install_master'
+ topo = topology(True)
+ test_ticket48270_init(topo)
+ test_ticket48270_homeDirectory_indexed_cis(topo)
+ test_ticket48270_homeDirectory_mixed_value(topo)
+ test_ticket48270_extensible_search(topo)
+
+# CURRENT_FILE = os.path.realpath(__file__)
+# pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
| 0 |
87ad36b865f85297feb40c4b61db82101b9a9447
|
389ds/389-ds-base
|
Ticket #47551 logconv: -V does not produce unindexed search report
https://fedorahosted.org/389/ticket/47551
Reviewed by: mreynolds (Thanks!)
Branch: master
Fix Description: Execute the $usage /u/ code also when verb == yes.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
|
commit 87ad36b865f85297feb40c4b61db82101b9a9447
Author: Rich Megginson <[email protected]>
Date: Tue Oct 8 11:04:31 2013 -0600
Ticket #47551 logconv: -V does not produce unindexed search report
https://fedorahosted.org/389/ticket/47551
Reviewed by: mreynolds (Thanks!)
Branch: master
Fix Description: Execute the $usage /u/ code also when verb == yes.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index 8de1e8e39..865e11362 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -676,9 +676,9 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
my $unindexedIp;
my %uniqFilt = (); # hash of unique filters
while (my ($srcnt_conn_op, $count) = each %{$notesa_conn_op}) {
- my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
- $unindexedIp = getIPfromConn($conn, $srvRstCnt);
- if ($usage =~ /u/) {
+ if ($verb eq "yes" || $usage =~ /u/) {
+ my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
+ my $unindexedIp = getIPfromConn($conn, $srvRstCnt);
print "\n Unindexed Search #".$notesCount." (notes=A)\n";
print " - Date/Time: $time_conn_op->{$srcnt_conn_op}\n";
print " - Connection Number: $conn\n";
@@ -694,7 +694,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
}
if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) {
- if ($usage =~ /u/) {
+ if ($verb eq "yes" || $usage =~ /u/) {
print " - Search Filter: $filter_conn_op->{$srcnt_conn_op}\n";
}
$uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++;
@@ -724,9 +724,9 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
my $unindexedIp;
my %uniqFilt = (); # hash of unique filters
while (my ($srcnt_conn_op, $count) = each %{$notesu_conn_op}) {
- my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
- $unindexedIp = getIPfromConn($conn, $srvRstCnt);
- if ($usage =~ /u/) {
+ if ($verb eq "yes" || $usage =~ /u/) {
+ my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
+ $unindexedIp = getIPfromConn($conn, $srvRstCnt);
print "\n Unindexed Component #".$notesCount." (notes=U)\n";
print " - Date/Time: $time_conn_op->{$srcnt_conn_op}\n";
print " - Connection Number: $conn\n";
@@ -742,7 +742,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
}
if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) {
- if ($usage =~ /u/) {
+ if ($verb eq "yes" || $usage =~ /u/) {
print " - Search Filter: $filter_conn_op->{$srcnt_conn_op}\n";
}
$uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++;
| 0 |
dfdab6e74bf66a89094abd447633b6e5655e2c46
|
389ds/389-ds-base
|
[167982] Service Pack framework
Ported the patch making code to the trunk.
|
commit dfdab6e74bf66a89094abd447633b6e5655e2c46
Author: Noriko Hosoi <[email protected]>
Date: Thu Sep 22 16:48:16 2005 +0000
[167982] Service Pack framework
Ported the patch making code to the trunk.
diff --git a/ldap/Makefile b/ldap/Makefile
index 2af98bfae..ba9ea3c8e 100644
--- a/ldap/Makefile
+++ b/ldap/Makefile
@@ -62,7 +62,9 @@ ifneq ($(ARCH), WINNT)
cd systools; $(MAKE) $(MFLAGS) all
# new unix installer
ifeq ($(USE_SETUPUTIL), 1)
- cd cm/newinst; $(MAKE) $(MFLAGS) all
+ # passing $(OBJDIR) as ORIGINAL_OBJDIR since USE_64 info is cleaned up
+ # and lost in cm/newinst
+ cd cm/newinst; $(MAKE) $(MFLAGS) ORIGINAL_OBJDIR=$(ABS_OBJDIR) all
ifeq ($(USE_64),1)
# In 64-bit builds, we build the installer 32-bit, which has the side-effect that the uninstaller and ns-update scripts
# get copied into the 32-bit output directory by the makefile above. However, we later want to package them and expect
diff --git a/ldap/cm/Makefile b/ldap/cm/Makefile
index 8e01152c4..039a58057 100644
--- a/ldap/cm/Makefile
+++ b/ldap/cm/Makefile
@@ -214,6 +214,13 @@ RELJDK = $(BUILD_DRIVE)$(RELTOP)/ldapjdk
FIX_SETUP_INF = $(BUILD_ROOT)/ldap/cm/fixSetupInf.pl
FIX_BASE_INF = $(BUILD_ROOT)/ldap/cm/fixBaseInf.pl
+ABSBUILD_ROOT = $(shell cd $(BUILD_ROOT); pwd)
+ABSRELDIR = $(ABSBUILD_ROOT)/built/release
+GENRPMPATCH = $(ABSBUILD_ROOT)/ldap/cm/genRpmPatch.pl
+PATCHINF = $(ABSBUILD_ROOT)/ldap/cm/fedora-patch.inf
+DATETIME = $(shell date +%Y%m%d-%H%M%S)
+SPEXT = .SP.$(DATETIME)
+
# This is the directory where we put what we're making: the files which go on the CD.
ifndef INSTDIR
ifeq ($(ARCH), WINNT)
@@ -224,6 +231,11 @@ endif
endif
ABS_INSTDIR = $(shell cd $(INSTDIR); pwd)
+ifdef BUILD_PATCH
+PATCHINSTDIR = $(ABS_INSTDIR)-SP
+SLAPDSP = slapd-71
+endif
+
INST_TARGET_RESKIT=$(INSTDIR)/reskit
INST_TARGET_INTL=./$(PRODUCT_MARKET)dir
@@ -610,6 +622,15 @@ packageDirectory: $(INSTDIR)/slapd \
$(INSTDIR)/tools/infozip.zip \
$(ADMSERV_DEP)
+ifdef BUILD_PATCH
+ifdef BUILD_RPM
+# create a patch
+ $(GENRPMPATCH) -i $(RPM_BASE_NAME) -o $(NS_BUILD_FLAVOR) -r $(ABSRELDIR) -e $(SPEXT) -f $(PATCHINF) -v
+ mv $(ABSRELDIR)/slapd/$(NS_BUILD_FLAVOR) $(ABSRELDIR)/slapd/$(NS_BUILD_FLAVOR).original
+ ln -s $(ABSRELDIR)/slapd/$(NS_BUILD_FLAVOR)$(SPEXT)/opt/$(RPM_BASE_NAME)-ds $(ABSRELDIR)/slapd/$(NS_BUILD_FLAVOR)
+endif
+endif
+
# this gets setup, setup.inf, silent.inf, the zip wrapper, and svrcore, among others
ifeq ($(USE_SETUPUTIL),1)
cp -R $(SETUPUTIL_BINPATH)/* $(INSTDIR)
@@ -712,8 +733,10 @@ ifndef NO_INSTALLER_TAR_FILES
| gzip -f > ../$(NS_BUILD_FLAVOR).tar.gz
# build the combined packages tar file; use h flag to follow symlinks
ifdef BUILD_SHIP
+ifndef BUILD_PATCH
cd $(INSTDIR); $(TAR) cvfh - setup.inf setup slapd nsperl \
perldap dsktune tools $(ADMIN_IMPORTS) | gzip -f > $(BUILD_SHIP)/$(FTPNAMEGZ)
+endif
ifeq ($(DEBUG), optimize)
# $(REMSH) "/u/svbld/bin/preRtm $(BUILD_SHIP) $(FTPNAMEGZ) svbld"
endif
@@ -739,13 +762,44 @@ ifdef BUILD_RPM
rm $(RPM_FILE_BASE) ; \
fi
# execute the RPM build
- rpmbuild $(RPM_TOPDIR) $(RPM_SOURCEDIR) $(RPM_BUILDDIR) $(RPM_RPMDIR) $(RPM_SRPMDIR) --define "flavor $(RPM_FLAVOR)" --clean --nodeps -ba $(OBJDIR)/$(RPM_BASE_NAME)-ds.spec
+ rpmbuild $(RPM_TOPDIR) $(RPM_SOURCEDIR) $(RPM_BUILDDIR) $(RPM_RPMDIR) $(RPM_SRPMDIR) $(RPM_REQUIRES) --define "flavor $(RPM_FLAVOR)" --clean --nodeps -ba $(OBJDIR)/$(RPM_BASE_NAME)-ds.spec
ifdef BUILD_SHIP
cp $(ABS_TOPDIR)/$(RPM_FILE_BASE)*$(NSOS_ARCH)$(NSOS_RELEASE).$(RPM_ARCH).$(RPM_FLAVOR).rpm $(BUILD_SHIP)
endif # BUILD_SHIP
endif # BUILD_RPM
+ifdef BUILD_PATCH
+ mkdir -p $(PATCHINSTDIR)/$(SLAPDSP)
+ cp $(INSTDIR)/LICENSE.txt $(PATCHINSTDIR)
+ cp $(INSTDIR)/README.txt $(PATCHINSTDIR)
+# prepare the main inf file: setup.inf pointing inf file $(SLAPDS)/$(SLAPD).inf
+ sed -e "s/Components.*=/Components = $(SLAPDSP)/" $(INSTDIR)/setup.inf > $(PATCHINSTDIR)/setup.inf
+ echo "" >> $(PATCHINSTDIR)/setup.inf
+ echo "[$(SLAPDSP)]" >> $(PATCHINSTDIR)/setup.inf
+ echo "ComponentInfoFile = $(SLAPDSP)/$(SLAPDSP).inf" >> $(PATCHINSTDIR)/setup.inf
+# create a zip file based upon the $(PATCHINF) file
+ cd $(ABSRELDIR)/slapd/$(NS_BUILD_FLAVOR); zip -r $(PATCHINSTDIR)/$(SLAPDSP)/ns$(SLAPDSP).zip `egrep file: $(PATCHINF) | awk -F: '{print $$3}'`
+# put ns-config and needed libs in the $(PATCHINSTDIR)/$(SLAPDSP) directory
+ $(INSTALL) -m 755 $(RELDIR_32)/bin/slapd/admin/bin/ns-config $(PATCHINSTDIR)/$(SLAPDSP)
+ -@for file in $(PACKAGE_SETUP_LIBS_32) ; \
+ do if [ -f $$file ] ; \
+ then $(INSTALL) -m 755 $$file $(PATCHINSTDIR)/$(SLAPDSP) ; \
+ fi ; \
+ done
+# create patch inf file: $(SLAPD).inf
+ cp $(OBJDIR)/slapd-patch.inf $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf
+ cd $(ABSRELDIR)/slapd/$(NS_BUILD_FLAVOR); ls `egrep file: $(PATCHINF) | egrep -v "setup/setup" | awk -F: '{print $$3}'` > $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf.tmp
+ echo `cat $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf.tmp` | sed -e "s/ /,/g" > $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf.tmp2
+ echo "BackupFiles="`cat $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf.tmp2`>> $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf
+ rm -f $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf.tmp $(PATCHINSTDIR)/$(SLAPDSP)/$(SLAPDSP).inf.tmp2
+ifdef BUILD_SHIP
+ cd $(PATCHINSTDIR); $(TAR) cvfh - * | gzip -f > $(BUILD_SHIP)/$(FTPNAMEGZ)
+else
+ cd $(PATCHINSTDIR); $(TAR) cvfh - * | gzip -f > ../$(NS_BUILD_FLAVOR)-SP.tar.gz
+endif
+endif
+
else
# ---THE NT PACKAGE---
diff --git a/ldap/cm/fedora-patch.inf b/ldap/cm/fedora-patch.inf
new file mode 100644
index 000000000..be5c87a63
--- /dev/null
+++ b/ldap/cm/fedora-patch.inf
@@ -0,0 +1,47 @@
+#
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2005 Red Hat, Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
+#
+# Sample Info file to generate service pack
+# base: <builddir> containing the base package -- e.g., DS7.1
+# file: <bugzilla number>: <patchfile>
+#
+base: /share/dev4/fedora-ds/fds71/ships/20050526.1
+
+file: 000001: README.txt
+file: 000002: COPYRIGHT.txt
+
diff --git a/ldap/cm/genRpmPatch.pl b/ldap/cm/genRpmPatch.pl
new file mode 100755
index 000000000..d8750a96f
--- /dev/null
+++ b/ldap/cm/genRpmPatch.pl
@@ -0,0 +1,231 @@
+#!/usr/bin/perl
+#
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2005 Red Hat, Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
+#
+#
+#
+
+sub usage {
+ print(STDERR "Usage : $0 -r <releasedir> -o <objdir> -e <extension> -i <identity> -f <inffile>\n");
+ print(STDERR " -r <releasedir>: built/release dir\n");
+ print(STDERR " -o <objdir>: e.g., RHEL4-domestic-full-normal-pth-slapd\n");
+ print(STDERR " -e <extension>: extension for the patch dir\n");
+ print(STDERR " -i <identity>: fedora or redhat\n");
+ print(STDERR " -f <inffile>: file containing the patch info\n");
+ print(STDERR "sample <inffile>\n");
+ print(STDERR " ======================================================\n");
+ print(STDERR " base: /share/dev4/fedora-ds/fds71/ships/20050526.1\n");
+ print(STDERR " file: 147585: plugins/slapd/slapi/examples/testpreop.c\n");
+ print(STDERR " file: 164834,165641: bin/slapd/server/ns-slapd\n");
+ print(STDERR " ======================================================\n");
+}
+
+$verbose = 0;
+$inffile = "";
+$builtdirname = "";
+$releasedir = "";
+$extension = "";
+$identity = "";
+
+$i = 0;
+while ($i <= $#ARGV) {
+ if ("$ARGV[$i]" eq "-o") {
+ $i++;
+ $builtdirname = $ARGV[$i];
+ } elsif ("$ARGV[$i]" eq "-r") {
+ $i++;
+ $releasedir = $ARGV[$i];
+ } elsif ("$ARGV[$i]" eq "-e") {
+ $i++;
+ $extension = $ARGV[$i];
+ } elsif ("$ARGV[$i]" eq "-i") {
+ $i++;
+ $identity = $ARGV[$i];
+ } elsif ("$ARGV[$i]" eq "-f") {
+ $i++;
+ $inffile = $ARGV[$i];
+ } elsif ("$ARGV[$i]" eq "-v") {
+ $verbose = 1;
+ }
+ $i++;
+}
+
+if ("$builtdirname" eq "") {
+ print(STDERR "ERROR: builtdirname is not given\n");
+ &usage; exit(1);
+}
+if ("$releasedir" eq "") {
+ print(STDERR "ERROR: releasedir is not given\n");
+ &usage; exit(1);
+}
+if ("$extension" eq "") {
+ print(STDERR "ERROR: extension is not given\n");
+ &usage; exit(1);
+}
+if ("$identity" eq "" ||
+ (("$identity" ne "fedora") && ("$identity" ne "redhat"))) {
+ print(STDERR "ERROR: $identity is not fedora or redhat\n");
+ &usage; exit(1);
+}
+if ("$inffile" eq "") {
+ print(STDERR "ERROR: inffile is not given\n");
+ &usage; exit(1);
+}
+if (!(-d "$releasedir")) {
+ print(STDERR "ERROR: $releasedir does not exist\n");
+ exit(1);
+}
+
+unless (open (INFFILE, $inffile)) {
+ die "Error, cannot open info file $inffile\n";
+}
+
+$basedir = 0;
+@newfiles = ();
+while ($l = <INFFILE>) {
+ chop($l);
+ $pos = length($l);
+ if ($l =~ /^base: /) {
+ $pos = rindex($l, ":", $pos);
+ $pos++;
+ $basedir = substr($l, $pos);
+ $basedir =~ s/[ ]//g;
+ } elsif ($l =~ /^file: /) {
+ $pos = rindex($l, ":", $pos);
+ $pos++;
+ $file = substr($l, $pos);
+ $file =~ s/[ ]//g;
+ push(@newfiles, ($file));
+ }
+}
+if (1 == $verbose) {
+ print "Base: $basedir\n";
+ print "New Files:\n";
+ foreach $afile (@newfiles) {
+ print " $afile\n";
+ }
+}
+
+if ($builtdirname !~ /RHEL/) {
+ print(STDERR "ERROR: Not RHEL\n");
+ exit(1);
+}
+
+# Get info from $builtdirname (e.g., RHEL4-domestic-full-normal-pth-slapd\n")
+$rhelversion = "";
+$rhelversionl = "";
+if ($builtdirname =~ /RHEL3/) {
+ $rhelversion = "RHEL3";
+ $rhelversionl = "rhel3";
+} elsif ($builtdirname =~ /RHEL4/) {
+ $rhelversion = "RHEL4";
+ $rhelversionl = "rhel4";
+} else {
+ print(STDERR "ERROR: $builtdirname is not supported\n");
+ exit(1);
+}
+
+$optordbg = "";
+if ($builtdirname =~ /full/) {
+ $optordbg = "dbg";
+} elsif ($builtdirname =~ /optimize/) {
+ $optordbg = "opt";
+} else {
+ print(STDERR "ERROR: $builtdirname has no opt/debug info\n");
+ exit(1);
+}
+
+# Get fullpath to the RPM file
+$fullrpmfile = "";
+$iddir = "";
+opendir(BASEDIR, $basedir) or die "ERROR: Could not open $basedir\n";
+while ( defined ( $subdir = readdir(BASEDIR))) {
+ if ($subdir =~ /$rhelversionl/ || $subdir =~ /$rhelversion/) {
+ $fullsubdir = $basedir . "/" . $subdir;
+ opendir(SUBDIR, $fullsubdir) or die "ERROR: Could not open $fullsubdir\n";
+ while ( defined ( $rpmfile = readdir(SUBDIR))) {
+ if (($rpmfile =~ /$rhelversionl/ || $rpmfile =~ /$rhelversion/) &&
+ $rpmfile =~ /$optordbg/ && $rpmfile =~ /\.rpm$/) {
+ $fullrpmfile = $fullsubdir . "/" . $rpmfile;
+ ($org, $ds, $rest) = split('-', $rpmfile, 3);
+ $iddir = $org . "-" . $ds;
+ if ("$org" ne "$identity") {
+ print "ERROR: rpmfile name $rpmfile does not match the given identity $identity\n";
+ exit(1);
+ }
+ closedir(SUBDIR);
+ last;
+ }
+ }
+ closedir(BASEDIR);
+ last;
+ }
+}
+if ("$fullrpmfile" eq "") {
+ print(STDERR "ERROR: Cannot file an rpm file under $basedir\n");
+ exit(1);
+}
+if (1 == $verbose) {
+ print "RPM File: $fullrpmfile\n";
+}
+
+# Expand the RPM file to the $releasedir
+$workdir = $releasedir . "/slapd/" . $builtdirname . $extension;
+mkdir($workdir, 0700);
+chdir($workdir);
+if (1 == $verbose) {
+ print "Work Dir: $workdir\n";
+}
+open(RPM2CPIO, "rpm2cpio $fullrpmfile | cpio -id | ") or die "Cannot run program: $!\n";
+close(RPM2CPIO);
+
+# Copy new files onto the expanded files
+foreach $afile (@newfiles) {
+ $srcfile = $releasedir . "/slapd/" . $builtdirname . "/" . $afile;
+ $destfile = $workdir . "/opt/" . $iddir . "/" . $afile;
+ $destdir = substr($destfile, 0, rindex($destfile, "/", length($destfile)));
+ if (!(-d $destdir)) {
+ print "WARNING: $destdir does not exist. Skipping ...\n";
+ next;
+ }
+ if (1 == $verbose) {
+ print "Copy: $srcfile => $destdir\n";
+ }
+ open(COPY, "cp $srcfile $destdir | ") or print "Copy $srcfile to $destdir failed: $!\n";
+ close(COPY);
+}
diff --git a/ldap/cm/newinst/Makefile b/ldap/cm/newinst/Makefile
index 518862f42..b4442593b 100644
--- a/ldap/cm/newinst/Makefile
+++ b/ldap/cm/newinst/Makefile
@@ -96,6 +96,7 @@ else
BINS=$(addprefix $(BINDEST)/, $(PROGS))
endif
INFO= $(OBJDIR)/$(DIR)
+PATCHINFO= $(ORIGINAL_OBJDIR)/$(DIR)
# Source for staged installation utilities
INCDIR=$(SETUPUTIL_INCLUDE) -I$(LDAP_SRC)/admin/include -I$(LDAP_SRC)/admin/lib -I$(LDAP_SRC)/admin/src
@@ -177,10 +178,13 @@ $(BINDEST)/ns-config: $(OBJS1) $(OBJS2)
ifeq ($(ARCH), WINNT)
$(INFO):
- $(PERL) fixINF.pl $(BUILD_MODULE) $(DIR_VERSION) $(BUILD_ROOT)/$(BUILD_ARCH)/buildnum.dat slapd.inf $(SECURITY) $(PRODUCT) $(IS_DIR_LITE) $(INSTANCE_NAME_PREFIX) [email protected] $(BUILD_BOMB) "bin/admin/ns-admin,bin/admin/ns-admin.so"
+ $(PERL) fixINF.pl $(BUILD_MODULE) $(NOSP_DIR_VERSION) $(BUILD_ROOT)/$(BUILD_ARCH)/buildnum.dat slapd.inf $(SECURITY) $(PRODUCT) $(IS_DIR_LITE) $(INSTANCE_NAME_PREFIX) [email protected] $(BUILD_BOMB) "bin/admin/ns-admin,bin/admin/ns-admin.so"
else
$(INFO):
- $(PERL) fixINF.pl $(BUILD_MODULE) $(DIR_VERSION) $(BUILD_ROOT)/$(BUILD_ARCH)/buildnum.dat slapd.inf $(SECURITY) $(PRODUCT) $(IS_DIR_LITE) $(INSTANCE_NAME_PREFIX) [email protected] $(BUILD_BOMB) "$(addprefix lib/,$(LDAP_SOLIBS)) bin/admin/libnsslapd.sl bin/slapd/bin/ns-slapd bin/slapd/bin/ns-slapd.so bin/slapd/bin/libnsslapd_shr.a"
+ $(PERL) fixINF.pl $(BUILD_MODULE) $(NOSP_DIR_VERSION) $(BUILD_ROOT)/$(BUILD_ARCH)/buildnum.dat slapd.inf $(SECURITY) $(PRODUCT) $(IS_DIR_LITE) $(INSTANCE_NAME_PREFIX) [email protected] $(BUILD_BOMB) "$(addprefix lib/,$(LDAP_SOLIBS)) bin/admin/libnsslapd.sl bin/slapd/bin/ns-slapd bin/slapd/bin/ns-slapd.so bin/slapd/bin/libnsslapd_shr.a"
+ifdef BUILD_PATCH
+ $(PERL) fixINF.pl $(BUILD_MODULE) $(NOSP_DIR_VERSION) $(BUILD_ROOT)/$(BUILD_ARCH)/buildnum.dat slapd-patch.inf $(SECURITY) $(PRODUCT_NAME) $(IS_DIR_LITE) $(INSTANCE_NAME_PREFIX) $(PATCHINFO)-patch.inf $(BUILD_BOMB) "$(addprefix lib/,$(LDAP_SOLIBS)) bin/admin/libnsslapd.sl bin/slapd/bin/ns-slapd bin/slapd/bin/ns-slapd.so bin/slapd/bin/libnsslapd_shr.a"
+endif
endif
$(OBJDEST)/%.o: %.c
diff --git a/ldap/cm/newinst/slapd-patch.inf b/ldap/cm/newinst/slapd-patch.inf
new file mode 100644
index 000000000..27559a2c3
--- /dev/null
+++ b/ldap/cm/newinst/slapd-patch.inf
@@ -0,0 +1,58 @@
+#
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2005 Red Hat, Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
+#
+[General]
+Name= %%%SERVER_NAME%%%
+Components=slapd-71
+
+[slapd-71]
+Name= %%%SERVER_NAME%%%
+InstanceNamePrefix= %%%INSTANCE_NAME_PREFIX%%%
+Description= %%%SERVER_NAME%%%
+NickName= slapd-71
+Version= %%%SERVER_VERSION%%%
+BuildNumber= %%%SERVER_BUILD_NUM%%%
+Archive= nsslapd-71.zip
+SourcePath=slapd-71
+PreInstall= ns-config -r
+PostInstall= bin/slapd/admin/bin/ns-update
+PreUninstall= bin/slapd/admin/bin/uninstall
+PostUninstall=
+Checked=True
+Mandatory=False
+IsLdap=True
diff --git a/ldap/cm/redhat-patch.inf b/ldap/cm/redhat-patch.inf
new file mode 100644
index 000000000..be5c87a63
--- /dev/null
+++ b/ldap/cm/redhat-patch.inf
@@ -0,0 +1,47 @@
+#
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2005 Red Hat, Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
+#
+# Sample Info file to generate service pack
+# base: <builddir> containing the base package -- e.g., DS7.1
+# file: <bugzilla number>: <patchfile>
+#
+base: /share/dev4/fedora-ds/fds71/ships/20050526.1
+
+file: 000001: README.txt
+file: 000002: COPYRIGHT.txt
+
diff --git a/nsdefs.mk b/nsdefs.mk
index 3bb5c5911..abf7b9a80 100644
--- a/nsdefs.mk
+++ b/nsdefs.mk
@@ -238,7 +238,8 @@ else
NSPR_DIR=nspr
endif
NSPR_BASENAME=libnspr21
-PRODUCT="Fedora Directory Server"
+PRODUCTCORE=Fedora Directory Server
+PRODUCT="$(PRODUCTCORE)"
PRODUCT_IS_DIRECTORY_SERVER=1
INSTANCE_NAME_PREFIX="Directory Server"
DIR=slapd
@@ -267,6 +268,7 @@ DO_SEARCH=no
DIR_VERSION:=7.1
NOSP_DIR_VERSION:=7.1
DIR_NORM_VERSION:=7.1
+PRODUCT_NAME="$(PRODUCTCORE) $(DIR_VERSION)"
# When you change DIRSDK_VERSION or DIRSDK_VERSION_DLL_SUFFIX, you must
# update all of the .exp and .def files by executing the following command:
# cd ldap/libraries; gmake exportfiles
| 0 |
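genRpmPatch.pl drives the service-pack build from a small inf file in which a "base:" line points at the shipped build directory and each "file:" line carries a bugzilla number and a patched file path. A short Python sketch of the same parsing, written against the sample fedora-patch.inf shown above (the field layout is taken from the script's usage text, not from any existing Python tool in the tree):

    def parse_patch_inf(path):
        base_dir = None
        patch_files = []
        with open(path) as inf:
            for line in inf:
                line = line.strip()
                if line.startswith("base:"):
                    # everything after the first colon is the base build directory
                    base_dir = line.split(":", 1)[1].strip()
                elif line.startswith("file:"):
                    # "file: <bug number>: <relative path>" -- keep only the path
                    patch_files.append(line.split(":", 2)[2].strip())
        return base_dir, patch_files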
275291284c66794cce7dab07a2d2daea33c2e947
|
389ds/389-ds-base
|
Resolves: #214728
Summary: Cleaning up obsolete macros in the build
Changes: eliminated macro CLIENT_AUTH (Comment #12)
|
commit 275291284c66794cce7dab07a2d2daea33c2e947
Author: Noriko Hosoi <[email protected]>
Date: Fri Nov 10 01:40:44 2006 +0000
Resolves: #214728
Summary: Cleaning up obsolete macros in the build
Changes: eliminated macro CLIENT_AUTH (Comment #12)
diff --git a/include/libaccess/nsauth.h b/include/libaccess/nsauth.h
index 91e5d64bb..360485db9 100644
--- a/include/libaccess/nsauth.h
+++ b/include/libaccess/nsauth.h
@@ -48,16 +48,9 @@
* into and out of authentication interface functions.
*/
-#ifdef CLIENT_AUTH
-
#include "ssl.h"
-#if 0
-/* Removed for new ns security */
-#include "sec.h" /* SECCertificate */
-#endif
#include "cert.h" /* CERTCertificate for new ns security bin */
-#endif /* CLIENT_AUTH */
#include "usi.h" /* identifier list support */
#include "attrec.h" /* attribute record types */
@@ -308,13 +301,7 @@ struct ClAuth_s {
char * cla_dns; /* DNS name string pointer */
UserObj_t * cla_uoptr; /* authenticated user object pointer */
GroupObj_t * cla_goptr; /* pointer to list of group objects */
-#ifdef CLIENT_AUTH
-#if 0
- /* Removed for new ns security */
- SECCertificate * cla_cert; /* certificate from SSL client auth */
-#endif
CERTCertificate * cla_cert; /* certificate from SSL client auth */
-#endif /* CLIENT_AUTH */
};
#endif /* __nsauth_h */
diff --git a/include/libaccess/nsautherr.h b/include/libaccess/nsautherr.h
index 43b302256..a73b65399 100644
--- a/include/libaccess/nsautherr.h
+++ b/include/libaccess/nsautherr.h
@@ -71,7 +71,6 @@
#define NSAUERR3320 3320 /* insufficient dynamic memory */
#define NSAUERR3340 3340 /* error opening group database */
-#if defined(CLIENT_AUTH)
/* nsadbOpenClients() */
#define NSAUERR3400 3400 /* invalid function argument */
#define NSAUERR3420 3420 /* insufficient dynamic memory */
@@ -94,8 +93,6 @@
/* nsadbRemoveCertUser() */
#define NSAUERR3900 3900 /* error deleting entry in database */
-#endif /* defined(CLIENT_AUTH) */
-
/* Define error ids generated by nsamgmt.c */
/* nsadbRemoveUser() */
| 0 |
7483341432b1a7c3d8448ff3b3e01b09d0540bc7
|
389ds/389-ds-base
|
Bump version to 1.4.1.5
|
commit 7483341432b1a7c3d8448ff3b3e01b09d0540bc7
Author: Mark Reynolds <[email protected]>
Date: Mon Jul 8 15:23:01 2019 -0400
Bump version to 1.4.1.5
diff --git a/VERSION.sh b/VERSION.sh
index 48b99bdcf..e9e12a2b7 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=4
-VERSION_MAINT=1.4
+VERSION_MAINT=1.5
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
| 0 |
1c5f0605e2d3be4a56b4d3aede4c02e11256b15b
|
389ds/389-ds-base
|
Ticket 50184 - Add cli tool parity to dsconf/dsctl
Bug Description: As we are removing the shell/perl tools, we need
to have functional parity with the existing tools. This adds the
final tools needed to make that equivalent.
Fix Description: Add support for dbverify, linkedattr fixup and
a monitoring tool.
https://pagure.io/389-ds-base/issue/50184
Author: William Brown <[email protected]>
Review by: mreynolds (thanks!)
|
commit 1c5f0605e2d3be4a56b4d3aede4c02e11256b15b
Author: William Brown <[email protected]>
Date: Thu Jan 31 12:25:51 2019 +1000
Ticket 50184 - Add cli tool parity to dsconf/dsctl
Bug Description: As we are removing the shell/perl tools, we need
to have functional parity with the existing tools. This adds the
final tools needed to make that equivalent.
Fix Description: Add support for dbverify, linkedattr fixup and
a monitoring tool.
https://pagure.io/389-ds-base/issue/50184
Author: William Brown <[email protected]>
Review by: mreynolds (thanks!)
diff --git a/src/lib389/cli/dsconf b/src/lib389/cli/dsconf
index b22736ed6..a844fca3b 100755
--- a/src/lib389/cli/dsconf
+++ b/src/lib389/cli/dsconf
@@ -23,6 +23,7 @@ from lib389.cli_conf import directory_manager as cli_directory_manager
from lib389.cli_conf import plugin as cli_plugin
from lib389.cli_conf import schema as cli_schema
from lib389.cli_conf import health as cli_health
+from lib389.cli_conf import monitor as cli_monitor
from lib389.cli_conf import saslmappings as cli_sasl
from lib389.cli_conf import pwpolicy as cli_pwpolicy
from lib389.cli_conf import backup as cli_backup
@@ -78,6 +79,7 @@ cli_chaining.create_parser(subparsers)
cli_config.create_parser(subparsers)
cli_directory_manager.create_parsers(subparsers)
cli_health.create_parser(subparsers)
+cli_monitor.create_parser(subparsers)
cli_plugin.create_parser(subparsers)
cli_pwpolicy.create_parser(subparsers)
cli_replication.create_parser(subparsers)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 974e2189a..fb987e007 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -3091,6 +3091,32 @@ class DirSrv(SimpleLDAPObject, object):
return output
+ def dbverify(self, bename):
+ """
+ @param bename - the backend name to verify
+ @return - True if the verify succeded
+ """
+ prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd')
+
+ if self.status():
+ self.log.error("dbverify: Can not operate while directory server is running")
+ return False
+
+ cmd = [
+ prog,
+ 'dbverify',
+ '-D', self.get_config_dir(),
+ '-n', bename
+ ]
+
+ try:
+ result = subprocess.check_output(cmd, encoding='utf-8')
+ except subprocess.CalledProcessError as e:
+ self.log.debug("Command: %s failed with the return code %s and the error %s",
+ format_cmd_list(cmd), e.returncode, e.output)
+ return False
+ return True
+
def searchAccessLog(self, pattern):
"""
Search all the access logs
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index eae3154ce..1dd882566 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -491,6 +491,18 @@ class DSLdapObject(DSLogging):
entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys, serverctrls=self._server_controls, clientctrls=self._client_controls)[0]
return entry.getValuesSet(keys)
+ def get_attrs_vals_utf8(self, keys, use_json=False):
+ self._log.debug("%s get_attrs_vals_utf8(%r)" % (self._dn, keys))
+ if self._instance.state != DIRSRV_STATE_ONLINE:
+ raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
+ entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys, serverctrls=self._server_controls, clientctrls=self._client_controls)[0]
+ vset = entry.getValuesSet(keys)
+ r = {}
+ for (k, vo) in vset.items():
+ r[k] = ensure_list_str(vo)
+ return r
+
+
def get_attr_vals(self, key, use_json=False):
self._log.debug("%s get_attr_vals(%r)" % (self._dn, key))
# We might need to add a state check for NONE dn.
diff --git a/src/lib389/lib389/cli_base/__init__.py b/src/lib389/lib389/cli_base/__init__.py
index 14cb99944..6175cc30a 100644
--- a/src/lib389/lib389/cli_base/__init__.py
+++ b/src/lib389/lib389/cli_base/__init__.py
@@ -366,7 +366,7 @@ def setup_script_logger(name, verbose=False):
"""
root = logging.getLogger()
log = logging.getLogger(name)
- log_handler = logging.StreamHandler()
+ log_handler = logging.StreamHandler(sys.stdout)
if verbose:
log.setLevel(logging.DEBUG)
diff --git a/src/lib389/lib389/cli_conf/__init__.py b/src/lib389/lib389/cli_conf/__init__.py
index 1ba3b4af9..836e05d33 100644
--- a/src/lib389/lib389/cli_conf/__init__.py
+++ b/src/lib389/lib389/cli_conf/__init__.py
@@ -70,33 +70,33 @@ def generic_object_edit(dsldap_object, log, args, arg_to_attr):
def generic_show(inst, basedn, log, args):
"""Display plugin configuration."""
plugin = args.plugin_cls(inst)
- print(plugin.display())
+ log.info(plugin.display())
def generic_enable(inst, basedn, log, args):
plugin = args.plugin_cls(inst)
if plugin.status():
- print("Plugin '%s' already enabled", plugin.rdn)
+ log.info("Plugin '%s' already enabled" % plugin.rdn)
else:
plugin.enable()
- print("Enabled plugin '%s'", plugin.rdn)
+ log.info("Enabled plugin '%s'" % plugin.rdn)
def generic_disable(inst, basedn, log, args):
plugin = args.plugin_cls(inst)
if not plugin.status():
- print("Plugin '%s' already disabled", plugin.rdn)
+ log.info("Plugin '%s' already disabled" % plugin.rdn)
else:
plugin.disable()
- print("Disabled plugin '%s'", plugin.rdn)
+ log.info("Disabled plugin '%s'" % plugin.rdn)
def generic_status(inst, basedn, log, args):
plugin = args.plugin_cls(inst)
if plugin.status() is True:
- print("Plugin '%s' is enabled", plugin.rdn)
+ log.info("Plugin '%s' is enabled" % plugin.rdn)
else:
- print("Plugin '%s' is disabled", plugin.rdn)
+ log.info("Plugin '%s' is disabled" % plugin.rdn)
def add_generic_plugin_parsers(subparser, plugin_cls):
diff --git a/src/lib389/lib389/cli_conf/monitor.py b/src/lib389/lib389/cli_conf/monitor.py
new file mode 100644
index 000000000..9e8938138
--- /dev/null
+++ b/src/lib389/lib389/cli_conf/monitor.py
@@ -0,0 +1,64 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 William Brown <[email protected]>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+from lib389.monitor import Monitor, MonitorLDBM
+from lib389.backend import Backends
+
+from lib389.utils import ensure_str
+
+def _format_status(log, mtype, json=False):
+ if json:
+ print(mtype.get_status_json())
+ else:
+ status_dict = mtype.get_status()
+ log.info('dn: ' + mtype._dn)
+ for k, v in list(status_dict.items()):
+ # For each value in the multivalue attr
+ for vi in v:
+ log.info('{}: {}'.format(k, vi))
+
+
+def monitor(inst, basedn, log, args):
+ monitor = Monitor(inst)
+ _format_status(log, monitor, args.json)
+
+
+def backend_monitor(inst, basedn, log, args):
+ bes = Backends(inst)
+ if args.backend:
+ be = bes.get(args.backend)
+ be_monitor = be.get_monitor()
+ be_monitor.get_status()
+ else:
+ for be in bes.list():
+ be_monitor = be.get_monitor()
+ be_monitor.get_status()
+ # Inejct a new line for now ... see https://pagure.io/389-ds-base/issue/50189
+ print("")
+
+
+def ldbm_monitor(inst, basedn, log, args):
+ ldbm_monitor = MonitorLDBM(inst)
+ ldbm_monitor.get_status()
+
+
+def create_parser(subparsers):
+ monitor_parser = subparsers.add_parser('monitor', help="Monitor the state of the instance")
+
+ subcommands = monitor_parser.add_subparsers(help='action')
+
+ server_parser = subcommands.add_parser('server', help="Monitor the server statistics, connectinos and operations")
+ server_parser.set_defaults(func=monitor)
+
+ ldbm_parser = subcommands.add_parser('ldbm', help="Monitor the ldbm statistics, such as dbcache")
+ ldbm_parser.set_defaults(func=ldbm_monitor)
+
+ backend_parser = subcommands.add_parser('backend', help="Monitor the behaviour of a backend database")
+ backend_parser.add_argument('backend', nargs='?', help="Optional name of the backend to monitor")
+ backend_parser.set_defaults(func=backend_monitor)
+
diff --git a/src/lib389/lib389/cli_conf/plugins/linkedattr.py b/src/lib389/lib389/cli_conf/plugins/linkedattr.py
index 7581e805c..3f9a6acb9 100644
--- a/src/lib389/lib389/cli_conf/plugins/linkedattr.py
+++ b/src/lib389/lib389/cli_conf/plugins/linkedattr.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2019 William Brown <[email protected]>
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -10,7 +10,26 @@ from lib389.plugins import LinkedAttributesPlugin
from lib389.cli_conf import add_generic_plugin_parsers
+def fixup(inst, basedn, log, args):
+ plugin = LinkedAttributesPlugin(inst)
+ log.info('Attempting to add task entry... This will fail if LinkedAttributes plug-in is not enabled.')
+ if not plugin.status():
+ log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
+ fixup_task = plugin.fixup(args.basedn, args.filter)
+ fixup_task.wait()
+ exitcode = fixup_task.get_exit_code()
+ if exitcode != 0:
+ log.error('LinkedAttributes fixup task for %s has failed. Please, check logs')
+ else:
+ log.info('Successfully added fixup task')
+
+
def create_parser(subparsers):
linkedattr_parser = subparsers.add_parser('linkedattr', help='Manage and configure Linked Attributes plugin')
subcommands = linkedattr_parser.add_subparsers(help='action')
add_generic_plugin_parsers(subcommands, LinkedAttributesPlugin)
+
+ fixup_parser = subcommands.add_parser('fixup', help='Run the fix-up task for linked attributes plugin')
+ fixup_parser.add_argument('basedn', help="basedn that contains entries to fix up")
+ fixup_parser.add_argument('-f', '--filter', help='Filter for entries to fix up linked attributes.')
+ fixup_parser.set_defaults(func=fixup)
diff --git a/src/lib389/lib389/cli_conf/plugins/memberof.py b/src/lib389/lib389/cli_conf/plugins/memberof.py
index 0ccbed315..90fb77412 100644
--- a/src/lib389/lib389/cli_conf/plugins/memberof.py
+++ b/src/lib389/lib389/cli_conf/plugins/memberof.py
@@ -1,5 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2019 William Brown <[email protected]>
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -74,12 +75,15 @@ def memberof_del_config(inst, basedn, log, args):
def fixup(inst, basedn, log, args):
plugin = MemberOfPlugin(inst)
log.info('Attempting to add task entry... This will fail if MemberOf plug-in is not enabled.')
- assert plugin.status(), "'%s' is disabled. Fix up task can't be executed" % plugin.rdn
+ if not plugin.status():
+ log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
fixup_task = plugin.fixup(args.DN, args.filter)
fixup_task.wait()
exitcode = fixup_task.get_exit_code()
- assert exitcode == 0, 'MemberOf fixup task for %s has failed. Please, check logs'
- log.info('Successfully added task entry for %s', args.DN)
+ if exitcode != 0:
+ log.error('MemberOf fixup task for %s has failed. Please, check logs')
+ else:
+ log.info('Successfully added task entry')
def _add_parser_args(parser):
diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py
index 7854fbe2c..0b6e948d2 100644
--- a/src/lib389/lib389/cli_ctl/dbtasks.py
+++ b/src/lib389/lib389/cli_ctl/dbtasks.py
@@ -1,5 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2019 William Brown <[email protected]>
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -65,6 +66,13 @@ def dbtasks_backups(inst, log, args):
log.info("backups successful")
+def dbtasks_verify(inst, log, args):
+ if not inst.dbverify(bename=args.backend):
+ log.fatal("dbverify failed")
+ return False
+ else:
+ log.info("dbverify successful")
+
def create_parser(subcommands):
db2index_parser = subcommands.add_parser('db2index', help="Initialise a reindex of the server database. The server must be stopped for this to proceed.")
@@ -85,6 +93,11 @@ def create_parser(subcommands):
db2ldif_parser.add_argument('--encrypted', help="Export encrypted attributes", default=False, action='store_true')
db2ldif_parser.set_defaults(func=dbtasks_db2ldif)
+ dbverify_parser = subcommands.add_parser('dbverify', help="Perform a db verification. You should only do this at direction of support")
+ dbverify_parser.add_argument('backend', help="The backend to verify. IE userRoot")
+ dbverify_parser.set_defaults(func=dbtasks_verify)
+
+
bak2db_parser = subcommands.add_parser('bak2db', help="Restore a BDB backup of the database. The server must be stopped for this to proceed.")
bak2db_parser.add_argument('archive', help="The archive to restore. This will erase all current server databases.")
bak2db_parser.set_defaults(func=dbtasks_bak2db)
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
index 0102630a7..325261c69 100644
--- a/src/lib389/lib389/monitor.py
+++ b/src/lib389/lib389/monitor.py
@@ -85,6 +85,26 @@ class Monitor(DSLdapObject):
starttime = self.get_attr_vals_utf8('starttime')
return (dtablesize, readwaiters, entriessent, bytessent, currenttime, starttime)
+ def get_status(self, use_json=False):
+ return self.get_attrs_vals_utf8([
+ 'version',
+ 'threads',
+ 'connection',
+ 'currentconnections',
+ 'totalconnections',
+ 'currentconnectionsatmaxthreads',
+ 'maxthreadsperconnhits',
+ 'dtablesize',
+ 'readwaiters',
+ 'opsinitiated',
+ 'opscompleted',
+ 'entriessent',
+ 'bytessent',
+ 'currenttime',
+ 'starttime',
+ 'nbackends',
+ ])
+
class MonitorLDBM(DSLdapObject):
def __init__(self, instance, dn=None):
@@ -155,6 +175,8 @@ class MonitorBackend(DSLdapObject):
'currentnormalizeddncachecount'
])
+ # Issue: get status should return a dict and the called should be
+ # formatting it. See: https://pagure.io/389-ds-base/issue/50189
def get_status(self, use_json=False):
if use_json:
print(self.get_attrs_vals_json(self._backend_keys))
| 0 |
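The new get_attrs_vals_utf8() returns a plain dict mapping each attribute name to a list of decoded string values, and the monitor CLI then walks that dict and logs one "attr: value" line per value. A standalone sketch of that formatting step, using a hand-built dict in place of a live cn=monitor search (the attribute values are illustrative):

    status = {
        "version": ["389-Directory/1.4.1"],
        "currentconnections": ["3"],
        "totalconnections": ["127"],
    }

    def format_status(status_dict):
        lines = []
        for attr, values in status_dict.items():
            # multi-valued attributes produce one line per value
            for value in values:
                lines.append("{}: {}".format(attr, value))
        return "\n".join(lines)

    print(format_status(status))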
4cb3e1d280133d47919b11e52d33b8d375f2d459
|
389ds/389-ds-base
|
Bug 750625 - Fix Coverity (11058, 11059) Dereference null return value
https://bugzilla.redhat.com/show_bug.cgi?id=750625
plugins/referint/referint.c (_update_one_per_mod, _update_all_per_mod)
Bug Description: Dereferencing a null pointer "dnParts".
Missing a check of the NULL possibilities for origDN and exploded origDN.
Fix Description: check if origDN and dnParts are NULL or not.
If NULL, it does not go forward, but goto bail.
|
commit 4cb3e1d280133d47919b11e52d33b8d375f2d459
Author: Noriko Hosoi <[email protected]>
Date: Tue Nov 1 17:00:15 2011 -0700
Bug 750625 - Fix Coverity (11058, 11059) Dereference null return value
https://bugzilla.redhat.com/show_bug.cgi?id=750625
plugins/referint/referint.c (_update_one_per_mod, _update_all_per_mod)
Bug Description: Dereferencing a null pointer "dnParts".
Missing a check of the NULL possibilities for origDN and exploded origDN.
Fix Description: check if origDN and dnParts are NULL or not.
If NULL, it does not go forward, but goto bail.
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 57ee6dfd0..9c655ca79 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -394,8 +394,19 @@ _update_one_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
int nval = 0;
Slapi_Value *v = NULL;
+ if (NULL == origDN) {
+ slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
+ "_update_one_value: NULL dn was passed\n");
+ goto bail;
+ }
/* need to put together rdn into a dn */
dnParts = slapi_ldap_explode_dn( origDN, 0 );
+ if (NULL == dnParts) {
+ slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
+ "_update_one_value: failed to explode dn %s\n",
+ origDN);
+ goto bail;
+ }
if (NULL == newRDN) {
newRDN = dnParts[0];
}
@@ -510,7 +521,7 @@ _update_one_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
}
slapi_ch_free_string(&newDN);
}
-
+bail:
return rc;
}
@@ -554,7 +565,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
rc = _do_modify(mod_pb, entrySDN, mods, txn);
if (rc) {
slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
- "_update_one_value: entry %s: deleting \"%s: %s\" failed (%d)"
+ "_update_all_per_mod: entry %s: deleting \"%s: %s\" failed (%d)"
"\n", slapi_sdn_get_dn(entrySDN), attrName, origDN, rc);
}
} else {
@@ -563,8 +574,19 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
int nval = 0;
Slapi_Value *v = NULL;
+ if (NULL == origDN) {
+ slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
+ "_update_all_per_mod: NULL dn was passed\n");
+ goto bail;
+ }
/* need to put together rdn into a dn */
dnParts = slapi_ldap_explode_dn( origDN, 0 );
+ if (NULL == dnParts) {
+ slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
+ "_update_all_per_mod: failed to explode dn %s\n",
+ origDN);
+ goto bail;
+ }
if (NULL == newRDN) {
newRDN = dnParts[0];
}
@@ -635,7 +657,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods), txn);
if (rc) {
slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
- "_update_all_value: entry %s failed (%d)\n",
+ "_update_all_per_mod: entry %s failed (%d)\n",
slapi_sdn_get_dn(entrySDN), rc);
}
@@ -647,7 +669,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
slapi_ch_free_string(&newDN);
slapi_mods_free(&smods);
}
-
+bail:
return rc;
}
| 0 |
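The C fix simply refuses to dereference a DN that is NULL or that cannot be exploded into RDN components. The same defensive pattern sketched in Python with python-ldap's ldap.dn.explode_dn (the error handling here is illustrative and not taken from the plugin):

    import ldap
    import ldap.dn

    def first_rdn(orig_dn):
        if orig_dn is None:
            # matches the new "NULL dn was passed" guard in the plugin
            return None
        try:
            parts = ldap.dn.explode_dn(orig_dn)
        except ldap.LDAPError:
            # matches the new "failed to explode dn" guard
            return None
        if not parts:
            return None
        return parts[0]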
84483697f55431c29bc9dbc82ce60908a98808cd
|
389ds/389-ds-base
|
Ticket 49463 - After cleanALLruv, there is a flow of keep alive DEL
Bug Description:
When cleanAllRuv is launched, it spawn cleanAllRuv on all replicas.
Each replica will clean its changelog and database RUV AND in addition
will DEL the keep alive entry of the target ReplicaID.
So for the same entry (keep alive) there will be as many DEL as there are replicas
This flow of DEL is useless as only one DEL is enough.
In addition because of https://pagure.io/389-ds-base/issue/49466, replication may
loop on each of those DELs.
Fix Description:
The fix is only to prevent the flow of DEL.
It adds a flag ('original_task') in the task payload.
The server receiving the task (replica_execute_cleanall_ruv_task) flags the
task as 'original_task'.
In the opposite, the propagated cleanAllRuv (multimaster_extop_cleanruv) does
not flag the task as 'original_task'
Only original task does the DEL of the keep alive entry.
Note the propageted payload (extop) is not changed. In a mixed version
environment "old" servers will DEL the keep alive and flow can still happen
https://pagure.io/389-ds-base/issue/49466
Reviewed by: Ludwig Krispenz
Platforms tested: F23
Flag Day: no
Doc impact: no
|
commit 84483697f55431c29bc9dbc82ce60908a98808cd
Author: Thierry Bordaz <[email protected]>
Date: Fri Dec 1 16:23:11 2017 +0100
Ticket 49463 - After cleanALLruv, there is a flow of keep alive DEL
Bug Description:
When cleanAllRuv is launched, it spawn cleanAllRuv on all replicas.
Each replica will clean its changelog and database RUV AND in addition
will DEL the keep alive entry of the target ReplicaID.
So for the same entry (keep alive) there will be as many DEL as there are replicas
This flow of DEL is useless as only one DEL is enough.
In addition because of https://pagure.io/389-ds-base/issue/49466, replication may
loop on each of those DELs.
Fix Description:
The fix is only to prevent the flow of DEL.
It adds a flag ('original_task') in the task payload.
The server receiving the task (replica_execute_cleanall_ruv_task) flags the
task as 'original_task'.
In the opposite, the propagated cleanAllRuv (multimaster_extop_cleanruv) does
not flag the task as 'original_task'
Only original task does the DEL of the keep alive entry.
Note the propageted payload (extop) is not changed. In a mixed version
environment "old" servers will DEL the keep alive and flow can still happen
https://pagure.io/389-ds-base/issue/49466
Reviewed by: Ludwig Krispenz
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 4e206a0fc..e08fec752 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -783,12 +783,37 @@ void multimaster_mtnode_construct_replicas(void);
void multimaster_be_state_change(void *handle, char *be_name, int old_be_state, int new_be_state);
+#define CLEANRIDSIZ 64 /* maximum number for concurrent CLEANALLRUV tasks */
+
+typedef struct _cleanruv_data
+{
+ Object *repl_obj;
+ Replica *replica;
+ ReplicaId rid;
+ Slapi_Task *task;
+ struct berval *payload;
+ CSN *maxcsn;
+ char *repl_root;
+ Slapi_DN *sdn;
+ char *certify;
+ char *force;
+ PRBool original_task;
+} cleanruv_data;
+
+typedef struct _cleanruv_purge_data
+{
+ int cleaned_rid;
+ const Slapi_DN *suffix_sdn;
+ char *replName;
+ char *replGen;
+} cleanruv_purge_data;
+
/* In repl5_replica_config.c */
int replica_config_init(void);
void replica_config_destroy(void);
int get_replica_type(Replica *r);
int replica_execute_cleanruv_task_ext(Object *r, ReplicaId rid);
-void add_cleaned_rid(ReplicaId rid, Replica *r, char *maxcsn, char *forcing);
+void add_cleaned_rid(cleanruv_data *data, char *maxcsn);
int is_cleaned_rid(ReplicaId rid);
int replica_cleanall_ruv_abort(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg);
void replica_cleanallruv_thread_ext(void *arg);
@@ -808,29 +833,7 @@ void set_cleaned_rid(ReplicaId rid);
void cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fmt, ...);
char *replica_cleanallruv_get_local_maxcsn(ReplicaId rid, char *base_dn);
-#define CLEANRIDSIZ 64 /* maximum number for concurrent CLEANALLRUV tasks */
-typedef struct _cleanruv_data
-{
- Object *repl_obj;
- Replica *replica;
- ReplicaId rid;
- Slapi_Task *task;
- struct berval *payload;
- CSN *maxcsn;
- char *repl_root;
- Slapi_DN *sdn;
- char *certify;
- char *force;
-} cleanruv_data;
-
-typedef struct _cleanruv_purge_data
-{
- int cleaned_rid;
- const Slapi_DN *suffix_sdn;
- char *replName;
- char *replGen;
-} cleanruv_purge_data;
/* replutil.c */
LDAPControl *create_managedsait_control(void);
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 6ffaa19e3..2861020ea 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -2122,6 +2122,7 @@ replica_check_for_tasks(Replica *r, Slapi_Entry *e)
char csnstr[CSN_STRSIZE];
char *token = NULL;
char *forcing;
+ PRBool original_task;
char *csnpart;
char *ridstr;
char *iter = NULL;
@@ -2153,8 +2154,15 @@ replica_check_for_tasks(Replica *r, Slapi_Entry *e)
csn_init_by_string(maxcsn, csnpart);
csn_as_string(maxcsn, PR_FALSE, csnstr);
forcing = ldap_utf8strtok_r(iter, ":", &iter);
+ original_task = PR_TRUE;
if (forcing == NULL) {
forcing = "no";
+ } else if (!strcasecmp(forcing, "yes") || !strcasecmp(forcing, "no")) {
+ /* forcing was correctly set, lets try to read the original task flag */
+ token = ldap_utf8strtok_r(iter, ":", &iter);
+ if (token && !atoi(token)) {
+ original_task = PR_FALSE;
+ }
}
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name, "CleanAllRUV Task - cleanAllRUV task found, "
@@ -2192,6 +2200,13 @@ replica_check_for_tasks(Replica *r, Slapi_Entry *e)
data->force = slapi_ch_strdup(forcing);
data->repl_root = NULL;
+ /* This is a corner case, a cleanAllRuv task was interrupted by a shutdown or a crash
+ * We retrieved from type_replicaCleanRUV if the cleanAllRuv request
+ * was received from a direct task ADD or if was received via
+ * the cleanAllRuv extop.
+ */
+ data->original_task = original_task;
+
thread = PR_CreateThread(PR_USER_THREAD, replica_cleanallruv_thread_ext,
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
@@ -2286,6 +2301,12 @@ replica_check_for_tasks(Replica *r, Slapi_Entry *e)
data->sdn = slapi_sdn_dup(r->repl_root);
data->certify = slapi_ch_strdup(certify);
+ /* This is a corner case, a cleanAllRuv task was interrupted by a shutdown or a crash
+ * Let's assum this replica was the original receiver of the task.
+ * This flag has no impact on Abort cleanAllRuv
+ */
+ data->original_task = PR_TRUE;
+
thread = PR_CreateThread(PR_USER_THREAD, replica_abort_task_thread,
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 005528a41..95b933bb8 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1573,6 +1573,11 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
data->repl_root = slapi_ch_strdup(basedn);
data->force = slapi_ch_strdup(force_cleaning);
+ /* It is either a consequence of a direct ADD cleanAllRuv task
+ * or modify of the replica to add nsds5task: cleanAllRuv
+ */
+ data->original_task = PR_TRUE;
+
thread = PR_CreateThread(PR_USER_THREAD, replica_cleanallruv_thread,
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
@@ -1702,7 +1707,7 @@ replica_cleanallruv_thread(void *arg)
/*
* Add the cleanallruv task to the repl config - so we can handle restarts
*/
- add_cleaned_rid(data->rid, data->replica, csnstr, data->force); /* marks config that we started cleaning a rid */
+ add_cleaned_rid(data, csnstr); /* marks config that we started cleaning a rid */
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Cleaning rid (%d)...", data->rid);
/*
* First, wait for the maxcsn to be covered
@@ -1878,7 +1883,13 @@ done:
*/
delete_cleaned_rid_config(data);
check_replicas_are_done_cleaning(data);
- remove_keep_alive_entry(data->task, data->rid, data->repl_root);
+ if (data->original_task) {
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Original task deletes Keep alive entry (%d).", data->rid);
+ remove_keep_alive_entry(data->task, data->rid, data->repl_root);
+ } else {
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Propagated task does not delete Keep alive entry (%d).", data->rid);
+ }
+
clean_agmts(data);
remove_cleaned_rid(data->rid);
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d).", data->rid);
@@ -2029,7 +2040,7 @@ check_replicas_are_done_cleaning(cleanruv_data *data)
"Waiting for all the replicas to finish cleaning...");
csn_as_string(data->maxcsn, PR_FALSE, csnstr);
- filter = PR_smprintf("(%s=%d:%s:%s)", type_replicaCleanRUV, (int)data->rid, csnstr, data->force);
+ filter = PR_smprintf("(%s=%d:%s:%s:%d)", type_replicaCleanRUV, (int)data->rid, csnstr, data->force, data->original_task ? 1 : 0);
while (not_all_cleaned && !is_task_aborted(data->rid) && !slapi_is_shutting_down()) {
agmt_obj = agmtlist_get_first_agreement_for_replica(data->replica);
if (agmt_obj == NULL) {
@@ -2502,7 +2513,7 @@ set_cleaned_rid(ReplicaId rid)
* Add the rid and maxcsn to the repl config (so we can resume after a server restart)
*/
void
-add_cleaned_rid(ReplicaId rid, Replica *r, char *maxcsn, char *forcing)
+add_cleaned_rid(cleanruv_data *cleanruv_data, char *maxcsn)
{
Slapi_PBlock *pb;
struct berval *vals[2];
@@ -2512,6 +2523,16 @@ add_cleaned_rid(ReplicaId rid, Replica *r, char *maxcsn, char *forcing)
char data[CSN_STRSIZE + 10];
char *dn;
int rc;
+ ReplicaId rid;
+ Replica *r;
+ char *forcing;
+
+ if (data == NULL) {
+ return;
+ }
+ rid = cleanruv_data->rid;
+ r = cleanruv_data->replica;
+ forcing = cleanruv_data->force;
if (r == NULL || maxcsn == NULL) {
return;
@@ -2519,7 +2540,7 @@ add_cleaned_rid(ReplicaId rid, Replica *r, char *maxcsn, char *forcing)
/*
* Write the rid & maxcsn to the config entry
*/
- val.bv_len = PR_snprintf(data, sizeof(data), "%d:%s:%s", rid, maxcsn, forcing);
+ val.bv_len = PR_snprintf(data, sizeof(data), "%d:%s:%s:%d", rid, maxcsn, forcing, cleanruv_data->original_task ? 1 : 0);
dn = replica_get_dn(r);
pb = slapi_pblock_new();
mod.mod_op = LDAP_MOD_ADD | LDAP_MOD_BVALUES;
@@ -2961,6 +2982,7 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
data->repl_root = slapi_ch_strdup(base_dn);
data->sdn = NULL;
data->certify = slapi_ch_strdup(certify_all);
+ data->original_task = PR_TRUE;
thread = PR_CreateThread(PR_USER_THREAD, replica_abort_task_thread,
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index c49c6bd8d..68e2544b4 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1412,6 +1412,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
data->rid = rid;
data->repl_root = slapi_ch_strdup(repl_root);
data->certify = slapi_ch_strdup(certify_all);
+ data->original_task = PR_FALSE;
/*
* Set the aborted rid and stop the cleaning
*/
@@ -1555,6 +1556,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
data->payload = slapi_ch_bvdup(extop_payload);
data->force = slapi_ch_strdup(force);
data->repl_root = slapi_ch_strdup(repl_root);
+ data->original_task = PR_FALSE;
thread = PR_CreateThread(PR_USER_THREAD, replica_cleanallruv_thread_ext,
(void *)data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
| 0 |
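The fix piggybacks the new flag on the existing nsds5ReplicaCleanRUV config value, which now carries four colon-separated fields: rid, maxcsn, the forcing flag, and 1/0 for whether this replica received the original task. A small Python sketch of encoding and decoding that value, following the formats used in add_cleaned_rid() and replica_check_for_tasks() above:

    def encode_clean_ruv(rid, maxcsn, forcing="no", original_task=True):
        # "rid:maxcsn:forcing:original" as written by add_cleaned_rid()
        return "%d:%s:%s:%d" % (rid, maxcsn, forcing, 1 if original_task else 0)

    def decode_clean_ruv(value):
        parts = value.split(":")
        rid, maxcsn = int(parts[0]), parts[1]
        forcing = parts[2] if len(parts) > 2 else "no"
        # older servers wrote only three fields; treat those as the original task,
        # matching the default in replica_check_for_tasks()
        original_task = True
        if len(parts) > 3 and not int(parts[3]):
            original_task = False
        return rid, maxcsn, forcing, original_task

Only the replica that decodes original_task as True deletes the keep-alive entry, which is what stops the flow of duplicate DEL operations.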