commit_id: string
repo: string
commit_message: string
diff: string
label: int64
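The schema above pairs each field with its type. As a rough illustration only, here is how one such record might be read in Python, assuming the dataset is stored as JSON Lines; the storage format and file name are assumptions, not stated in this section:

```python
import json

# Hypothetical example: the file name and JSON Lines layout are assumptions,
# since this section only shows the schema and one sample record.
DATASET_PATH = "commits.jsonl"  # assumed: one JSON record per line

with open(DATASET_PATH, encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        commit_id = record["commit_id"]            # string
        repo = record["repo"]                      # string
        commit_message = record["commit_message"]  # string
        diff = record["diff"]                      # string
        label = int(record["label"])               # int64
        print(commit_id, repo, label)
        break  # inspect only the first record
```

The sample record below follows this schema.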
commit_id: 7a92bcfbf3c6e4f04f4e218764cde7ef09eda99c
repo: 389ds/389-ds-base
commit_message: Ticket 48832 - CI tests - convert all the tests to use py.test Description: Made sure all the tests use py.test instead of isolated run functions. Also made some more fixes to a few tests. https://fedorahosted.org/389/ticket/48832 Reviewed by: nhosoi(Thanks!)
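The commit applies the same mechanical change to every test suite: the per-file run_isolated() driver is removed and the `if __name__ == '__main__':` block calls pytest directly on the file. A minimal sketch of the pattern, with hypothetical suite and test names standing in for each file's own (the replacement lines are quoted from the diff; note that newer pytest versions expect a list of arguments rather than a single string):

```python
import os

import pytest

# Before (hypothetical suite/test names): each file carried its own driver.
#
# def run_isolated():
#     global installation1_prefix
#     installation1_prefix = None
#
#     topo = topology(True)
#     test_example_init(topo)
#     test_example_(topo)
#
# if __name__ == '__main__':
#     run_isolated()

# After: the file is handed straight to pytest, as in the diff below.
if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # Modern pytest expects a list instead: pytest.main(["-s", CURRENT_FILE])
    pytest.main("-s %s" % CURRENT_FILE)
```

The record's diff field follows in full.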
commit 7a92bcfbf3c6e4f04f4e218764cde7ef09eda99c Author: Mark Reynolds <[email protected]> Date: Tue Jul 26 21:22:26 2016 -0400 Ticket 48832 - CI tests - convert all the tests to use py.test Description: Made sure all the tests use py.test instead of isolated run functions. Also made some more fixes to a few tests. https://fedorahosted.org/389/ticket/48832 Reviewed by: nhosoi(Thanks!) diff --git a/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py b/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py index 38b78c202..b7ac86915 100644 --- a/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py +++ b/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py @@ -77,15 +77,8 @@ def test_acct_usability_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_acct_usability_init(topo) - test_acct_usability_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py b/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py index 640436086..14c6851b7 100644 --- a/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py +++ b/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py @@ -74,15 +74,8 @@ def test_acctpolicy_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_acctpolicy_init(topo) - test_acctpolicy_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py b/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py index 9b704d65d..6116aed4c 100644 --- a/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py +++ b/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py @@ -77,15 +77,8 @@ def test_attr_encrypt_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_attr_encrypt_init(topo) - test_attr_encrypt_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py index 97862109f..6550f43a4 100644 --- a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py +++ b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py @@ -229,15 +229,8 @@ def test_attr_uniqueness(topology): log.info('test_attr_uniqueness: PASS\n') -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_attr_uniqueness_init(topo) - test_attr_uniqueness(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_test.py index 940dd7f69..f3a1113f0 100644 --- 
a/dirsrvtests/tests/suites/automember_plugin/automember_test.py +++ b/dirsrvtests/tests/suites/automember_plugin/automember_test.py @@ -77,15 +77,8 @@ def test_automember_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_automember_init(topo) - test_automember_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py index 97c17257e..aa688dea3 100644 --- a/dirsrvtests/tests/suites/betxns/betxn_test.py +++ b/dirsrvtests/tests/suites/betxns/betxn_test.py @@ -233,21 +233,11 @@ def test_betxn_memberof(topology): # # Done # - log.info('test_betxn_memberof: PASSED') -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_betxn_init(topo) - test_betxt_7bit(topo) - test_betxn_attr_uniqueness(topo) - test_betxn_memberof(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py b/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py index fe00ce0be..dc7807bbe 100644 --- a/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py +++ b/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py @@ -74,15 +74,8 @@ def test_chaining_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_chaining_init(topo) - test_chaining_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py index 824fa74ce..231b0113d 100644 --- a/dirsrvtests/tests/suites/clu/clu_test.py +++ b/dirsrvtests/tests/suites/clu/clu_test.py @@ -90,19 +90,8 @@ def test_clu_pwdhash(topology): log.info('test_clu_pwdhash: PASSED') -def run_isolated(): - ''' - This test is for the simple scripts that don't have a lot of options or - points of failure. Scripts that do, should have their own individual tests. - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_clu_init(topo) - test_clu_pwdhash(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/db2ldif_test.py b/dirsrvtests/tests/suites/clu/db2ldif_test.py index 3769dc409..be4405ab7 100644 --- a/dirsrvtests/tests/suites/clu/db2ldif_test.py +++ b/dirsrvtests/tests/suites/clu/db2ldif_test.py @@ -65,22 +65,8 @@ def test_db2ldif_init(topology): return -def run_isolated(): - ''' - Test db2lidf/db2ldif.pl - test/stress functionality, all the command line options, - valid/invalid option combinations, etc, etc. - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_db2ldif_init(topo) - - # test 1 function... - # test 2 function... - # ... 
- - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/collation_plugin/collatation_test.py b/dirsrvtests/tests/suites/collation_plugin/collatation_test.py index b941c8ec4..d9151657e 100644 --- a/dirsrvtests/tests/suites/collation_plugin/collatation_test.py +++ b/dirsrvtests/tests/suites/collation_plugin/collatation_test.py @@ -74,20 +74,8 @@ def test_collatation_(topology): return -def test_collatation_final(topology): - log.info('collatation test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_collatation_init(topo) - test_collatation_(topo) - test_collatation_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/cos_plugin/cos_test.py b/dirsrvtests/tests/suites/cos_plugin/cos_test.py index 2e0355f1d..f06b9d4e4 100644 --- a/dirsrvtests/tests/suites/cos_plugin/cos_test.py +++ b/dirsrvtests/tests/suites/cos_plugin/cos_test.py @@ -74,15 +74,8 @@ def test_cos_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_cos_init(topo) - test_cos_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/deref_plugin/deref_test.py b/dirsrvtests/tests/suites/deref_plugin/deref_test.py index d0200dec5..0153932f7 100644 --- a/dirsrvtests/tests/suites/deref_plugin/deref_test.py +++ b/dirsrvtests/tests/suites/deref_plugin/deref_test.py @@ -74,15 +74,8 @@ def test_deref_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_deref_init(topo) - test_deref_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py index 5e9a13752..8930ebd36 100644 --- a/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py @@ -74,15 +74,8 @@ def test_disk_monitor_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_disk_monitor_init(topo) - test_disk_monitor_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py b/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py index 69ce344f6..101d65cca 100644 --- a/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py +++ b/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py @@ -74,15 +74,8 @@ def test_distrib_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_distrib_init(topo) - test_distrib_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + 
CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/dna_plugin/dna_test.py b/dirsrvtests/tests/suites/dna_plugin/dna_test.py index e6fb745b9..18b871b0e 100644 --- a/dirsrvtests/tests/suites/dna_plugin/dna_test.py +++ b/dirsrvtests/tests/suites/dna_plugin/dna_test.py @@ -3,7 +3,7 @@ # All rights reserved. # # License: GPL (version 3 or any later version). -# See LICENSE for details. +# See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import os @@ -222,13 +222,8 @@ def test_dna_(topology): return -def run_isolated(): - topo = topology(True) - test_dna_init(topo) - test_dna_(topo) - test_dna_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py index 5c07e0fd9..1f6862d94 100644 --- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -74,15 +74,8 @@ def test_ds_logs_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ds_logs_init(topo) - test_ds_logs_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py index 3b40488c7..fa980ec2d 100644 --- a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py +++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py @@ -391,7 +391,7 @@ def test_dynamic_plugins(topology): # Loop on the consumer - waiting for it to catch up count = 0 insync = False - while count < 10: + while count < 60: try: # Grab master's max CSN entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER) @@ -473,20 +473,8 @@ def test_dynamic_plugins(topology): log.info('#####################################################\n') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_dynamic_plugins(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py index 7b1d19c45..1601c2374 100644 --- a/dirsrvtests/tests/suites/filter/filter_test.py +++ b/dirsrvtests/tests/suites/filter/filter_test.py @@ -130,17 +130,8 @@ def test_filter_search_original_attrs(topology): log.info('test_filter_search_original_attrs: PASSED') -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - - test_filter_init(topo) - test_filter_escaped(topo) - test_filter_search_original_attrs(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/get_effective_rights/ger_test.py b/dirsrvtests/tests/suites/get_effective_rights/ger_test.py index 561d18e1c..57c97c3d7 100644 --- a/dirsrvtests/tests/suites/get_effective_rights/ger_test.py +++ b/dirsrvtests/tests/suites/get_effective_rights/ger_test.py @@ -74,15 +74,8 @@ def test_ger_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ger_init(topo) - test_ger_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ldapi/__init__.py b/dirsrvtests/tests/suites/ldapi/__init__.py index e69de29bb..40a96afc6 100644 --- a/dirsrvtests/tests/suites/ldapi/__init__.py +++ b/dirsrvtests/tests/suites/ldapi/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/dirsrvtests/tests/suites/ldapi/ldapi_test.py b/dirsrvtests/tests/suites/ldapi/ldapi_test.py index e6eb477f0..2c30fc06c 100644 --- a/dirsrvtests/tests/suites/ldapi/ldapi_test.py +++ b/dirsrvtests/tests/suites/ldapi/ldapi_test.py @@ -74,15 +74,8 @@ def test_ldapi_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ldapi_init(topo) - test_ldapi_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py b/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py index 35f5bde4e..461da3f94 100644 --- a/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py +++ b/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py @@ -74,15 +74,8 @@ def test_linked_attrs_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_linked_attrs_init(topo) - test_linked_attrs_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py b/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py index 3d432fe3c..d7a436b1d 100644 --- 
a/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py +++ b/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py @@ -74,15 +74,8 @@ def test_mapping_tree_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_mapping_tree_init(topo) - test_mapping_tree_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py index 0e407517e..c1fc30333 100644 --- a/dirsrvtests/tests/suites/monitor/monitor_test.py +++ b/dirsrvtests/tests/suites/monitor/monitor_test.py @@ -74,15 +74,8 @@ def test_monitor_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_monitor_init(topo) - test_monitor_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py b/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py index 63b162cdd..9ff4261c7 100644 --- a/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py +++ b/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py @@ -74,15 +74,8 @@ def test_pam_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_pam_init(topo) - test_pam_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py b/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py index 503a4e0cf..956d414d7 100644 --- a/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py +++ b/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py @@ -74,15 +74,8 @@ def test_passthru_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_passthru_init(topo) - test_passthru_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py index 05fe9907d..3781492a5 100644 --- a/dirsrvtests/tests/suites/password/password_test.py +++ b/dirsrvtests/tests/suites/password/password_test.py @@ -124,15 +124,8 @@ def test_password_delete_specific_password(topology): log.info('test_password_delete_specific_password: PASSED') -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_password_init(topo) - test_password_delete_specific_password(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py index 10db5b32b..dc8fdabfe 100644 --- a/dirsrvtests/tests/suites/password/pwdAdmin_test.py +++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py @@ -427,16 +427,8 @@ def 
test_pwdAdmin_config_validation(topology): e.message['desc']) -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_pwdAdmin_init(topo) - test_pwdAdmin(topo) - test_pwdAdmin_config_validation(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py b/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py index eaf7bc0b4..1c1a993fd 100644 --- a/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py +++ b/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py @@ -74,15 +74,8 @@ def test_posix_winsync_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_posix_winsync_init(topo) - test_posix_winsync_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/psearch/psearch_test.py b/dirsrvtests/tests/suites/psearch/psearch_test.py index 20ae78c35..f78e19eaa 100644 --- a/dirsrvtests/tests/suites/psearch/psearch_test.py +++ b/dirsrvtests/tests/suites/psearch/psearch_test.py @@ -74,15 +74,8 @@ def test_psearch_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_psearch_init(topo) - test_psearch_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/referint_plugin/referint_test.py b/dirsrvtests/tests/suites/referint_plugin/referint_test.py index 07d5841f3..ded1622ee 100644 --- a/dirsrvtests/tests/suites/referint_plugin/referint_test.py +++ b/dirsrvtests/tests/suites/referint_plugin/referint_test.py @@ -74,15 +74,8 @@ def test_referint_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_referint_init(topo) - test_referint_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py index 7a2040dbc..e1518bdd4 100644 --- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py +++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py @@ -173,7 +173,7 @@ def check_ruvs(msg, topology): return clean -def task_done(topology, task_dn, timeout=10): +def task_done(topology, task_dn, timeout=60): """Check if the task is complete""" attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', 'nsTaskCurrentItem', 'nsTaskTotalItems'] @@ -813,7 +813,7 @@ def test_cleanallruv_clean_force(topology): assert False # Start master 3, it should be out of sync with the other replicas... 
- topology.master3.start(timeout=10) + topology.master3.start(timeout=30) # Remove the agreements from the other masters that point to master 4 remove_master4_agmts("test_cleanallruv_clean_force", topology) @@ -871,7 +871,7 @@ def test_cleanallruv_abort(topology): # Stop master 2 log.info('test_cleanallruv_abort: stop master 2 to freeze the cleanAllRUV task...') - topology.master2.stop(timeout=10) + topology.master2.stop(timeout=30) # Run the task log.info('test_cleanallruv_abort: add the cleanAllRUV task...') @@ -904,7 +904,7 @@ def test_cleanallruv_abort(topology): # Start master 2 log.info('test_cleanallruv_abort: start master 2 to begin the restore process...') - topology.master2.start(timeout=10) + topology.master2.start(timeout=30) # # Now run the clean task task again to we can properly restore master 4 @@ -1204,26 +1204,8 @@ def test_cleanallruv_stress_clean(topology): restore_master4(topology) -def test_cleanallruv_final(topology): - log.info('cleanAllRUV test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - topo = topology(True) - - test_cleanallruv_init(topo) - test_cleanallruv_clean(topo) - test_cleanallruv_clean_restart(topo) - test_cleanallruv_clean_force(topo) - test_cleanallruv_abort(topo) - test_cleanallruv_abort_restart(topo) - test_cleanallruv_abort_certify(topo) - test_cleanallruv_stress_clean(topo) - test_cleanallruv_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py index b0da0eb53..02c27b946 100644 --- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py +++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py @@ -19,6 +19,7 @@ installation1_prefix = None WAITFOR_ASYNC_ATTR = "nsDS5ReplicaWaitForAsyncResults" + class TopologyReplication(object): def __init__(self, master1, master2, m1_m2_agmt, m2_m1_agmt): master1.open() diff --git a/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py b/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py index 5d8a530ae..fc3a55956 100644 --- a/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py +++ b/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py @@ -74,15 +74,8 @@ def test_repl_sync_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_repl_sync_init(topo) - test_repl_sync_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/resource_limits/res_limits_test.py b/dirsrvtests/tests/suites/resource_limits/res_limits_test.py index 233d2cad7..e904bbf48 100644 --- a/dirsrvtests/tests/suites/resource_limits/res_limits_test.py +++ b/dirsrvtests/tests/suites/resource_limits/res_limits_test.py @@ -74,15 +74,8 @@ def test_res_limits_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_res_limits_init(topo) - test_res_limits_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git 
a/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py b/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py index 0a36c486b..d1419fa50 100644 --- a/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py +++ b/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py @@ -74,15 +74,8 @@ def test_retrocl_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_retrocl_init(topo) - test_retrocl_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py b/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py index c626660a3..b5b978ba7 100644 --- a/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py +++ b/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py @@ -74,15 +74,8 @@ def test_reverpwd_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_reverpwd_init(topo) - test_reverpwd_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/roles_plugin/roles_test.py b/dirsrvtests/tests/suites/roles_plugin/roles_test.py index 1f1a765e9..2a5ea233b 100644 --- a/dirsrvtests/tests/suites/roles_plugin/roles_test.py +++ b/dirsrvtests/tests/suites/roles_plugin/roles_test.py @@ -74,15 +74,8 @@ def test_roles_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_roles_init(topo) - test_roles_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py index 03bda25d3..7ba20cbf6 100644 --- a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py +++ b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py @@ -12,6 +12,7 @@ import time import ldap import logging import pytest +import socket from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * @@ -302,8 +303,12 @@ def test_rootdn_access_denied_ip(topology): log.info('Running test_rootdn_access_denied_ip...') try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '127.0.0.1'), - (ldap.MOD_ADD, 'rootdn-deny-ip', '::1')]) + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, + 'rootdn-deny-ip', + '127.0.0.1'), + (ldap.MOD_ADD, + 'rootdn-deny-ip', + '::1')]) except ldap.LDAPError as e: log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + e.message['desc']) @@ -371,9 +376,11 @@ def test_rootdn_access_denied_host(topology): ''' log.info('Running test_rootdn_access_denied_host...') - + hostname = socket.gethostname() try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-deny-host', 'localhost.localdomain')]) + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, + 'rootdn-deny-host', + hostname)]) except ldap.LDAPError as e: log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' + e.message['desc']) @@ -760,21 +767,8 @@ def 
test_rootdn_config_validate(topology): log.info('test_rootdn_config_validate: PASSED') -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_rootdn_init(topo) - test_rootdn_access_specific_time(topo) - test_rootdn_access_day_of_week(topo) - test_rootdn_access_allowed_ip(topo) - test_rootdn_access_denied_ip(topo) - test_rootdn_access_allowed_host(topo) - test_rootdn_access_denied_host(topo) - test_rootdn_config_validate(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/sasl/sasl_test.py b/dirsrvtests/tests/suites/sasl/sasl_test.py index a207a90e0..589e96009 100644 --- a/dirsrvtests/tests/suites/sasl/sasl_test.py +++ b/dirsrvtests/tests/suites/sasl/sasl_test.py @@ -74,14 +74,8 @@ def test_sasl_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_sasl_init(topo) - test_sasl_(topo) - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/schema/test_schema.py b/dirsrvtests/tests/suites/schema/test_schema.py index 1c0ec0561..2d4140dea 100644 --- a/dirsrvtests/tests/suites/schema/test_schema.py +++ b/dirsrvtests/tests/suites/schema/test_schema.py @@ -206,21 +206,8 @@ def test_schema_comparewithfiles(topology): log.info('test_schema_comparewithfiles: PASSED') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = os.environ.get('PREFIX') - - topo = topology(True) - test_schema_comparewithfiles(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py b/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py index cd1e396cc..878c7f9eb 100644 --- a/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py +++ b/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py @@ -74,15 +74,8 @@ def test_schema_reload_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_schema_reload_init(topo) - test_schema_reload_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/snmp/snmp_test.py b/dirsrvtests/tests/suites/snmp/snmp_test.py index 7cbaf2817..a30b6263f 100644 --- a/dirsrvtests/tests/suites/snmp/snmp_test.py +++ b/dirsrvtests/tests/suites/snmp/snmp_test.py @@ -74,15 +74,8 @@ def test_snmp_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_snmp_init(topo) - test_snmp_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ssl/ssl_test.py b/dirsrvtests/tests/suites/ssl/ssl_test.py index ef3261fdc..42738de65 100644 --- a/dirsrvtests/tests/suites/ssl/ssl_test.py +++ b/dirsrvtests/tests/suites/ssl/ssl_test.py @@ -74,15 +74,8 @@ def test_ssl_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ssl_init(topo) - test_ssl_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py b/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py index 1ef24c2c7..91d2e5575 100644 --- a/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py +++ b/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py @@ -74,15 +74,8 @@ def test_syntax_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_syntax_init(topo) - test_syntax_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/usn_plugin/usn_test.py b/dirsrvtests/tests/suites/usn_plugin/usn_test.py index 6ecbdf8a5..2e8167284 100644 --- a/dirsrvtests/tests/suites/usn_plugin/usn_test.py +++ b/dirsrvtests/tests/suites/usn_plugin/usn_test.py @@ -74,15 +74,8 @@ def test_usn_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_usn_init(topo) - test_usn_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + 
pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/views_plugin/views_test.py b/dirsrvtests/tests/suites/views_plugin/views_test.py index d13c5f998..3168a0fca 100644 --- a/dirsrvtests/tests/suites/views_plugin/views_test.py +++ b/dirsrvtests/tests/suites/views_plugin/views_test.py @@ -74,14 +74,8 @@ def test_views_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_views_init(topo) - test_views_(topo) - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/vlv/vlv_test.py b/dirsrvtests/tests/suites/vlv/vlv_test.py index 8532dde83..57945d76f 100644 --- a/dirsrvtests/tests/suites/vlv/vlv_test.py +++ b/dirsrvtests/tests/suites/vlv/vlv_test.py @@ -74,15 +74,8 @@ def test_vlv_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_vlv_init(topo) - test_vlv_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py b/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py index ea4ab6805..e528cc8b6 100644 --- a/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py +++ b/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py @@ -74,15 +74,8 @@ def test_whoami_(topology): return -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_whoami_init(topo) - test_whoami_(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket1347760_test.py b/dirsrvtests/tests/tickets/ticket1347760_test.py index b824a3e78..bf0356307 100644 --- a/dirsrvtests/tests/tickets/ticket1347760_test.py +++ b/dirsrvtests/tests/tickets/ticket1347760_test.py @@ -44,6 +44,7 @@ log = logging.getLogger(__name__) installation1_prefix = None + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -439,6 +440,5 @@ def test_ticket1347760(topology): if __name__ == '__main__': # Run isolated # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket142_test.py b/dirsrvtests/tests/tickets/ticket142_test.py index f244bed29..bc4e7c062 100644 --- a/dirsrvtests/tests/tickets/ticket142_test.py +++ b/dirsrvtests/tests/tickets/ticket142_test.py @@ -33,6 +33,7 @@ ATTR_INHERIT_GLOBAL = 'nsslapd-pwpolicy-inherit-global' BN = 'uid=buser,' + DEFAULT_SUFFIX + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -322,6 +323,5 @@ def test_ticket142(topology): if __name__ == '__main__': # Run isolated # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47313_test.py b/dirsrvtests/tests/tickets/ticket47313_test.py index ca37a52b5..ad6550e36 100644 --- a/dirsrvtests/tests/tickets/ticket47313_test.py +++ b/dirsrvtests/tests/tickets/ticket47313_test.py @@ -150,21 +150,9 @@ def test_ticket47313_run(topology): log.info('Testcase PASSED') -def run_isolated(): - ''' - run_isolated is used to run 
these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47313_run(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47431_test.py b/dirsrvtests/tests/tickets/ticket47431_test.py index 4682963ee..00d3f98af 100644 --- a/dirsrvtests/tests/tickets/ticket47431_test.py +++ b/dirsrvtests/tests/tickets/ticket47431_test.py @@ -7,13 +7,11 @@ # --- END COPYRIGHT BLOCK --- # import os -import sys import time import ldap import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -24,9 +22,10 @@ log = logging.getLogger(__name__) installation1_prefix = None -DN_7BITPLUGIN="cn=7-bit check,%s" % DN_PLUGIN +DN_7BITPLUGIN = "cn=7-bit check,%s" % DN_PLUGIN ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None] + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -238,22 +237,9 @@ def test_ticket47431_3(topology): log.info('Test complete') -def test_ticket47431_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47431_0(topo) - test_ticket47431_1(topo) - test_ticket47431_2(topo) - test_ticket47431_3(topo) - test_ticket47431_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47490_test.py b/dirsrvtests/tests/tickets/ticket47490_test.py index 56bf24bf5..799e5f681 100644 --- a/dirsrvtests/tests/tickets/ticket47490_test.py +++ b/dirsrvtests/tests/tickets/ticket47490_test.py @@ -14,13 +14,11 @@ Created on Nov 7, 2013 import os import sys import ldap -import socket import time import logging import pytest import re -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * @@ -654,37 +652,12 @@ def test_ticket47490_nine(topology): if res is not None: assert False - -def test_ticket47490_final(topology): log.info('Testcase PASSED') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47490_init(topo) - test_ticket47490_one(topo) - test_ticket47490_two(topo) - test_ticket47490_three(topo) - test_ticket47490_four(topo) - test_ticket47490_five(topo) - test_ticket47490_six(topo) - test_ticket47490_seven(topo) - test_ticket47490_eight(topo) - test_ticket47490_nine(topo) - - test_ticket47490_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47560_test.py b/dirsrvtests/tests/tickets/ticket47560_test.py index 32c342721..4354b3871 100644 --- a/dirsrvtests/tests/tickets/ticket47560_test.py +++ b/dirsrvtests/tests/tickets/ticket47560_test.py @@ -227,27 +227,12 @@ def test_ticket47560(topology): assert result_successful is True - -def test_ticket47560_final(topology): log.info('Testcase PASSED') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47560(topo) - test_ticket47560_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47573_test.py b/dirsrvtests/tests/tickets/ticket47573_test.py index 97a48d6bb..19ded3119 100644 --- a/dirsrvtests/tests/tickets/ticket47573_test.py +++ b/dirsrvtests/tests/tickets/ticket47573_test.py @@ -317,31 +317,12 @@ def test_ticket47573_three(topology): if ent is None: assert False - -def test_ticket47573_final(topology): log.info('Testcase PASSED') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47573_init(topo) - test_ticket47573_one(topo) - test_ticket47573_two(topo) - test_ticket47573_three(topo) - - test_ticket47573_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47619_test.py b/dirsrvtests/tests/tickets/ticket47619_test.py index a440d3d2b..87977f9a9 100644 --- a/dirsrvtests/tests/tickets/ticket47619_test.py +++ b/dirsrvtests/tests/tickets/ticket47619_test.py @@ -167,6 +167,7 @@ def test_ticket47619_create_index(topology): args = {INDEX_TYPE: 'eq'} for attr in ATTRIBUTES: topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) + topology.master.restart(timeout=10) def test_ticket47619_reindex(topology): @@ -185,36 +186,9 @@ def test_ticket47619_check_indexed_search(topology): assert len(ents) == 0 -def test_ticket47619_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47619_init(topo) - - test_ticket47619_create_index(topo) - - # important restart that trigger the hang - # at restart, finding the new 'changelog' backend, the backend is acquired in Read - # preventing the reindex task to complete - topo.master.restart(timeout=10) - test_ticket47619_reindex(topo) - test_ticket47619_check_indexed_search(topo) - - test_ticket47619_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47640_test.py b/dirsrvtests/tests/tickets/ticket47640_test.py index 6b00cc84c..526ac22be 100644 --- a/dirsrvtests/tests/tickets/ticket47640_test.py +++ b/dirsrvtests/tests/tickets/ticket47640_test.py @@ -112,19 +112,10 @@ def test_ticket47640(topology): log.info('Test complete') -def test_ticket47640_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47640(topo) - test_ticket47640_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket47653MMR_test.py b/dirsrvtests/tests/tickets/ticket47653MMR_test.py index 6087992e9..107edacd1 100644 --- a/dirsrvtests/tests/tickets/ticket47653MMR_test.py +++ b/dirsrvtests/tests/tickets/ticket47653MMR_test.py @@ -442,31 +442,8 @@ def test_ticket47653_modify(topology): assert ent.getValue('postalCode') == '1929' -def test_ticket47653_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47653_init(topo) - - test_ticket47653_add(topo) - test_ticket47653_modify(topo) - - test_ticket47653_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47653_test.py b/dirsrvtests/tests/tickets/ticket47653_test.py index 1ebdb112f..86d008f37 100644 --- a/dirsrvtests/tests/tickets/ticket47653_test.py +++ b/dirsrvtests/tests/tickets/ticket47653_test.py @@ -349,32 +349,8 @@ def test_ticket47653_delete(topology): topology.standalone.delete_s(ENTRY_DN) -def test_ticket47653_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47653_init(topo) - - test_ticket47653_add(topo) - test_ticket47653_search(topo) - test_ticket47653_modify(topo) - test_ticket47653_delete(topo) - - test_ticket47653_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47669_test.py b/dirsrvtests/tests/tickets/ticket47669_test.py index adbcc461a..af05a82ee 100644 --- a/dirsrvtests/tests/tickets/ticket47669_test.py +++ b/dirsrvtests/tests/tickets/ticket47669_test.py @@ -237,29 +237,8 @@ def test_ticket47669_retrochangelog_maxage(topology): topology.standalone.log.info("ticket47669 was successfully verified.") -def test_ticket47669_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - """ - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - """ - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47669_init(topo) - test_ticket47669_changelog_maxage(topo) - test_ticket47669_changelog_triminterval(topo) - test_ticket47669_changelog_compactdbinterval(topo) - test_ticket47669_retrochangelog_maxage(topo) - test_ticket47669_final(topo) - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47676_test.py b/dirsrvtests/tests/tickets/ticket47676_test.py index 986e6205e..cdf4096dd 100644 --- a/dirsrvtests/tests/tickets/ticket47676_test.py +++ b/dirsrvtests/tests/tickets/ticket47676_test.py @@ -374,33 +374,8 @@ def test_ticket47676_reject_action(topology): assert found -def test_ticket47676_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n") - test_ticket47676_init(topo) - - test_ticket47676_skip_oc_at(topo) - test_ticket47676_reject_action(topo) - - test_ticket47676_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47714_test.py b/dirsrvtests/tests/tickets/ticket47714_test.py index 0e3112b66..2a7cd0f3a 100644 --- a/dirsrvtests/tests/tickets/ticket47714_test.py +++ b/dirsrvtests/tests/tickets/ticket47714_test.py @@ -235,30 +235,8 @@ def test_ticket47714_run_1(topology): topology.standalone.log.info("ticket47714 was successfully verified.") -def test_ticket47714_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47714_init(topo) - - test_ticket47714_run_0(topo) - - test_ticket47714_run_1(topo) - - test_ticket47714_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47721_test.py b/dirsrvtests/tests/tickets/ticket47721_test.py index 5e15c93fd..033fe70a4 100644 --- a/dirsrvtests/tests/tickets/ticket47721_test.py +++ b/dirsrvtests/tests/tickets/ticket47721_test.py @@ -433,35 +433,8 @@ def test_ticket47721_4(topology): assert schema_csn_master1 == schema_csn_master2 -def test_ticket47721_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - topo.master1.log.info("\n\n######################### Ticket 47721 ######################\n") - test_ticket47721_init(topo) - - test_ticket47721_0(topo) - test_ticket47721_1(topo) - test_ticket47721_2(topo) - test_ticket47721_3(topo) - test_ticket47721_4(topo) - - test_ticket47721_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47781_test.py b/dirsrvtests/tests/tickets/ticket47781_test.py index 0f8b49a5b..9001ab430 100644 --- a/dirsrvtests/tests/tickets/ticket47781_test.py +++ b/dirsrvtests/tests/tickets/ticket47781_test.py @@ -164,25 +164,8 @@ def test_ticket47781(topology): assert PR_False -def test_ticket47781_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47781(topo) - test_ticket47781_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47787_test.py b/dirsrvtests/tests/tickets/ticket47787_test.py index 56c8d43e3..443e22331 100644 --- a/dirsrvtests/tests/tickets/ticket47787_test.py +++ b/dirsrvtests/tests/tickets/ticket47787_test.py @@ -530,32 +530,8 @@ def test_ticket47787_2(topology): assert ent.getValue(attr) == value -def test_ticket47787_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n") - test_ticket47787_init(topo) - - test_ticket47787_2(topo) - - test_ticket47787_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47808_test.py b/dirsrvtests/tests/tickets/ticket47808_test.py index 6bff180aa..09a9ffa13 100644 --- a/dirsrvtests/tests/tickets/ticket47808_test.py +++ b/dirsrvtests/tests/tickets/ticket47808_test.py @@ -138,29 +138,11 @@ def test_ticket47808_run(topology): topology.standalone.log.info("Try to delete %s " % entry_dn_1) topology.standalone.delete_s(entry_dn_1) - - -def test_ticket47808_final(topology): log.info('Testcase PASSED') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47808_run(topo) - - test_ticket47808_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47815_test.py b/dirsrvtests/tests/tickets/ticket47815_test.py index 3a1c9feb9..5f15dc867 100644 --- a/dirsrvtests/tests/tickets/ticket47815_test.py +++ b/dirsrvtests/tests/tickets/ticket47815_test.py @@ -156,24 +156,8 @@ def test_ticket47815(topology): assert False -def test_ticket47815_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47815(topo) - test_ticket47815_final(topo) - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47819_test.py b/dirsrvtests/tests/tickets/ticket47819_test.py index 7be621070..d127fc0ac 100644 --- a/dirsrvtests/tests/tickets/ticket47819_test.py +++ b/dirsrvtests/tests/tickets/ticket47819_test.py @@ -276,24 +276,8 @@ def test_ticket47819(topology): log.info('Part 4 - passed') -def test_ticket47819_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47819(topo) - test_ticket47819_final(topo) - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47823_test.py b/dirsrvtests/tests/tickets/ticket47823_test.py index 1eab26b26..223c13985 100644 --- a/dirsrvtests/tests/tickets/ticket47823_test.py +++ b/dirsrvtests/tests/tickets/ticket47823_test.py @@ -975,47 +975,8 @@ def test_ticket47823_invalid_config_7(topology): pass -def test_ticket47823_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47823_init(topo) - - # run old/new config style that makes uniqueness checking on one subtree - test_ticket47823_one_container_add(topo) - test_ticket47823_one_container_mod(topo) - test_ticket47823_one_container_modrdn(topo) - - # run old config style that makes uniqueness checking on each defined subtrees - test_ticket47823_multi_containers_add(topo) - test_ticket47823_multi_containers_mod(topo) - test_ticket47823_multi_containers_modrdn(topo) - test_ticket47823_across_multi_containers_add(topo) - test_ticket47823_across_multi_containers_mod(topo) - test_ticket47823_across_multi_containers_modrdn(topo) - - test_ticket47823_invalid_config_1(topo) - test_ticket47823_invalid_config_2(topo) - test_ticket47823_invalid_config_3(topo) - test_ticket47823_invalid_config_4(topo) - test_ticket47823_invalid_config_5(topo) - test_ticket47823_invalid_config_6(topo) - test_ticket47823_invalid_config_7(topo) - - test_ticket47823_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47828_test.py b/dirsrvtests/tests/tickets/ticket47828_test.py index 3218ddcf8..92e3178f3 100644 --- a/dirsrvtests/tests/tickets/ticket47828_test.py +++ b/dirsrvtests/tests/tickets/ticket47828_test.py @@ -647,60 +647,9 @@ def test_ticket47828_run_31(topology): topology.standalone.delete_s(DUMMY_USER1_DN) -def test_ticket47828_final(topology): - topology.standalone.plugins.disable(name=PLUGIN_DNA) - topology.standalone.stop(timeout=10) - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47828_init(topo) - - test_ticket47828_run_0(topo) - test_ticket47828_run_1(topo) - test_ticket47828_run_2(topo) - test_ticket47828_run_3(topo) - test_ticket47828_run_4(topo) - test_ticket47828_run_5(topo) - test_ticket47828_run_6(topo) - test_ticket47828_run_7(topo) - test_ticket47828_run_8(topo) - test_ticket47828_run_9(topo) - test_ticket47828_run_10(topo) - test_ticket47828_run_11(topo) - test_ticket47828_run_12(topo) - test_ticket47828_run_13(topo) - test_ticket47828_run_14(topo) - test_ticket47828_run_15(topo) - test_ticket47828_run_16(topo) - test_ticket47828_run_17(topo) - test_ticket47828_run_18(topo) - test_ticket47828_run_19(topo) - test_ticket47828_run_20(topo) - test_ticket47828_run_21(topo) - test_ticket47828_run_22(topo) - test_ticket47828_run_23(topo) - test_ticket47828_run_24(topo) - test_ticket47828_run_25(topo) - test_ticket47828_run_26(topo) - test_ticket47828_run_27(topo) - test_ticket47828_run_28(topo) - test_ticket47828_run_29(topo) - test_ticket47828_run_30(topo) - test_ticket47828_run_31(topo) - - test_ticket47828_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket47829_test.py b/dirsrvtests/tests/tickets/ticket47829_test.py index 425a6269a..a426adab9 100644 --- a/dirsrvtests/tests/tickets/ticket47829_test.py +++ b/dirsrvtests/tests/tickets/ticket47829_test.py @@ -608,49 +608,8 @@ def test_ticket47829_indirect_active_group_4(topology): _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) -def test_ticket47829_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47829_init(topo) - - test_ticket47829_mod_active_user_1(topo) - test_ticket47829_mod_active_user_2(topo) - test_ticket47829_mod_active_user_3(topo) - test_ticket47829_mod_stage_user_1(topo) - test_ticket47829_mod_stage_user_2(topo) - test_ticket47829_mod_stage_user_3(topo) - test_ticket47829_mod_out_user_1(topo) - test_ticket47829_mod_out_user_2(topo) - test_ticket47829_mod_out_user_3(topo) - - test_ticket47829_mod_active_user_modrdn_active_user_1(topo) - test_ticket47829_mod_active_user_modrdn_stage_user_1(topo) - test_ticket47829_mod_active_user_modrdn_out_user_1(topo) - - test_ticket47829_mod_stage_user_modrdn_active_user_1(topo) - test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) - - test_ticket47829_indirect_active_group_1(topo) - test_ticket47829_indirect_active_group_2(topo) - test_ticket47829_indirect_active_group_3(topo) - test_ticket47829_indirect_active_group_4(topo) - - test_ticket47829_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47833_test.py b/dirsrvtests/tests/tickets/ticket47833_test.py index 307e994ac..7140f011b 100644 --- a/dirsrvtests/tests/tickets/ticket47833_test.py +++ b/dirsrvtests/tests/tickets/ticket47833_test.py @@ -256,19 +256,8 @@ def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology): _find_member (topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) -def test_ticket47833_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47829_init(topo) - test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) - test_ticket47833_final(topo) - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47838_test.py b/dirsrvtests/tests/tickets/ticket47838_test.py index bc84474db..d9f6b36e4 100644 --- a/dirsrvtests/tests/tickets/ticket47838_test.py +++ b/dirsrvtests/tests/tickets/ticket47838_test.py @@ -25,6 +25,7 @@ installation_prefix = None CONFIG_DN = 'cn=config' ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +MY_SECURE_PORT = '36363' RSA = 'RSA' RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) SERVERCERT = 'Server-Cert' @@ -77,7 +78,7 @@ def topology(request): def fin(): standalone.delete() - #request.addfinalizer(fin) + request.addfinalizer(fin) # Here we have standalone instance up and running return TopologyStandalone(standalone) @@ -167,7 +168,7 @@ def _47838_init(topology): topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'), (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'), - (ldap.MOD_REPLACE, 'nsslapd-secureport', str(DEFAULT_SECURE_PORT))]) + (ldap.MOD_REPLACE, 'nsslapd-secureport', MY_SECURE_PORT)]) topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), 'cn': RSA, @@ -825,47 +826,8 @@ def _47838_run_last(topology): topology.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.") -def _47838_final(topology): - log.info('Testcase PASSED') - - -def test_ticket47838(topology): - ''' - run_isolated is used to run these 
test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - _47838_init(topology) - - _47838_run_0(topology) - _47838_run_1(topology) - _47838_run_2(topology) - _47838_run_3(topology) - _47838_run_4(topology) - _47838_run_5(topology) - _47838_run_6(topology) - _47838_run_7(topology) - _47838_run_8(topology) - _47838_run_9(topology) - _47838_run_10(topology) - _47838_run_11(topology) - _47928_run_0(topology) - _47928_run_1(topology) - _47928_run_2(topology) - _47928_run_3(topology) - - _47838_run_last(topology) - - _47838_final(topology) - if __name__ == '__main__': # Run isolated # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47869MMR_test.py b/dirsrvtests/tests/tickets/ticket47869MMR_test.py index 02408d08f..95627d2f4 100644 --- a/dirsrvtests/tests/tickets/ticket47869MMR_test.py +++ b/dirsrvtests/tests/tickets/ticket47869MMR_test.py @@ -316,31 +316,8 @@ def test_ticket47869_check(topology): topology.master1.log.info("##### ticket47869 was successfully verified. #####") -def test_ticket47869_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47869_init(topo) - - test_ticket47869_check(topo) - - test_ticket47869_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47871_test.py b/dirsrvtests/tests/tickets/ticket47871_test.py index 7e7b56d65..bb6810c71 100644 --- a/dirsrvtests/tests/tickets/ticket47871_test.py +++ b/dirsrvtests/tests/tickets/ticket47871_test.py @@ -198,28 +198,8 @@ def test_ticket47871_2(topology): assert len(ents) <= 1 -def test_ticket47871_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47871_init(topo) - test_ticket47871_1(topo) - test_ticket47871_2(topo) - - test_ticket47871_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47900_test.py b/dirsrvtests/tests/tickets/ticket47900_test.py index a3500d41d..4de368ba4 100644 --- a/dirsrvtests/tests/tickets/ticket47900_test.py +++ b/dirsrvtests/tests/tickets/ticket47900_test.py @@ -321,24 +321,8 @@ def test_ticket47900(topology): topology.standalone.log.info('Password update succeeded (%s)' % passwd) -def test_ticket47900_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47900(topo) - test_ticket47900_final(topo) - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47920_test.py b/dirsrvtests/tests/tickets/ticket47920_test.py index 99cc4782c..f031e1d0b 100644 --- a/dirsrvtests/tests/tickets/ticket47920_test.py +++ b/dirsrvtests/tests/tickets/ticket47920_test.py @@ -169,26 +169,8 @@ def test_ticket47920_mod_readentry_ctrl(topology): assert ent.getValue('description') == FINAL_DESC -def test_ticket47920_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47920_init(topo) - test_ticket47920_mod_readentry_ctrl(topo) - test_ticket47920_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47921_test.py b/dirsrvtests/tests/tickets/ticket47921_test.py index 3465fc7ca..537a59b1e 100644 --- a/dirsrvtests/tests/tickets/ticket47921_test.py +++ b/dirsrvtests/tests/tickets/ticket47921_test.py @@ -145,19 +145,8 @@ def test_ticket47921(topology): log.info('Test complete') -def test_ticket47921_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47921(topo) - test_ticket47921_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py index c449b69cb..c425aceef 100644 --- a/dirsrvtests/tests/tickets/ticket47927_test.py +++ b/dirsrvtests/tests/tickets/ticket47927_test.py @@ -289,25 +289,8 @@ def test_ticket47927_six(topology): assert False -def test_ticket47927_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47927_init(topo) - test_ticket47927_one(topo) - test_ticket47927_two(topo) - test_ticket47927_three(topo) - test_ticket47927_four(topo) - test_ticket47927_five(topo) - test_ticket47927_six(topo) - test_ticket47927_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47937_test.py b/dirsrvtests/tests/tickets/ticket47937_test.py index 009fbd3bb..6eb4f8b55 100644 --- a/dirsrvtests/tests/tickets/ticket47937_test.py +++ b/dirsrvtests/tests/tickets/ticket47937_test.py @@ -164,25 +164,8 @@ def test_ticket47937(topology): assert False -def test_ticket47937_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47937(topo) - test_ticket47937_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47950_test.py b/dirsrvtests/tests/tickets/ticket47950_test.py index c55c4ea07..cb3076d5f 100644 --- a/dirsrvtests/tests/tickets/ticket47950_test.py +++ b/dirsrvtests/tests/tickets/ticket47950_test.py @@ -199,25 +199,8 @@ def test_ticket47950(topology): assert False -def test_ticket47953_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47950(topo) - test_ticket47953_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47953_test.py b/dirsrvtests/tests/tickets/ticket47953_test.py index 5dff58bc9..1bcc007db 100644 --- a/dirsrvtests/tests/tickets/ticket47953_test.py +++ b/dirsrvtests/tests/tickets/ticket47953_test.py @@ -115,25 +115,8 @@ def test_ticket47953(topology): assert False -def test_ticket47953_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47953(topo) - test_ticket47953_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py index a99117330..6ca74fd27 100644 --- a/dirsrvtests/tests/tickets/ticket47963_test.py +++ b/dirsrvtests/tests/tickets/ticket47963_test.py @@ -181,19 +181,8 @@ def test_ticket47963(topology): log.info('Test complete') -def test_ticket47963_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47963(topo) - test_ticket47963_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47966_test.py b/dirsrvtests/tests/tickets/ticket47966_test.py index 3130f69df..75550a618 100644 --- a/dirsrvtests/tests/tickets/ticket47966_test.py +++ b/dirsrvtests/tests/tickets/ticket47966_test.py @@ -12,8 +12,7 @@ import time import ldap import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -21,11 +20,10 @@ from lib389.utils import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) - installation1_prefix = None - m1_m2_agmt = "" + class TopologyReplication(object): def __init__(self, master1, master2): master1.open() @@ -209,19 +207,8 @@ def test_ticket47966(topology): log.info('Test complete') -def test_ticket47966_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47966(topo) - test_ticket47966_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py index bea74bcd1..a957d4972 100644 --- a/dirsrvtests/tests/tickets/ticket47970_test.py +++ b/dirsrvtests/tests/tickets/ticket47970_test.py @@ -134,25 +134,8 @@ def test_ticket47970(topology): log.info('Root DSE was correctly not updated') -def test_ticket47970_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47970(topo) - test_ticket47970_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47973_test.py b/dirsrvtests/tests/tickets/ticket47973_test.py index d2fdcd59d..799c72aed 100644 --- a/dirsrvtests/tests/tickets/ticket47973_test.py +++ b/dirsrvtests/tests/tickets/ticket47973_test.py @@ -6,15 +6,12 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # -import os -import sys import time import ldap import ldap.sasl import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -161,25 +158,8 @@ def test_ticket47973(topology): task_count += 1 -def test_ticket47973_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47973(topo) - test_ticket47973_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py index 6b6b5bf4c..09cbe2f3b 100644 --- a/dirsrvtests/tests/tickets/ticket47980_test.py +++ b/dirsrvtests/tests/tickets/ticket47980_test.py @@ -638,25 +638,8 @@ def test_ticket47980(topology): assert False -def test_ticket47980_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47980(topo) - test_ticket47980_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py index 0fcf51ebe..fbd19d53e 100644 --- a/dirsrvtests/tests/tickets/ticket47981_test.py +++ b/dirsrvtests/tests/tickets/ticket47981_test.py @@ -271,25 +271,8 @@ def test_ticket47981(topology): assert False -def test_ticket47981_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47981(topo) - test_ticket47981_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47988_test.py b/dirsrvtests/tests/tickets/ticket47988_test.py index 45abba618..c9835c3d4 100644 --- a/dirsrvtests/tests/tickets/ticket47988_test.py +++ b/dirsrvtests/tests/tickets/ticket47988_test.py @@ -474,33 +474,8 @@ def test_ticket47988_6(topology): assert (master2_schema_csn) -def test_ticket47988_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47988_init(topo) - test_ticket47988_1(topo) - test_ticket47988_2(topo) - test_ticket47988_3(topo) - test_ticket47988_4(topo) - test_ticket47988_5(topo) - test_ticket47988_6(topo) - test_ticket47988_final(topo) - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48005_test.py b/dirsrvtests/tests/tickets/ticket48005_test.py index 116b1e9fd..5fde541dc 100644 --- a/dirsrvtests/tests/tickets/ticket48005_test.py +++ b/dirsrvtests/tests/tickets/ticket48005_test.py @@ -7,14 +7,12 @@ # --- END COPYRIGHT BLOCK --- # import os -import sys import time import ldap import logging import pytest import re -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -24,6 +22,7 @@ log = logging.getLogger(__name__) installation1_prefix = None + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -392,24 +391,8 @@ def test_ticket48005_schemareload(topology): log.info("Ticket 48005 schema reload test complete") -def test_ticket48005_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48005_setup(topo) - test_ticket48005_memberof(topo) - test_ticket48005_automember(topo) - test_ticket48005_syntaxvalidate(topo) - test_ticket48005_usn(topo) - test_ticket48005_schemareload(topo) - test_ticket48005_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48013_test.py b/dirsrvtests/tests/tickets/ticket48013_test.py index b6fe22ed8..12bfa3242 100644 --- a/dirsrvtests/tests/tickets/ticket48013_test.py +++ b/dirsrvtests/tests/tickets/ticket48013_test.py @@ -7,18 +7,14 @@ # --- END COPYRIGHT BLOCK --- # import os -import sys import time import ldap import logging import pytest -import pyasn1 -import pyasn1_modules -import ldap,ldapurl +import ldapurl from ldap.ldapobject 
import SimpleLDAPObject from ldap.syncrepl import SyncreplConsumer -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -125,19 +121,8 @@ def test_ticket48013(topology): log.info('Test complete') -def test_ticket48013_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48013(topo) - test_ticket48013_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48026_test.py b/dirsrvtests/tests/tickets/ticket48026_test.py index 2f411aa2f..730e94b61 100644 --- a/dirsrvtests/tests/tickets/ticket48026_test.py +++ b/dirsrvtests/tests/tickets/ticket48026_test.py @@ -7,13 +7,11 @@ # --- END COPYRIGHT BLOCK --- # import os -import sys import time import ldap import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -150,19 +148,8 @@ def test_ticket48026(topology): log.info('Test complete') -def test_ticket48026_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48026(topo) - test_ticket48026_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48170_test.py b/dirsrvtests/tests/tickets/ticket48170_test.py index 2261c11d1..3ffa964af 100644 --- a/dirsrvtests/tests/tickets/ticket48170_test.py +++ b/dirsrvtests/tests/tickets/ticket48170_test.py @@ -78,19 +78,8 @@ def test_ticket48170(topology): log.info('Test complete') -def test_ticket48170_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48170(topo) - test_ticket48170_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py b/dirsrvtests/tests/tickets/ticket48194_test.py index 453a794cb..897f4d0f1 100644 --- a/dirsrvtests/tests/tickets/ticket48194_test.py +++ b/dirsrvtests/tests/tickets/ticket48194_test.py @@ -459,10 +459,6 @@ def my_test_run_11(topology): connectWithOpenssl(topology, 'AES256-SHA256', False) -def my_test_final(topology): - log.info('Testcase PASSED') - - def test_ticket48194(topology): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
@@ -471,7 +467,6 @@ def test_ticket48194(topology): - set the installation prefix - run this program ''' - my_test_init(topology) my_test_run_0(topology) my_test_run_1(topology) @@ -485,7 +480,6 @@ def test_ticket48194(topology): my_test_run_9(topology) my_test_run_10(topology) my_test_run_11(topology) - my_test_final(topology) if __name__ == '__main__': # Run isolated diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py index 58a860f47..66ce6695c 100644 --- a/dirsrvtests/tests/tickets/ticket48212_test.py +++ b/dirsrvtests/tests/tickets/ticket48212_test.py @@ -21,6 +21,7 @@ MYSUFFIXBE = 'userRoot' _MYLDIF = 'example1k_posix.ldif' UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config" + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() diff --git a/dirsrvtests/tests/tickets/ticket48214_test.py b/dirsrvtests/tests/tickets/ticket48214_test.py index 8c8bf4000..7fc4bab66 100644 --- a/dirsrvtests/tests/tickets/ticket48214_test.py +++ b/dirsrvtests/tests/tickets/ticket48214_test.py @@ -18,6 +18,7 @@ installation_prefix = None MYSUFFIX = 'dc=example,dc=com' MYSUFFIXBE = 'userRoot' + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -146,27 +147,8 @@ def test_ticket48214_run(topology): topology.standalone.log.info("ticket48214 was successfully verified.") -def test_ticket48214_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48214_run(topo) - - test_ticket48214_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48226_test.py b/dirsrvtests/tests/tickets/ticket48226_test.py index 9f96d8a16..939121be5 100644 --- a/dirsrvtests/tests/tickets/ticket48226_test.py +++ b/dirsrvtests/tests/tickets/ticket48226_test.py @@ -187,7 +187,7 @@ def test_ticket48226_1(topology): valgrind_enable(sbin_dir) # start M2 to do the next updates - topology.master2.start(60) + topology.master2.start() # ADD 'description' by '5' mods = [(ldap.MOD_DELETE, 'description', '5')] diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py index f820cbe25..d8f9334ef 100644 --- a/dirsrvtests/tests/tickets/ticket48228_test.py +++ b/dirsrvtests/tests/tickets/ticket48228_test.py @@ -306,31 +306,8 @@ def test_ticket48228_test_subtree_policy(topology): log.info("Subtree level policy was successfully verified.") -def test_ticket48228_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - log.info('Testing Ticket 48228 - wrong password check if passwordInHistory is decreased') - - test_ticket48228_test_global_policy(topo) - - test_ticket48228_test_subtree_policy(topo) - - test_ticket48228_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py index 2c4eeab11..44978fdfe 100644 --- a/dirsrvtests/tests/tickets/ticket48252_test.py +++ b/dirsrvtests/tests/tickets/ticket48252_test.py @@ -26,6 +26,7 @@ installation_prefix = None USER_NUM = 10 TEST_USER = "test_user" + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -154,32 +155,8 @@ def test_ticket48252_run_1(topology): log.info('Case 2 - PASSED') -def test_ticket48252_final(topology): - log.info('Testing Ticket 48252 - PASSED.') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - log.info('Testing Ticket 48252 - db2index creates index entry from deleted records') - - test_ticket48252_setup(topo) - - test_ticket48252_run_0(topo) - test_ticket48252_run_1(topo) - - test_ticket48252_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48265_test.py b/dirsrvtests/tests/tickets/ticket48265_test.py index 9bfa34d29..d255f33d5 100644 --- a/dirsrvtests/tests/tickets/ticket48265_test.py +++ b/dirsrvtests/tests/tickets/ticket48265_test.py @@ -6,15 +6,11 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # -import os -import sys import time import ldap import logging import pytest -import threading -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -28,6 +24,7 @@ installation1_prefix = None USER_NUM = 20 TEST_USER = 'test_user' + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -106,28 +103,8 @@ def test_ticket48265_test(topology): log.info('Test 48265 complete\n') -def test_ticket48265_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - log.info('Testing Ticket 48265 - Complex filter in a search request does not work as expected') - - test_ticket48265_test(topo) - - test_ticket48265_final(topo) - - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48266_test.py b/dirsrvtests/tests/tickets/ticket48266_test.py index e509e142d..82571e9de 100644 --- a/dirsrvtests/tests/tickets/ticket48266_test.py +++ b/dirsrvtests/tests/tickets/ticket48266_test.py @@ -4,8 +4,7 @@ import time import ldap import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -16,8 +15,9 @@ log = logging.getLogger(__name__) installation1_prefix = None -NEW_ACCOUNT = "new_account" -MAX_ACCOUNTS = 20 +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + class TopologyReplication(object): def __init__(self, master1, master2): diff --git a/dirsrvtests/tests/tickets/ticket48270_test.py b/dirsrvtests/tests/tickets/ticket48270_test.py index 78b357da1..0eeeed4d2 100644 --- a/dirsrvtests/tests/tickets/ticket48270_test.py +++ b/dirsrvtests/tests/tickets/ticket48270_test.py @@ -54,11 +54,9 @@ def topology(request): standalone.delete() request.addfinalizer(fin) - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - return TopologyStandalone(standalone) + def test_ticket48270_init(topology): log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): @@ -148,24 +146,8 @@ def test_ticket48270_extensible_search(topology): ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) -def test_ticket48270(topology): - """Write your testcase here... - - Also, if you need any testcase initialization, - please, write additional fixture for that(include finalizer). - """ - - log.info('Test complete') - - if __name__ == '__main__': # Run isolated # -s for DEBUG mode - topo = topology(True) - test_ticket48270_init(topo) - test_ticket48270_homeDirectory_indexed_cis(topo) - test_ticket48270_homeDirectory_mixed_value(topo) - test_ticket48270_extensible_search(topo) - -# CURRENT_FILE = os.path.realpath(__file__) -# pytest.main("-s %s" % CURRENT_FILE) + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48294_test.py b/dirsrvtests/tests/tickets/ticket48294_test.py index ddb71bc62..3cb912f62 100644 --- a/dirsrvtests/tests/tickets/ticket48294_test.py +++ b/dirsrvtests/tests/tickets/ticket48294_test.py @@ -29,6 +29,7 @@ OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX LINKTYPE = 'directReport' MANAGEDTYPE = 'manager' + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -260,32 +261,8 @@ def _48294_run_2(topology): log.info('PASSED') -def _48294_final(topology): - log.info('All PASSED') - - -def test_ticket48294(topology): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - _48294_init(topology) - - _48294_run_0(topology) - _48294_run_1(topology) - _48294_run_2(topology) - - _48294_final(topology) - if __name__ == '__main__': # Run isolated # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48295_test.py b/dirsrvtests/tests/tickets/ticket48295_test.py index bcc8879c4..4c7719d38 100644 --- a/dirsrvtests/tests/tickets/ticket48295_test.py +++ b/dirsrvtests/tests/tickets/ticket48295_test.py @@ -29,6 +29,7 @@ OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX LINKTYPE = 'directReport' MANAGEDTYPE = 'manager' + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -183,30 +184,8 @@ def _48295_run(topology): log.info('PASSED') -def _48295_final(topology): - log.info('All PASSED') - - -def test_ticket48295(topology): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - _48295_init(topology) - - _48295_run(topology) - - _48295_final(topology) - if __name__ == '__main__': # Run isolated # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py index 6dd5506aa..de0dea935 100644 --- a/dirsrvtests/tests/tickets/ticket48342_test.py +++ b/dirsrvtests/tests/tickets/ticket48342_test.py @@ -4,8 +4,7 @@ import time import ldap import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * @@ -16,9 +15,10 @@ log = logging.getLogger(__name__) installation1_prefix = None -PEOPLE_OU='people' +PEOPLE_OU = 'people' PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) -MAX_ACCOUNTS=5 +MAX_ACCOUNTS = 5 + class TopologyReplication(object): def __init__(self, master1, master2, master3): @@ -188,6 +188,7 @@ def topology(request): return TopologyReplication(master1, master2, master3) + def _dna_config(server, nextValue=500, maxValue=510): log.info("Add dna plugin config entry...%s" % server) @@ -220,6 +221,7 @@ def _dna_config(server, nextValue=500, maxValue=510): server.start(timeout=120) time.sleep(3) + def test_ticket4026(topology): """Write your replication testcase here. 
@@ -310,9 +312,5 @@ def test_ticket4026(topology): if __name__ == '__main__': # Run isolated # -s for DEBUG mode -# global installation1_prefix -# installation1_prefix=None -# topo = topology(True) -# test_ticket4026(topo) CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48362_test.py b/dirsrvtests/tests/tickets/ticket48362_test.py index dc03d3ae0..0fa803775 100644 --- a/dirsrvtests/tests/tickets/ticket48362_test.py +++ b/dirsrvtests/tests/tickets/ticket48362_test.py @@ -23,6 +23,7 @@ BINDMETHOD_VALUE = "SASL/GSSAPI" PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' PROTOCOLE_VALUE = 'LDAP' + class TopologyReplication(object): def __init__(self, master1, master2): master1.open() diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py index 2ce4ccfa5..c785d3090 100644 --- a/dirsrvtests/tests/tickets/ticket48366_test.py +++ b/dirsrvtests/tests/tickets/ticket48366_test.py @@ -32,6 +32,7 @@ SUBTREE_GREEN = "ou=green,%s" % SUFFIX SUBTREE_RED = "ou=red,%s" % SUFFIX SUBTREES = (SUBTREE_GREEN, SUBTREE_RED) + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -187,29 +188,8 @@ def test_ticket48366_search_dm(topology): assert (len(ents) == 0) -def test_ticket48366_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48366_init(topo) - - test_ticket48366_search_dm(topo) - - test_ticket48366_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48369_test.py b/dirsrvtests/tests/tickets/ticket48369_test.py index 0b65fa29d..37f427249 100644 --- a/dirsrvtests/tests/tickets/ticket48369_test.py +++ b/dirsrvtests/tests/tickets/ticket48369_test.py @@ -121,4 +121,4 @@ if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48383_test.py b/dirsrvtests/tests/tickets/ticket48383_test.py index d05c7c25d..c6bebcdaf 100644 --- a/dirsrvtests/tests/tickets/ticket48383_test.py +++ b/dirsrvtests/tests/tickets/ticket48383_test.py @@ -17,6 +17,7 @@ import random logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() diff --git a/dirsrvtests/tests/tickets/ticket48497_test.py b/dirsrvtests/tests/tickets/ticket48497_test.py index debe63f4b..3130fc4b3 100644 --- a/dirsrvtests/tests/tickets/ticket48497_test.py +++ b/dirsrvtests/tests/tickets/ticket48497_test.py @@ -16,16 +16,16 @@ log = logging.getLogger(__name__) installation1_prefix = None -NEW_ACCOUNT = "new_account" -MAX_ACCOUNTS = 20 +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 -MIXED_VALUE="/home/mYhOmEdIrEcToRy" -LOWER_VALUE="/home/myhomedirectory" +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" 
HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' -HOMEDIRECTORY_CN="homedirectory" +HOMEDIRECTORY_CN = "homedirectory" MATCHINGRULE = 'nsMatchingRule' UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' -UIDNUMBER_CN="uidnumber" +UIDNUMBER_CN = "uidnumber" class TopologyStandalone(object): @@ -41,7 +41,7 @@ def topology(request): args_instance[SER_DEPLOYED_DIR] = installation1_prefix # Creating standalone instance ... - standalone = DirSrv(verbose=True) + standalone = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix args_instance[SER_HOST] = HOST_STANDALONE @@ -66,6 +66,7 @@ def topology(request): return TopologyStandalone(standalone) + def test_ticket48497_init(topology): log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): @@ -78,12 +79,14 @@ def test_ticket48497_init(topology): 'gidnumber': str(222), 'homedirectory': "/home/tb_%d" % cpt}))) + def test_ticket48497_homeDirectory_mixed_value(topology): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)] topology.standalone.modify_s(name, mod) + def test_ticket48497_extensible_search(topology): name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) @@ -120,14 +123,13 @@ def test_ticket48497_homeDirectory_index_cfg(topology): 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) -# log.info("attach debugger") -# time.sleep(60) IGNORE_MR_NAME='caseIgnoreIA5Match' EXACT_MR_NAME='caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + def test_ticket48497_homeDirectory_index_run(topology): args = {TASK_WAIT: True} topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) @@ -149,29 +151,8 @@ def test_ticket48497_homeDirectory_index_run(topology): log.info(line) assert 0 -def test_ticket48497(topology): - """Write your testcase here... - - Also, if you need any testcase initialization, - please, write additional fixture for that(include finalizer). 
- """ - - log.info('Test complete') - - if __name__ == '__main__': # Run isolated # -s for DEBUG mode -# global installation1_prefix -# installation1_prefix = None -# topo = topology(True) -# test_ticket48497_init(topo) -# -# -# test_ticket48497_homeDirectory_mixed_value(topo) -# test_ticket48497_extensible_search(topo) -# test_ticket48497_homeDirectory_index_cfg(topo) -# test_ticket48497_homeDirectory_index_run(topo) - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48637_test.py b/dirsrvtests/tests/tickets/ticket48637_test.py index 5df65d728..2bf03217b 100644 --- a/dirsrvtests/tests/tickets/ticket48637_test.py +++ b/dirsrvtests/tests/tickets/ticket48637_test.py @@ -195,4 +195,3 @@ if __name__ == '__main__': # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) - diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py index 9396b8a92..f50ab3556 100644 --- a/dirsrvtests/tests/tickets/ticket48665_test.py +++ b/dirsrvtests/tests/tickets/ticket48665_test.py @@ -14,6 +14,7 @@ from lib389.utils import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() diff --git a/dirsrvtests/tests/tickets/ticket48745_test.py b/dirsrvtests/tests/tickets/ticket48745_test.py index ca888da4f..6ebe31e3f 100644 --- a/dirsrvtests/tests/tickets/ticket48745_test.py +++ b/dirsrvtests/tests/tickets/ticket48745_test.py @@ -66,6 +66,7 @@ def topology(request): return TopologyStandalone(standalone) + def test_ticket48745_init(topology): log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): @@ -78,6 +79,7 @@ def test_ticket48745_init(topology): 'gidnumber': str(222), 'homedirectory': "/home/tbordaz_%d" % cpt}))) + def test_ticket48745_homeDirectory_indexed_cis(topology): log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") try: @@ -120,12 +122,14 @@ def test_ticket48745_homeDirectory_indexed_cis(topology): log.info(line) assert 0 + def test_ticket48745_homeDirectory_mixed_value(topology): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)] topology.standalone.modify_s(name, mod) + def test_ticket48745_extensible_search_after_index(topology): name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) @@ -159,15 +163,6 @@ def test_ticket48745_extensible_search_after_index(topology): log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") ent = topology.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) -def test_ticket48745(topology): - """Write your testcase here... - - Also, if you need any testcase initialization, - please, write additional fixture for that(include finalizer). - """ - - log.info('Test complete') - if __name__ == '__main__': # Run isolated diff --git a/dirsrvtests/tests/tickets/ticket48746_test.py b/dirsrvtests/tests/tickets/ticket48746_test.py index 038f1dd63..401b30067 100644 --- a/dirsrvtests/tests/tickets/ticket48746_test.py +++ b/dirsrvtests/tests/tickets/ticket48746_test.py @@ -184,30 +184,9 @@ def test_ticket48746_homeDirectory_indexed_ces(topology): log.info(line) assert not found -def test_ticket48746(topology): - """Write your testcase here... 
- - Also, if you need any testcase initialization, - please, write additional fixture for that(include finalizer). - """ - - log.info('Test complete') - if __name__ == '__main__': # Run isolated # -s for DEBUG mode -# global installation1_prefix -# installation1_prefix = None -# topo = topology(True) -# test_ticket48746_init(topo) -# -# -# test_ticket48746_homeDirectory_indexed_cis(topo) -# test_ticket48746_homeDirectory_mixed_value(topo) -# test_ticket48746_extensible_search_after_index(topo) -# # crash should occur here -# test_ticket48746_homeDirectory_indexed_ces(topo) - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48755_test.py b/dirsrvtests/tests/tickets/ticket48755_test.py index db4ae395a..db3c3f367 100644 --- a/dirsrvtests/tests/tickets/ticket48755_test.py +++ b/dirsrvtests/tests/tickets/ticket48755_test.py @@ -28,6 +28,7 @@ installation1_prefix = None m1_m2_agmt = None + class TopologyReplication(object): def __init__(self, master1, master2): master1.open() @@ -140,6 +141,7 @@ def add_ou_entry(server, idx, myparent): server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'], 'ou': name}))) + def add_user_entry(server, idx, myparent): name = 'tuser%d' % idx dn = 'uid=%s,%s' % (name, myparent) @@ -149,11 +151,13 @@ def add_user_entry(server, idx, myparent): 'cn': 'Test User%d' % idx, 'userpassword': 'password'}))) + def del_user_entry(server, idx, myparent): name = 'tuser%d' % idx dn = 'uid=%s,%s' % (name, myparent) server.delete_s(dn) + def add_ldapsubentry(server, myparent): name = 'nsPwPolicyContainer' container = 'cn=%s,%s' % (name, myparent) @@ -187,6 +191,7 @@ def add_ldapsubentry(server, myparent): 'cosAttribute': 'pwdpolicysubentry default operational-default', 'cn': '%s' % name}))) + def test_ticket48755(topology): log.info("Ticket 48755 - moving an entry could make the online init fail") @@ -253,9 +258,9 @@ def test_ticket48755(topology): assert len(m1entries) == len(m2entries) log.info('PASSED') + if __name__ == '__main__': # Run isolated # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48759_test.py b/dirsrvtests/tests/tickets/ticket48759_test.py index 63115c166..71c7c2f66 100644 --- a/dirsrvtests/tests/tickets/ticket48759_test.py +++ b/dirsrvtests/tests/tickets/ticket48759_test.py @@ -26,6 +26,7 @@ MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) MEMBER_DN_COMP = "uid=member" + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -262,24 +263,8 @@ def test_ticket48759(topology): assert (changes_pre == changes_post) -def test_ticket48759_final(topology): - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48759(topo) - test_ticket48759_final(topo) - if __name__ == '__main__': - run_isolated() + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py index a970fc645..e513bfd57 100644 --- a/dirsrvtests/tests/tickets/ticket48784_test.py +++ b/dirsrvtests/tests/tickets/ticket48784_test.py @@ -358,7 +358,8 @@ def config_tls_agreements(topology): def set_ssl_Version(server, name, version): - log.info("\n######################### Set %s on %s ######################\n", (version, name)) + log.info("\n######################### Set %s on %s ######################\n" % + (version, name)) server.simple_bind_s(DN_DM, PASSWORD) if version.startswith('SSL'): server.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'on'), diff --git a/dirsrvtests/tests/tickets/ticket48799_test.py b/dirsrvtests/tests/tickets/ticket48799_test.py index e92b5fd47..47461eb67 100644 --- a/dirsrvtests/tests/tickets/ticket48799_test.py +++ b/dirsrvtests/tests/tickets/ticket48799_test.py @@ -14,6 +14,7 @@ from lib389.utils import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) + class TopologyReplication(object): def __init__(self, master1, consumer1): master1.open() @@ -96,6 +97,7 @@ def topology(request): return TopologyReplication(master1, consumer1) + def _add_custom_schema(server): attr_value = "( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )" mod = [(ldap.MOD_ADD, 'attributeTypes', attr_value)] @@ -105,6 +107,7 @@ def _add_custom_schema(server): mod = [(ldap.MOD_ADD, 'objectclasses', oc_value)] server.modify_s('cn=schema', mod) + def _create_user(server): server.add_s(Entry(( "uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, @@ -122,6 +125,7 @@ def _create_user(server): } ))) + def _modify_user(server): mod = [ (ldap.MOD_ADD, 'objectClass', ['customPerson']), @@ -130,6 +134,7 @@ def _modify_user(server): ] server.modify("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, mod) + def test_ticket48799(topology): """Write your replication testcase here. 
@@ -159,7 +164,6 @@ def test_ticket48799(topology): assert(master_entry == consumer_entry) - log.info('Test complete') diff --git a/dirsrvtests/tests/tickets/ticket48844_test.py b/dirsrvtests/tests/tickets/ticket48844_test.py index 308ec1706..de719c39c 100644 --- a/dirsrvtests/tests/tickets/ticket48844_test.py +++ b/dirsrvtests/tests/tickets/ticket48844_test.py @@ -25,6 +25,7 @@ BITWISE_F2 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=514))' % F1 BITWISE_F3 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=513))' % F1 BITWISE_F6 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=16777216))' % F1 + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -63,6 +64,7 @@ def topology(request): return TopologyStandalone(standalone) + def _addBitwiseEntries(topology): users = [ @@ -111,6 +113,7 @@ def _addBitwiseEntries(topology): except ValueError: topology.standalone.log.fatal("add_s failed: %s", ValueError) + def test_ticket48844_init(topology): # create a suffix where test entries will be stored BITW_SCHEMA_AT_1 = '( NAME \'testUserAccountControl\' DESC \'Attribute Bitwise filteri-Multi-Valued\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )' @@ -150,6 +153,7 @@ def test_ticket48844_bitwise_on(topology): assert (len(ents) == expect) assert (ents[0].hasAttr('testUserAccountControl')) + def test_ticket48844_bitwise_off(topology): """ Check that when bitwise plugin is not enabled, no plugin diff --git a/dirsrvtests/tests/tickets/ticket48891_test.py b/dirsrvtests/tests/tickets/ticket48891_test.py index ad6fd3afa..a33b22aa7 100644 --- a/dirsrvtests/tests/tickets/ticket48891_test.py +++ b/dirsrvtests/tests/tickets/ticket48891_test.py @@ -7,18 +7,14 @@ # --- END COPYRIGHT BLOCK --- # import os -import sys import time import ldap import logging import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools +from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl -from ldap.controls.simple import GetEffectiveRightsControl import fnmatch log = logging.getLogger(__name__) @@ -35,6 +31,7 @@ SEARCHFILTER = '(objectclass=person)' OTHER_NAME = 'other_entry' MAX_OTHERS = 10 + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -95,11 +92,11 @@ def test_ticket48891_setup(topology): topology.standalone.simple_bind_s(DN_DM, PASSWORD) # check there is no core - entry = topology.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)",['nsslapd-workingdir']) + entry = topology.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, + "(cn=config)", ['nsslapd-errorlog']) assert entry - assert entry[0] - assert entry[0].hasAttr('nsslapd-workingdir') - path = entry[0].getValue('nsslapd-workingdir') + path = entry[0].getValue('nsslapd-errorlog').replace('errors', '') + log.debug('Looking for a core file in: ' + path) cores = fnmatch.filter(os.listdir(path), 'core.*') assert len(cores) == 0 @@ -137,39 +134,19 @@ def test_ticket48891_setup(topology): assert MAX_OTHERS == len(entries) topology.standalone.log.info('%d person entries are successfully created under %s.' 
% (len(entries), MYSUFFIX)) - - topology.standalone.stop(timeout=1) - cores = fnmatch.filter(os.listdir(path), 'core.*') for core in cores: core = os.path.join(path, core) topology.standalone.log.info('cores are %s' % core) assert not os.path.isfile(core) - - -def test_ticket48891_final(topology): log.info('Testcase PASSED') -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48891_setup(topo) - test_ticket48891_final(topo) - - if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py index 44c96dafa..299b22457 100644 --- a/dirsrvtests/tests/tickets/ticket48916_test.py +++ b/dirsrvtests/tests/tickets/ticket48916_test.py @@ -147,6 +147,7 @@ def _create_user(inst, idnum): }) )) + def test_ticket48916(topology): """ https://bugzilla.redhat.com/show_bug.cgi?id=1353629 @@ -222,11 +223,10 @@ def test_ticket48916(topology): }) )) - # Enable the plugins dna_m1.enable() dna_m2.enable() - + # Restart the instances topology.master1.restart(60) topology.master2.restart(60) @@ -236,7 +236,7 @@ def test_ticket48916(topology): # Allocate the 10 members to exhaust - for i in range(1,11): + for i in range(1, 11): _create_user(topology.master2, i) # Allocate the 11th
0
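The diff above repeatedly drops a per-file run_isolated() helper in favour of one standard py.test footer. For reference, that footer in isolation, with the string form used in the diff kept as a comment and the list form that current pytest releases expect as the live call (a sketch only; the fixtures and test_* functions are assumed to be defined above it):

import os
import pytest

# ... fixtures and test_* functions would be defined here ...

if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode (keeps stdout/log output visible)
    CURRENT_FILE = os.path.realpath(__file__)
    # Form used throughout the diff, accepted by the pytest versions of that era:
    #   pytest.main("-s %s" % CURRENT_FILE)
    # Equivalent call on current pytest releases:
    pytest.main(["-s", CURRENT_FILE])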
25e5b7842f6a2c5911b9fc788bce29d938e6d668
389ds/389-ds-base
Ticket 49186 - Fix NS to improve shutdown reliability Bug Description: In some cases, NS would race the condvar and not shut down correctly, causing pthread_join to hang. Fix Description: Instead of spinning on is_shutdown as an atomic value, we should send shutdown jobs to the queues, allowing NS to shut itself down asynchronously rather than synchronously. This also has the benefit of allowing all queued jobs to complete. https://pagure.io/389-ds-base/issue/49186 Author: wibrown Review by: mreynolds (Thanks!!!!) Signed-off-by: Mark Reynolds <[email protected]>
commit 25e5b7842f6a2c5911b9fc788bce29d938e6d668 Author: William Brown <[email protected]> Date: Wed Mar 22 18:05:56 2017 +1000 Ticket 49186 - Fix NS to improve shutdown relability Bug Description: In some cases, NS would race the condvar and not shutdown correctly, causing pthread_join to hang Fix Description: Instead of spinning on is_shutdown as an atomic value, we should send shutdown jobs to the queues allowing ns to shut itself down asynchronously rather than synchronously. This also has the benefito f allowing all queued jobs to complete. https://pagure.io/389-ds-base/issue/49186 Author: wibrown Review by: mreynolds(Thanks!!!!) Signed-off-by: Mark Reynolds <[email protected]> diff --git a/src/nunc-stans/include/nunc-stans.h b/src/nunc-stans/include/nunc-stans.h index bda63504e..7ce0582ae 100644 --- a/src/nunc-stans/include/nunc-stans.h +++ b/src/nunc-stans/include/nunc-stans.h @@ -219,6 +219,16 @@ typedef void (*ns_job_func_t)(struct ns_job_t *); * \sa ns_add_io_job, ns_add_io_timeout_job, ns_job_get_type, ns_job_get_output_type, NS_JOB_IS_PRESERVE_FD */ #define NS_JOB_PRESERVE_FD 0x100 +/** + * Internal flag to shutdown a worker thread. + * + * If you assign this to a job it will cause the worker thread that dequeues it to be + * shutdown, ready for pthread_join() to be called on it. + * + * You probably DON'T want to use this ever, as it really will shutdown threads + * and you can't get them back .... you have been warned. + */ +#define NS_JOB_SHUTDOWN_WORKER 0x200 /** * Bitflag type for job types * @@ -237,7 +247,7 @@ typedef void (*ns_job_func_t)(struct ns_job_t *); * \endcode * \sa ns_add_io_job, ns_add_io_timeout_job, ns_add_job, ns_add_signal_job, ns_add_timeout_job, ns_job_get_type, ns_job_get_output_type */ -typedef unsigned short ns_job_type_t; +typedef uint_fast16_t ns_job_type_t; /** * Used to test an #ns_job_type_t value for #NS_JOB_ACCEPT @@ -274,6 +284,10 @@ typedef unsigned short ns_job_type_t; * \sa NS_JOB_PERSIST, ns_job_get_type, ns_job_get_output_type */ #define NS_JOB_IS_PERSIST(eee) ((eee)&NS_JOB_PERSIST) +/** + * Used to test if an #ns_job_type_t is to shutdown the worker thread. + */ +#define NS_JOB_IS_SHUTDOWN_WORKER(eee) ((eee)&NS_JOB_SHUTDOWN_WORKER) /** * Used to test an #ns_job_type_t value to see if it is any sort of I/O job * \sa NS_JOB_IS_ACCEPT, NS_JOB_IS_READ, NS_JOB_IS_CONNECT, NS_JOB_IS_WRITE, ns_job_get_type, ns_job_get_output_type diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c index 2bd3bf26d..f53e74e27 100644 --- a/src/nunc-stans/ns/ns_thrpool.c +++ b/src/nunc-stans/ns/ns_thrpool.c @@ -73,6 +73,7 @@ struct ns_thrpool_t { pthread_cond_t work_q_cv; pthread_mutex_t work_q_lock; sds_queue *thread_stack; + uint32_t thread_count; pthread_t event_thread; PRFileDesc *event_q_wakeup_pipe_read; PRFileDesc *event_q_wakeup_pipe_write; @@ -355,7 +356,7 @@ worker_thread_func(void *arg) ns_thread_t *thr = (ns_thread_t *)arg; ns_thrpool_t *tp = thr->tp; sds_result result = SDS_SUCCESS; - int32_t is_shutdown = ns_thrpool_is_shutdown(tp); + int_fast32_t is_shutdown = 0; /* Get ready to use lock free ds */ sds_lqueue_tprep(tp->work_q); @@ -368,20 +369,25 @@ worker_thread_func(void *arg) result = sds_lqueue_dequeue(tp->work_q, (void **)&job); /* Don't need monitor here, job_dequeue barriers the memory for us. Job will be valid */ /* Is it possible for a worker thread to get stuck here during shutdown? 
*/ - if (result == SDS_LIST_EXHAUSTED && !is_shutdown) { + if (result == SDS_LIST_EXHAUSTED) { work_q_wait(tp); } else if (result == SDS_SUCCESS && job != NULL) { /* Even if we are shutdown here, we can process a job. */ /* Should we just keep dequeing until we exhaust the list? */ - work_job_execute(job); + if (NS_JOB_IS_SHUTDOWN_WORKER(job->job_type)) { + ns_log(LOG_INFO, "worker_thread_func notified to shutdown!\n"); + internal_ns_job_done(job); + is_shutdown = 1; + } else { + work_job_execute(job); + } /* MUST NOT ACCESS JOB FROM THIS POINT */ } else { ns_log(LOG_ERR, "worker_thread_func encountered a recoverable issue during processing of the queue\n"); } - - is_shutdown = ns_thrpool_is_shutdown(tp); } + ns_log(LOG_INFO, "worker_thread_func shutdown complete!\n"); /* With sds, it cleans the thread on join automatically. */ return NULL; } @@ -1011,6 +1017,18 @@ ns_add_job(ns_thrpool_t *tp, ns_job_type_t job_type, ns_job_func_t func, void *d return PR_SUCCESS; } +PRStatus +ns_add_shutdown_job(ns_thrpool_t *tp) { + ns_job_t *_job = NULL; + _job = new_ns_job(tp, NULL, NS_JOB_SHUTDOWN_WORKER, NULL, NULL); + if (!_job) { + return PR_FAILURE; + } + _job->state = NS_JOB_NEEDS_ARM; + internal_ns_job_rearm(_job); + return PR_SUCCESS; +} + /* * Because of the design of work_job_execute, when we are in RUNNING * we hold the monitor. As a result, we don't need to assert the current thread @@ -1388,6 +1406,7 @@ ns_thrpool_new(struct ns_thrpool_config *tp_config) } for (ii = 0; ii < tp_config->max_threads; ++ii) { + tp->thread_count += 1; thr = ns_calloc(1, sizeof(ns_thread_t)); PR_ASSERT(thr); thr->tp = tp; @@ -1502,15 +1521,19 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp) /* Already done! */ return; } + /* Set the shutdown flag. This will cause the worker * threads to exit after they finish all remaining work. */ __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_RELEASE); - /* Wake up the idle worker threads so they can exit. */ - /* Do we need this to be run in conjuction with our thread join loop incase threads are still active? */ - pthread_mutex_lock(&(tp->work_q_lock)); - pthread_cond_broadcast(&(tp->work_q_cv)); - pthread_mutex_unlock(&(tp->work_q_lock)); + /* Send worker shutdown jobs into the queues. This allows + * currently queued jobs to complete. + */ + for (size_t i = 0; i < tp->thread_count; i++) { + PRStatus result = ns_add_shutdown_job(tp); + PR_ASSERT(result == PR_SUCCESS); + } + } PRStatus @@ -1524,8 +1547,8 @@ ns_thrpool_wait(ns_thrpool_t *tp) while (sds_queue_dequeue(tp->thread_stack, (void **)&thr) == SDS_SUCCESS) { - /* LAST CHANCE! Really make sure the thread workers are ready to go! */ - /* In theory, they could still be blocked up here, but we hope not ... */ + + /* Make sure all threads are woken up to their shutdown jobs. */ pthread_mutex_lock(&(tp->work_q_lock)); pthread_cond_broadcast(&(tp->work_q_cv)); pthread_mutex_unlock(&(tp->work_q_lock));
0
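The nunc-stans change in the row above is C, but the mechanism it adopts is the classic poison-pill shutdown: enqueue one terminal job per worker so that already-queued work still drains before the threads exit and can be joined. As an analogy only (hypothetical Python, not the 389-ds code), the same idea with queue.Queue looks like this:

import queue
import threading

SHUTDOWN = object()   # sentinel playing the role of NS_JOB_SHUTDOWN_WORKER

def worker(q):
    while True:
        job = q.get()          # block on the queue instead of spinning on a flag
        if job is SHUTDOWN:
            break              # this worker is now ready to be joined
        job()                  # run a normal queued job

q = queue.Queue()
workers = [threading.Thread(target=worker, args=(q,)) for _ in range(4)]
for t in workers:
    t.start()

for i in range(10):
    q.put(lambda i=i: print("job", i))

# Shutdown: one sentinel per worker, so queued jobs complete first,
# then join every thread (compare ns_thrpool_shutdown and ns_thrpool_wait).
for _ in workers:
    q.put(SHUTDOWN)
for t in workers:
    t.join()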
1bf6bfb757bb9c6edd1ed9342953a16453ed2d12
389ds/389-ds-base
Resolves: #474248 Summary: Replica crashes in the consumer initialization if the backend to be replicated does not exist Description: . mapping_tree.c: if NULL mapping tree state is passed, return an error. . repl_extop.c: if mapping tree node state is NULL, don't reset the mapping tree state. . replutil.c: if NULL mapping tree state is passed, log it and return.
commit 1bf6bfb757bb9c6edd1ed9342953a16453ed2d12 Author: Noriko Hosoi <[email protected]> Date: Thu Dec 4 00:42:18 2008 +0000 Resolves: #474248 Summary: Replica crashes in the consumer initialization if the backend to be replicated does not exist Description: . mapping_tree.c: if NULL mapping tree state is passed, return an error. . repl_extop.c: if mapping tree node state is NULL, don't reset the mapping tree state. . replutil.c: if NULL mapping tree state is passed, log it and return. diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c index b286f209d..e9d5af47e 100644 --- a/ldap/servers/plugins/replication/repl_extop.c +++ b/ldap/servers/plugins/replication/repl_extop.c @@ -837,10 +837,12 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) { response = NSDS50_REPL_INTERNAL_ERROR; /* reset the mapping tree state to what it was before - we tried to do the bulk import */ - repl_set_mtn_state_and_referrals(repl_root_sdn, mtnstate, + we tried to do the bulk import if mtnstate exists */ + if (mtnstate) { + repl_set_mtn_state_and_referrals(repl_root_sdn, mtnstate, NULL, NULL, mtnreferral); - slapi_ch_free_string(&mtnstate); + slapi_ch_free_string(&mtnstate); + } charray_free(mtnreferral); mtnreferral = NULL; diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c index 544ac1161..6e6fd63d2 100644 --- a/ldap/servers/plugins/replication/replutil.c +++ b/ldap/servers/plugins/replication/replutil.c @@ -709,6 +709,11 @@ repl_set_mtn_state_and_referrals( int ii = 0; char **referrals_to_set = NULL; PRBool chain_on_update = is_chain_on_update_setup(repl_root_sdn); + if (NULL == mtn_state) { + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, + "repl_set_mtn_referrals: cannot set NULL state.\n"); + return; + } /* Fix for blackflag bug 601440: We want the new behaviour of DS, ** going forward, to now be that if the nsds5replicareferral attrib diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c index 3e011ee72..a0b0d9c63 100644 --- a/ldap/servers/slapd/mapping_tree.c +++ b/ldap/servers/slapd/mapping_tree.c @@ -3043,6 +3043,10 @@ slapi_mtn_set_state(const Slapi_DN *sdn, char *state) return LDAP_OPERATIONS_ERROR; } + if (NULL == state) { + return LDAP_OPERATIONS_ERROR; + } + if ( (value = slapi_mtn_get_state(sdn)) != NULL ) { if ( strcasecmp(value, state) == 0 )
0
f81e7eac08e2af16fa6b8d245525c4a5ac5eb6f7
389ds/389-ds-base
Improve search for pcre header file Some platforms (RHEL4, for instance) put the pcre header file in a pcre subdirectory under /usr/include. This patch makes configure search /usr/include/pcre first, then fall back to /usr/include.
commit f81e7eac08e2af16fa6b8d245525c4a5ac5eb6f7 Author: Nathan Kinder <[email protected]> Date: Tue Feb 9 12:21:34 2010 -0800 Improve search for pcre header file Some platforms (RHEL4 for instance) put the pcre header file in a pcre subdirectory under /usr/include. This patch makes configure first search in /usr/include/pcre, then falls back to /usr/include. diff --git a/Makefile.in b/Makefile.in old mode 100644 new mode 100755 diff --git a/aclocal.m4 b/aclocal.m4 old mode 100644 new mode 100755 diff --git a/configure b/configure index e9647c2df..4b6db2d20 100755 --- a/configure +++ b/configure @@ -27061,7 +27061,12 @@ if test "${with_pcre+set}" = set; then if test "$withval" = "yes"; then echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6 - if test -f "/usr/include/pcre.h"; then + if test -f "/usr/include/pcre/pcre.h"; then + pcre_incdir="/usr/include/pcre" + pcre_inc="-I/usr/include/pcre" + pcre_lib='-L$(libdir)' + pcre_libdir='$(libdir)' + elif test -f "/usr/include/pcre.h"; then pcre_incdir="/usr/include" pcre_inc="-I/usr/include" pcre_lib='-L$(libdir)' diff --git a/ltmain.sh b/ltmain.sh old mode 100644 new mode 100755 diff --git a/m4/pcre.m4 b/m4/pcre.m4 index 6096108d3..0c7569dbf 100644 --- a/m4/pcre.m4 +++ b/m4/pcre.m4 @@ -27,7 +27,12 @@ AC_ARG_WITH(pcre, [ --with-pcre=PATH Perl Compatible Regular Expression direc if test "$withval" = "yes"; then AC_MSG_RESULT(yes) dnl - check in system locations - if test -f "/usr/include/pcre.h"; then + if test -f "/usr/include/pcre/pcre.h"; then + pcre_incdir="/usr/include/pcre" + pcre_inc="-I/usr/include/pcre" + pcre_lib='-L$(libdir)' + pcre_libdir='$(libdir)' + elif test -f "/usr/include/pcre.h"; then pcre_incdir="/usr/include" pcre_inc="-I/usr/include" pcre_lib='-L$(libdir)'
0
e6a9b22b99d7e880edec4f6744f480d873c57af7
389ds/389-ds-base
coverity 12563 Read from pointer after free In dna_be_txn_pre_op(): reads the target of a freed pointer. Fix: set types_to_generate to NULL after slapi_ch_array_free. Reviewed by: nkinder, nhosoi (Thanks!)
commit e6a9b22b99d7e880edec4f6744f480d873c57af7 Author: Rich Megginson <[email protected]> Date: Tue Mar 6 10:29:11 2012 -0700 coverity 12563 Read from pointer after free In dna_be_txn_pre_op(): Reads target of a freed pointer set types_to_generate to NULL after slapi_ch_array_free Reviewed by: nkinder, nhosoi (Thanks!) diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index 7ace2b1b6..84f2a7ad2 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -3137,6 +3137,7 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype) /* free up */ slapi_ch_free_string(&value); slapi_ch_array_free(types_to_generate); + types_to_generate = NULL; } else if (types_to_generate) { slapi_ch_free((void **)&types_to_generate); }
0
68bc0a4391b77ad717fbdfb93f9eebb25eda5bc1
389ds/389-ds-base
Bug 627993 - Inconsistent storage of password expiry times The commit made in 5727b8899700f574026bc9be5a1990c4c66619cf introduced a bug. That commit removed two functions, format_genTime and parse_genTime, from slapi-private.h, but they are used in plugins, which require them in the header file. This fix restores the function declarations.
commit 68bc0a4391b77ad717fbdfb93f9eebb25eda5bc1 Author: Noriko Hosoi <[email protected]> Date: Thu Jan 20 10:20:52 2011 -0800 Bug 627993 - Inconsistent storage of password expiry times Commit made in 5727b8899700f574026bc9be5a1990c4c66619cf introduced a bug. The commit removed 2 functions format_genTime and parse_genTime from slapi-private.h. But they are used in plugins, which requires them in the header file. This fix restores the function declarations. diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index 0873a75d5..3bc0c7414 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -1117,8 +1117,10 @@ time_t read_localTime(struct berval* from); time_t parse_localTime(char* from); void write_localTime(time_t from, struct berval* into); time_t current_time( void ); +char* format_genTime(time_t from); void write_genTime(time_t from, struct berval* into); time_t read_genTime(struct berval* from); +time_t parse_genTime(char* from); long parse_duration(char *value); char *gen_duration(long duration);
0
b8826854c583a091b372bf75f02efc01de411f1e
389ds/389-ds-base
Issue 49031 - Improve memberof with a cache of group parents Description: Online import and add of bulk entries with nested groups to compare the performance of the memberOf plugin. https://pagure.io/389-ds-base/issue/49031 Reviewed by: spichugi Signed-off-by: Simon Pichugin <[email protected]>
commit b8826854c583a091b372bf75f02efc01de411f1e Author: Sankar Ramalingam <[email protected]> Date: Fri Jul 14 17:43:38 2017 +0530 Issue 49031 - Improve memberof with a cache of group parents Description: Online import and add of bulk entries with nested groups to compare the performance of memberOf plugin. https://pagure.io/389-ds-base/issue/49031 Reviewed by: spichugi Signed-off-by: Simon Pichugin <[email protected]> diff --git a/dirsrvtests/tests/perf/create_data.py b/dirsrvtests/tests/perf/create_data.py new file mode 100755 index 000000000..0d7e385c5 --- /dev/null +++ b/dirsrvtests/tests/perf/create_data.py @@ -0,0 +1,289 @@ +#!/usr/bin/python2 +from __future__ import ( + print_function, + division +) + +import sys +import math + + +class RHDSData(object): + def __init__( + self, + stream=sys.stdout, + users=10000, + groups=100, + grps_puser=20, + nest_level=10, + ngrps_puser=10, + domain="redhat.com", + basedn="dc=example,dc=com", + ): + self.users = users + self.groups = groups + self.basedn = basedn + self.domain = domain + self.stream = stream + + self.grps_puser = grps_puser + self.nest_level = nest_level + self.ngrps_puser = ngrps_puser + + self.user_defaults = { + 'objectClass': [ + 'person', + 'top', + 'inetorgperson', + 'organizationalperson', + 'inetuser', + 'posixaccount'], + 'uidNumber': ['-1'], + 'gidNumber': ['-1'], + } + + self.group_defaults = { + 'objectClass': [ + 'top', + 'inetuser', + 'posixgroup', + 'groupofnames'], + 'gidNumber': [-1], + } + + def put_entry(self, entry): + """ + Abstract method, implementation depends on if we want just print LDIF, + or update LDAP directly + """ + raise NotImplementedError() + + def gen_user(self, uid): + user = dict(self.user_defaults) + user['dn'] = 'uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) + user['uid'] = [uid] + user['displayName'] = ['{} {}'.format(uid, uid)] + user['sn'] = [uid] + user['homeDirectory'] = ['/other-home/{}'.format(uid)] + user['mail'] = ['{uid}@{domain}'.format( + uid=uid, domain=self.domain)] + user['givenName'] = [uid] + user['cn'] = ['{} {}'.format(uid, uid)] + + return user + + def username_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'user%s' % i + + def gen_group(self, name, members=(), group_members=()): + group = dict(self.group_defaults) + group['dn'] = 'cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) + group['cn'] = [name] + group['member'] = ['uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) for uid in members] + group['member'].extend( + ['cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) for name in group_members]) + return group + + def groupname_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'group%s' % i + + def gen_users_and_groups(self): + self.__gen_entries_with_groups( + self.users, + self.groups, + self.grps_puser, + self.ngrps_puser, + self.nest_level, + self.username_generator, self.gen_user, + self.groupname_generator, self.gen_group + ) + + def __gen_entries_with_groups( + self, + num_of_entries, + num_of_groups, + groups_per_entry, + nested_groups_per_entry, + max_nesting_level, + gen_entry_name_f, gen_entry_f, + gen_group_name_f, gen_group_f + ): + assert num_of_groups % groups_per_entry == 0 + assert num_of_groups >= groups_per_entry + assert groups_per_entry > nested_groups_per_entry + assert max_nesting_level > 0 + assert nested_groups_per_entry > 0 + assert ( + groups_per_entry - 
nested_groups_per_entry > + int(math.ceil(nested_groups_per_entry / float(max_nesting_level))) + ), ( + "At least {} groups is required to generate proper amount of " + "nested groups".format( + nested_groups_per_entry + + int(math.ceil( + nested_groups_per_entry / float(max_nesting_level)) + ) + ) + ) + + for uid in gen_entry_name_f(0, num_of_entries): + self.put_entry(gen_entry_f(uid)) + + # create N groups per entry, <num_of_nested_groups> of them are nested + # User/Host (max nesting level = 2) + # | + # +--- G1 --- G2 (nested) --- G3 (nested, max level) + # | + # +--- G5 --- G6 (nested) + # | + # ...... + # | + # +--- GN + + # how many members should be added to groups (set of groups_per_entry + # have the same members) + entries_per_group = num_of_entries // (num_of_groups // groups_per_entry) + + # generate groups and put users there + for i in range(num_of_groups // groups_per_entry): + + uids = list(gen_entry_name_f( + i * entries_per_group, + (i + 1) * entries_per_group + )) + + # per user + last_grp_name = None + nest_lvl = 0 + nested_groups_added = 0 + + for group_name in gen_group_name_f( + i * groups_per_entry, + (i + 1) * groups_per_entry, + ): + # create nested groups first + if nested_groups_added < nested_groups_per_entry: + if nest_lvl == 0: + # the top group + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + nest_lvl += 1 + nested_groups_added += 1 + elif nest_lvl == max_nesting_level: + # the last level group this group is not nested + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + # mid level group + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name] + ) + ) + nested_groups_added += 1 + nest_lvl += 1 + + last_grp_name = group_name + else: + # rest of groups have direct membership + if nest_lvl != 0: + # assign the last nested group if exists + self.put_entry( + gen_group_f( + group_name, + members=uids, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + + def __generate_entries_with_users_groups( + self, + num_of_entries_direct_members, + num_of_entries_indirect_members, + entries_per_user, + entries_per_group, + gen_entry_name_f, gen_entry_f, + ): + assert num_of_entries_direct_members % entries_per_user == 0 + assert num_of_entries_indirect_members % entries_per_group == 0 + + num_of_entries = num_of_entries_direct_members + num_of_entries_indirect_members + + # direct members + users_per_entry = self.users // (num_of_entries_direct_members // entries_per_user) + + start_user = 0 + stop_user = users_per_entry + for name in gen_entry_name_f(0, num_of_entries_direct_members): + self.put_entry( + gen_entry_f( + name, + user_members=self.username_generator(start_user, stop_user), + ) + ) + start_user = stop_user % self.users + stop_user = start_user + users_per_entry + stop_user = stop_user if stop_user < self.users else self.users + + groups_per_entry = self.groups // (num_of_entries_indirect_members // entries_per_group) + + # indirect members + start_group = 0 + stop_group = groups_per_entry + for name in gen_entry_name_f(num_of_entries_direct_members, num_of_entries): + self.put_entry( + gen_entry_f( + name, + usergroup_members=self.groupname_generator(start_group, stop_group), + ) + ) + start_group = stop_group % self.groups + stop_group = start_group + groups_per_entry + stop_group = stop_group if stop_group < self.groups else self.groups + + def do_magic(self): + 
self.gen_users_and_groups() + + +class RHDSDataLDIF(RHDSData): + def put_entry(self, entry): + print(file=self.stream) + print("dn:", entry['dn'], file=self.stream) + for k, values in entry.items(): + if k == 'dn': + continue + for v in values: + print("{}: {}".format(k, v), file=self.stream) + print(file=self.stream) diff --git a/dirsrvtests/tests/perf/memberof_test.py b/dirsrvtests/tests/perf/memberof_test.py new file mode 100755 index 000000000..4a460ea35 --- /dev/null +++ b/dirsrvtests/tests/perf/memberof_test.py @@ -0,0 +1,403 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389 import Entry +from lib389.tasks import Tasks +from lib389.dseldif import DSEldif +from create_data import RHDSDataLDIF +from lib389.properties import TASK_WAIT +from lib389.utils import ldap, os, time, logging, ds_is_older +from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD, PLUGIN_MEMBER_OF, \ + PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER, DN_CONFIG_LDBM, HOST_STANDALONE, PORT_STANDALONE +from lib389.topologies import topology_st as topo + +MEMOF_PLUGIN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +MAN_ENTRY_PLUGIN = ('cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config') +AUTO_MEM_PLUGIN = ('cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config') +DOMAIN = 'redhat.com' +LDAP_MOD = '/usr/bin/ldapmodify' +FILTER = 'objectClass=*' +USER_FILTER = '(|(uid=user*)(cn=group*))' +MEMBEROF_ATTR = 'memberOf' +DN_ATTR = 'dn:' + +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + [email protected](scope="module") +def memberof_setup(topo, request): + """Configure required plugins and restart the server""" + + log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + except ldap.LDAPError as e: + log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF)) + raise e + try: + topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + raise e + + log.info('Change config values for db-locks and dbcachesize to import large ldif files') + if ds_is_older('1.3.6'): + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000') + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-dbcachesize', '10000000') + except: + log.error('Failed to replace cn=config values of db-locks and dbcachesize') + raise + topo.standalone.start(timeout=10) + else: + try: + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', '100000')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', '0')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', '10000000')]) + except ldap.LDAPError as e: + log.error( + 'Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize {}'.format(e.message['desc'])) + raise e + topo.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + 
topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to disable plugins, {}'.format(e.message['desc'])) + assert False + topo.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +def _create_base_ldif(topo, import_base=False): + """Create base ldif file to clean entries from suffix""" + + log.info('Add base entry for online import') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, '/perf.ldif') + log.info('LDIF FILE is this: {}'.format(ldif_file)) + base_ldif = """dn: dc=example,dc=com +objectclass: top +objectclass: domain +dc: example + +dn: ou=people,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: people + +dn: ou=groups,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: groups +""" + with open(ldif_file, "w") as fd: + fd.write(base_ldif) + if import_base: + log.info('Adding base entry to suffix to remove users/groups and leave only the OUs') + try: + topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Online import failed' + e.message('desc')) + assert False + else: + log.info('Return LDIF file') + return ldif_file + + +def _run_fixup_memberof(topo): + """Run fixup memberOf task and measure the time taken""" + + log.info('Running fixup memberOf task and measuring the time taken') + start = time.time() + try: + topo.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Running fixup MemberOf task failed' + e.message('desc')) + assert False + end = time.time() + cmd_time = int(end - start) + return cmd_time + + +def _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, is_import=False): + """Create LDIF files for given nof users, groups and nested group levels""" + + log.info('Checking if the operation is Import or Ldapadd') + if is_import: + log.info('Import: Create base entry before adding users and groups') + exp_entries = nof_users + nof_groups + data_ldif = _create_base_ldif(topo, False) + log.info('Create data LDIF file by appending users, groups and nested groups') + with open(data_ldif, 'a') as file1: + data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, + nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) + data.do_magic() + start = time.time() + log.info('Run importLDIF task to add entries to Server') + try: + topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=data_ldif, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Online import failed' + e.message('desc')) + assert False + end = time.time() + time_import = int(end - start) + + log.info('Check if number of entries created matches the expected entries') + users_groups = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, USER_FILTER, [DN_ATTR]) + act_entries = str(users_groups).count(DN_ATTR) + log.info('Expected entries: {}, Actual entries: {}'.format(exp_entries, act_entries)) + assert act_entries == exp_entries + return time_import + else: + log.info('Ldapadd: Create data LDIF file with users, groups and nested groups') + ldif_dir = topo.standalone.get_ldif_dir() + data_ldif = os.path.join(ldif_dir, '/perf_add.ldif') + with open(data_ldif, 'w') as file1: + data = 
RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, + nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) + data.do_magic() + start = time.time() + log.info('Run LDAPMODIFY to add entries to Server') + try: + subprocess.check_output( + [LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, '-h', HOST_STANDALONE, '-p', str(PORT_STANDALONE), '-af', + data_ldif]) + except subprocess.CalledProcessError as e: + log.error('LDAPMODIFY failed to add entries, error:{:s}'.format(str(e))) + raise e + end = time.time() + cmd_time = int(end - start) + log.info('Time taken to complete LDAPADD: {} secs'.format(cmd_time)) + return cmd_time + + +def _sync_memberof_attrs(topo, exp_memberof): + """Check if expected entries are created or attributes are synced""" + + log.info('_sync_memberof_attrs: Check if expected memberOf attributes are synced/created') + loop = 0 + start = time.time() + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) + act_memberof = str(entries).count(MEMBEROF_ATTR) + end = time.time() + cmd_time = int(end - start) + log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, + act_memberof, cmd_time)) + while act_memberof != exp_memberof: + loop = loop + 1 + time.sleep(30) + start = time.time() + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) + act_memberof = str(entries).count(MEMBEROF_ATTR) + end = time.time() + cmd_time = cmd_time + int(end - start) + log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, + act_memberof, + cmd_time)) + # Worst case scenario, exit the test after 10hrs of wait + if loop > 1200: + log.error('Either syncing memberOf attrs takes too long or some issue with the test itself') + assert False + sync_time = 1 + loop * 30 + log.info('Expected memberOf attrs: {}, Actual memberOf attrs: {}'.format(exp_memberof, act_memberof)) + assert act_memberof == exp_memberof + return sync_time + + [email protected]("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_nestgrps_import(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import large users and nested groups with N depth and measure the time taken + + :ID: 169a09f2-2c2d-4e42-8b90-a0bd1034f278 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Create LDIF file for given nof_users and nof_groups + 2. Import entries to server + 3. Check if entries are created + 4. Run fixupMemberOf task to create memberOf attributes + 5. Check if memberOf attributes are synced for all users and groups + 6. Compare the actual no of memberOf attributes to the expected + 7. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) + log.info('Import LDIF file and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + + log.info('Run fixup memberOf task and measure the time taken to complete the task') + fixup_time = _run_fixup_memberof(topo) + + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + + total_time = import_time + fixup_time + sync_memberof + log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, + fixup_time, + total_time)) + + [email protected]("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 100, 20, 10, 5), (50000, 200, 50, 10, 10), (100000, 100, 20, 10, 10)]) +def test_nestgrps_add(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import large users and nested groups with n depth and measure the time taken + + :ID: 6eda75c6-5ae0-4b17-b610-d217d7ec7542 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Create LDIF file for given nof_users and nof_groups + 2. Add entries using LDAPADD + 3. Check if entries are created + 4. Check if memberOf attributes are synced for all users and groups + 5. Compare the actual no of memberOf attributes to the expected + 6. Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be created and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Creating base_ldif file and importing it to wipe out all users and groups') + _create_base_ldif(topo, True) + log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) + log.info('Run LDAPADD to add entries to Server') + add_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, False) + + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = add_time + sync_memberof + log.info('Time for ldapadd-{}secs, total time for memberOf sync: {}secs'.format(add_time, total_time)) + + [email protected]("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_mod_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import bulk entries, modify nested groups at N depth and measure the time taken + + :ID: 4bf8e753-6ded-4177-8225-aaf6aef4d131 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Import bulk entries with nested group and create memberOf attributes + 2. Modify nested groups by adding new members at each nested level + 3. Check new memberOf attributes created for users and groups + 4. Compare the actual memberOf attributes with the expected + 5. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be modified and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file, import it and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + log.info('Run fixup memberOf task and measure the time to complete the task') + fixup_time = _run_fixup_memberof(topo) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = import_time + fixup_time + sync_memberof + log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, + fixup_time, + total_time)) + + log.info('Add {} users to existing nested groups at all depth level'.format(nof_groups)) + log.info('Add one user to each groups at different nest levels') + start = time.time() + for usr in range(nof_groups): + usrrdn = 'newcliusr{}'.format(usr) + userdn = 'uid={},ou=people,{}'.format(usrrdn, SUFFIX) + groupdn = 'cn=group{},ou=groups,{}'.format(usr, SUFFIX) + try: + topo.standalone.add_s(Entry((userdn, { + 'objectclass': 'top person inetUser inetOrgperson'.split(), + 'cn': usrrdn, + 'sn': usrrdn, + 'userpassword': 'Secret123'}))) + except ldap.LDAPError as e: + log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) + raise + try: + topo.standalone.modify_s(groupdn, [(ldap.MOD_ADD, 'member', userdn)]) + except ldap.LDAPError as e: + log.error('Error-{}: Failed to add user to group'.format(e.message['desc'])) + assert False + end = time.time() + cmd_time = int(end - start) + + exp_memberof = (nof_users * grps_user) + nof_groups + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1))) + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = cmd_time + sync_memberof + log.info('Time taken add new members to existing nested groups + memberOf sync: {} secs'.format(total_time)) + + [email protected]("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_del_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import bulk entries, delete nested groups at N depth and measure the time taken + + :ID: d3d82ac5-d968-4cd6-a268-d380fc9fd51b + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Import bulk users and groups with nested level N. + 2. Run fixup memberOf task to create memberOf attributes + 3. Delete nested groups at nested level N + 4. Check memberOf attributes deleted for users and groups + 5. Compare the actual memberOf attributes with the expected + 6. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be deleted and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file, import it and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + log.info('Run fixup memberOf task and measure the time to complete the task') + fixup_time = _run_fixup_memberof(topo) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = import_time + fixup_time + sync_memberof + log.info('Time taken to complete add users + memberOf sync: {} secs'.format(total_time)) + + log.info('Delete {} groups from nested groups at depth level-{}'.format(nof_depth, nof_depth)) + start = time.time() + for nos in range(nof_depth, nof_groups, grps_user): + groupdn = 'cn=group{},ou=groups,{}'.format(nos, SUFFIX) + try: + topo.standalone.delete_s(groupdn) + except ldap.LDAPError as e: + log.error('Error-{}: Failed to delete group'.format(e.message['desc'])) + assert False + end = time.time() + cmd_time = int(end - start) + + exp_memberof = exp_memberof - (nof_users + (nof_depth * (nof_groups // grps_user))) + log.info('Check memberOf attributes after deleting groups at depth-{}'.format(nof_depth)) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = cmd_time + sync_memberof + log.info('Time taken to delete and sync memberOf attributes: {}secs'.format(total_time)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE))
0
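The perf test in the row above computes its expected number of memberOf attributes from a closed-form expression over the parametrized values. As a worked example, evaluating that expression (copied from the test) for the first parameter set, 20000 users, 200 groups, 20 groups per user, 10 nested groups per user and nesting depth 5:

nof_users, nof_groups, grps_user, ngrps_user, nof_depth = 20000, 200, 20, 10, 5

# Expression taken from the test: direct memberships plus the nested-group chains.
exp_memberof = (nof_users * grps_user) + (
    (nof_groups // grps_user) * (ngrps_user // nof_depth) *
    (nof_depth * (nof_depth + 1)) // 2)

print(exp_memberof)   # 400000 + 300 = 400300 memberOf values expected after sync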
fba364304b7fd3517e1d26556569485846e4ef67
389ds/389-ds-base
Issue 6680 - instance read-only mode is broken (#6681) Read-only mode is broken because some plugins fail to start, as they are not able to create/update some entries in the dse backend. The solution is to allow internal operations to write to the dse backend but not to modify dse.ldif, except for the special case of modifying the nsslapd-readonly flag (so that read-only mode can still be set and unset). Issue: #6680 Reviewed by: @droideck, @tbordaz (thanks!)
commit fba364304b7fd3517e1d26556569485846e4ef67 Author: progier389 <[email protected]> Date: Mon Mar 24 10:43:21 2025 +0100 Issue 6680 - instance read-only mode is broken (#6681) Read only mode is broken because some plugins fails to starts as they are not able to create/updates some entries in the dse backend. Solution is to allow interrnal operations to write in dse.backend but not modify the dse.ldif (except for the special case when trying to modify nsslapd-readonly flags (to be allowed to set/unset the readonly mode) Issue: #6680 Reviewed by: @droideck, @tbordaz (thanks!) diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py index 0a9f1f641..d1bf7356d 100644 --- a/dirsrvtests/tests/suites/config/regression_test.py +++ b/dirsrvtests/tests/suites/config/regression_test.py @@ -28,6 +28,8 @@ CUSTOM_MEM = '9100100100' IDLETIMEOUT = 5 DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}' +RO_ATTR = 'nsslapd-readonly' + @pytest.fixture(scope="module") def idletimeout_topo(topo, request): @@ -188,3 +190,61 @@ def test_idletimeout(idletimeout_topo, dn, expected_result): except ldap.SERVER_DOWN: result = True assert expected_result == result + + +def test_instance_readonly_mode(topo): + """Check that readonly mode is supported + + :id: 34d2e28e-04d7-11f0-b0cf-482ae39447e5 + :setup: Standalone Instance + :steps: + 1. Set readonly mode + 2. Stop the instance + 3. Get dse.ldif modification time + 4. Start the instance + 5. Get dse.ldif modification time + 6. Check that modification time has not changed + 7. Check that readonly mode is set + 8. Try to modify another config attribute + 9. Unset readonly mode + 10. Restart the instance + 11. Check that modification time has not changed + 12. Check that modification time has changed + 13. Check that readonly mode is unset + 14. Try to modify another config attribute + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Should get ldap.UNWILLING_TO_PERFORM exception + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + """ + + inst = topo.standalone + dse_path = f'{topo.standalone.get_config_dir()}/dse.ldif' + inst.config.replace(RO_ATTR, 'on') + inst.stop() + dse_mtime = os.stat(dse_path).st_mtime + inst.start() + new_dse_mtime = os.stat(dse_path).st_mtime + assert dse_mtime == new_dse_mtime + assert inst.config.get_attr_val_utf8(RO_ATTR) == "on" + attr = 'nsslapd-errorlog-maxlogsize' + val = inst.config.get_attr_val_utf8(attr) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + inst.config.replace(attr, val) + inst.config.replace(RO_ATTR, 'off') + inst.restart() + new_dse_mtime = os.stat(dse_path).st_mtime + assert dse_mtime != new_dse_mtime + assert inst.config.get_attr_val_utf8(RO_ATTR) == "off" + inst.config.replace(attr, val) diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c index 100c4d0d0..b788054db 100644 --- a/ldap/servers/slapd/dse.c +++ b/ldap/servers/slapd/dse.c @@ -1031,6 +1031,114 @@ dse_check_for_readonly_error(Slapi_PBlock *pb, struct dse *pdse) return rc; /* no error */ } +/* Trivial wrapper around slapi_re_comp to handle errors */ +static Slapi_Regex * +recomp(const char *regexp) +{ + char *error = ""; + Slapi_Regex *re = slapi_re_comp(regexp, &error); + if (re == NULL) { + slapi_log_err(SLAPI_LOG_ERR, "is_readonly_set_in_dse", + "Failed to compile '%s' regular expression. 
Error is %s\n", + regexp, error); + } + slapi_ch_free_string(&error); + return re; +} + +/* + * Check if "nsslapd-readonly: on" is in cn-config in dse.ldif file + * ( If the flag is set in memory but on in the file, the file should + * be written (to let dsconf able to modify the nsslapd-readonly flag) + */ +static bool +is_readonly_set_in_dse(const char *dsename) +{ + Slapi_Regex *re_config = recomp("^dn:\\s+cn=config\\s*$"); + Slapi_Regex *re_isro = recomp("^" CONFIG_READONLY_ATTRIBUTE ":\\s+on\\s*$"); + Slapi_Regex *re_eoe = recomp("^$"); + bool isconfigentry = false; + bool isro = false; + FILE *fdse = NULL; + char line[128]; + char *error = NULL; + const char *regexp = ""; + + if (!dsename) { + goto done; + } + if (re_config == NULL || re_isro == NULL || re_eoe == NULL) { + goto done; + } + fdse = fopen(dsename, "r"); + if (fdse == NULL) { + /* No dse file, we need to write it */ + goto done; + } + while (fgets(line, (sizeof line), fdse)) { + /* Convert the read line to lowercase */ + for (char *pt=line; *pt; pt++) { + if (isalpha(*pt)) { + *pt = tolower(*pt); + } + } + if (slapi_re_exec_nt(re_config, line)) { + isconfigentry = true; + } + if (slapi_re_exec_nt(re_eoe, line)) { + if (isconfigentry) { + /* End of config entry ==> readonly flag is not set */ + break; + } + } + if (isconfigentry && slapi_re_exec_nt(re_isro, line)) { + /* Found readonly flag */ + isro = true; + break; + } + } +done: + if (fdse) { + (void) fclose(fdse); + } + slapi_re_free(re_config); + slapi_re_free(re_isro); + slapi_re_free(re_eoe); + return isro; +} + +/* + * Check if dse.ldif can be written + * Beware that even in read-only mode dse.ldif file + * should still be written to change the nsslapd-readonly value + */ +static bool +check_if_readonly(struct dse *pdse) +{ + static bool ro = false; + + if (pdse->dse_filename == NULL) { + return false; + } + if (!slapi_config_get_readonly()) { + ro = false; + return ro; + } + if (ro) { + /* read-only mode and dse is up to date ==> Do not modify it. */ + return ro; + } + /* First attempt to write the dse.ldif since readonly mode is enabled. + * Lets check if "nsslapd-readonly: on" is in cn=config entry + * and allow to write the dse.ldif if it is the case + */ + if (is_readonly_set_in_dse(pdse->dse_filename)) { + /* read-only mode and dse is up to date ==> Do not modify it. */ + ro = true; + } + /* Read only mode but nsslapd-readonly value is not up to date. */ + return ro; +} /* * Write the AVL tree of entries back to the LDIF file. @@ -1041,7 +1149,7 @@ dse_write_file_nolock(struct dse *pdse) FPWrapper fpw; int rc = 0; - if (dont_ever_write_dse_files) { + if (dont_ever_write_dse_files || check_if_readonly(pdse)) { return rc; } diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c index 159d3972c..5382207ab 100644 --- a/ldap/servers/slapd/mapping_tree.c +++ b/ldap/servers/slapd/mapping_tree.c @@ -2058,6 +2058,82 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral) done: return ret; } + +/* + * This function dermines if an operation should be rejected + * when readonly mode is enabled. + * All operations are rejected except: + * - if they target a private backend that is not the DSE backend + * - if they are read operations (SEARCH, COMPARE, BIND, UNBIND) + * - if they are tombstone fixup operation (i.e: tombstone purging) + * - if they are internal operation that targets the DSE backend. 
+ * (change will then be done in memory but not written in dse.ldif) + * - single modify modify operation on cn=config changing nsslapd-readonly + * (to allow "dsconf instance config replace nsslapd-readonly=xxx", + change will then be done both in memory and in dse.ldif) + */ +static bool +is_rejected_op(Slapi_Operation *op, Slapi_Backend *be) +{ + const char *betype = slapi_be_gettype(be); + unsigned long be_op_type = operation_get_type(op); + int isdse = (betype && strcmp(betype, "DSE") == 0); + + /* Private backend operations are not rejected */ + + /* Read operations are not rejected */ + if ((be_op_type == SLAPI_OPERATION_SEARCH) || + (be_op_type == SLAPI_OPERATION_COMPARE) || + (be_op_type == SLAPI_OPERATION_BIND) || + (be_op_type == SLAPI_OPERATION_UNBIND)) { + return false; + } + + /* Tombstone fixup are not rejected. */ + if (operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP)) { + return false; + } + + if (!isdse) { + /* write operation on readonly backends are rejected */ + if (be->be_readonly) { + return true; + } + + /* private backends (DSE excepted) are not backed on files + * so write operations are accepted. + * but other operations (not on DSE) are rejected. + */ + if (slapi_be_private(be)) { + return false; + } else { + return true; + } + } + + /* Allowed operations in dse backend are: + * - the internal operations and + * - modify of nsslapd-readonly flag in cn=config + */ + + if (operation_is_flag_set(op, OP_FLAG_INTERNAL)) { + return false; + } + if (be_op_type == SLAPI_OPERATION_MODIFY) { + Slapi_DN *sdn = operation_get_target_spec(op); + Slapi_DN config = {0}; + LDAPMod **mods = op->o_params.p.p_modify.modify_mods; + slapi_sdn_init_ndn_byref(&config, SLAPD_CONFIG_DN); + if (mods && mods[0] && !mods[1] && + slapi_sdn_compare(sdn, &config) == 0 && + strcasecmp(mods[0]->mod_type, CONFIG_READONLY_ATTRIBUTE) == 0) { + /* Single modifier impacting nsslapd-readonly */ + return false; + } + } + return true; +} + /* * Description: * The reason we have a mapping tree. This function selects a backend or @@ -2095,7 +2171,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re int ret; int scope = LDAP_SCOPE_BASE; int op_type; - int fixup = 0; if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ @@ -2112,7 +2187,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re /* Get the target for this op */ target_sdn = operation_get_target_spec(op); - fixup = operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP); PR_ASSERT(mapping_tree_inited == 1); @@ -2161,22 +2235,14 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re * or if the whole server is readonly AND backend is public (!private) */ if ((ret == LDAP_SUCCESS) && *be && !be_isdeleted(*be) && - (((*be)->be_readonly && !fixup) || - ((slapi_config_get_readonly() && !fixup) && - !slapi_be_private(*be)))) { - unsigned long be_op_type = operation_get_type(op); - - if ((be_op_type != SLAPI_OPERATION_SEARCH) && - (be_op_type != SLAPI_OPERATION_COMPARE) && - (be_op_type != SLAPI_OPERATION_BIND) && - (be_op_type != SLAPI_OPERATION_UNBIND)) { + ((*be)->be_readonly || slapi_config_get_readonly()) && + is_rejected_op(op, *be)) { if (errorbuf) { PL_strncpyz(errorbuf, slapi_config_get_readonly() ? "Server is read-only" : "database is read-only", ebuflen); } ret = LDAP_UNWILLING_TO_PERFORM; slapi_be_Unlock(*be); *be = NULL; - } } return ret;
0
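The record above guards dse.ldif writes in read-only mode by scanning the file for "nsslapd-readonly: on" inside the cn=config entry before allowing dse_write_file_nolock() to proceed. Below is a minimal standalone sketch of that scan, assuming plain POSIX regex in place of the server's Slapi_Regex helpers; the readonly_set_in_dse() name and the command-line path argument are illustrative only, not code from the patch.

/* Walk an LDIF file line by line, lowercase each line, and report whether
 * "nsslapd-readonly: on" occurs inside the "dn: cn=config" entry
 * (a blank line ends the entry, as in the patch). */
#include <ctype.h>
#include <regex.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
readonly_set_in_dse(const char *path)
{
    regex_t re_config, re_isro, re_eoe;
    bool in_config = false, isro = false;
    char line[128];
    FILE *f = fopen(path, "r");

    if (f == NULL) {
        return false; /* no dse file at all: nothing to preserve */
    }
    /* POSIX ERE stands in for the Slapi_Regex patterns in the patch */
    regcomp(&re_config, "^dn:[ \t]+cn=config[ \t]*$", REG_EXTENDED | REG_NOSUB);
    regcomp(&re_isro, "^nsslapd-readonly:[ \t]+on[ \t]*$", REG_EXTENDED | REG_NOSUB);
    regcomp(&re_eoe, "^$", REG_EXTENDED | REG_NOSUB);

    while (fgets(line, sizeof line, f)) {
        line[strcspn(line, "\r\n")] = '\0'; /* strip the newline so "^$" matches */
        for (char *p = line; *p; p++) {
            *p = (char)tolower((unsigned char)*p);
        }
        if (regexec(&re_config, line, 0, NULL, 0) == 0) {
            in_config = true;
        } else if (in_config && regexec(&re_eoe, line, 0, NULL, 0) == 0) {
            break; /* end of the cn=config entry: flag is not set */
        } else if (in_config && regexec(&re_isro, line, 0, NULL, 0) == 0) {
            isro = true; /* found the read-only flag */
            break;
        }
    }
    regfree(&re_config);
    regfree(&re_isro);
    regfree(&re_eoe);
    fclose(f);
    return isro;
}

int
main(int argc, char **argv)
{
    printf("readonly: %s\n",
           readonly_set_in_dse(argc > 1 ? argv[1] : "dse.ldif") ? "on" : "off");
    return 0;
}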
af66c7b6239e689a5f5850d1d7deb3745fc966c2
389ds/389-ds-base
Issue 6641 - modrdn fails when a user is a member of multiple groups (#6643) Bug description: Rename of a user that is a member of multiple AM groups fails when MO and RI plugins are enabled. Fix description: MO plugin - After updating the entry member attribute, check the return value. Retry the delete if the attr value exists and retry the add if the attr value is missing. RI plugin - A previous commit checked whether the attr value was not present before adding a mod. This commit was reverted in favour of overriding the internal op return value, consistent with other plugins. CI test from Viktor Ashirov <[email protected]> Fixes: https://github.com/389ds/389-ds-base/issues/6641 Relates: https://github.com/389ds/389-ds-base/issues/6566 Reviewed by: @progier389, @tbordaz, @vashirov (Thank you)
commit af66c7b6239e689a5f5850d1d7deb3745fc966c2 Author: James Chapman <[email protected]> Date: Fri May 30 11:12:43 2025 +0000 Issue 6641 - modrdn fails when a user is member of multiple groups (#6643) Bug description: Rename of a user that is member of multiple AM groups fail when MO and RI plugins are enabled. Fix description: MO plugin - After updating the entry member attribute, check the return value. Retry the delete if the attr value exists and retry the add if the attr value is missing. RI plugin - A previous commit checked if the attr value was not present before adding a mod. This commit was reverted in favour of overriding the internal op return value, consistent with other plugins. CI test from Viktor Ashirov <[email protected]> Fixes: https://github.com/389ds/389-ds-base/issues/6641 Relates: https://github.com/389ds/389-ds-base/issues/6566 Reviewed by: @progier389, @tbordaz, @vashirov (Thank you) diff --git a/dirsrvtests/tests/suites/plugins/modrdn_test.py b/dirsrvtests/tests/suites/plugins/modrdn_test.py new file mode 100644 index 000000000..be79b0c3c --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/modrdn_test.py @@ -0,0 +1,174 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2025 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.plugins import ( + AutoMembershipDefinitions, + AutoMembershipPlugin, + AutoMembershipRegexRules, + MemberOfPlugin, + ReferentialIntegrityPlugin, +) + +pytestmark = pytest.mark.tier1 + +USER_PROPERTIES = { + "uid": "userwith", + "cn": "userwith", + "uidNumber": "1000", + "gidNumber": "2000", + "homeDirectory": "/home/testuser", + "displayName": "test user", +} + + +def test_modrdn_of_a_member_of_2_automember_groups(topology_st): + """Test that a member of 2 automember groups can be renamed + + :id: 0e40bdc4-a2d2-4bb8-8368-e02c8920bad2 + + :setup: Standalone instance + + :steps: + 1. Enable automember plugin + 2. Create definiton for users with A in the name + 3. Create regex rule for users with A in the name + 4. Create definiton for users with Z in the name + 5. Create regex rule for users with Z in the name + 6. Enable memberof plugin + 7. Enable referential integrity plugin + 8. Restart the instance + 9. Create groups + 10. Create users userwitha, userwithz, userwithaz + 11. Rename userwithaz + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. 
Success + """ + inst = topology_st.standalone + + # Enable automember plugin + automember_plugin = AutoMembershipPlugin(inst) + automember_plugin.enable() + + # Create definiton for users with A in the name + automembers = AutoMembershipDefinitions(inst) + automember = automembers.create( + properties={ + "cn": "userswithA", + "autoMemberScope": DEFAULT_SUFFIX, + "autoMemberFilter": "objectclass=posixAccount", + "autoMemberGroupingAttr": "member:dn", + } + ) + + # Create regex rule for users with A in the name + automembers_regex_rule = AutoMembershipRegexRules(inst, f"{automember.dn}") + automembers_regex_rule.create( + properties={ + "cn": "userswithA", + "autoMemberInclusiveRegex": ["cn=.*a.*"], + "autoMemberTargetGroup": [f"cn=userswithA,ou=Groups,{DEFAULT_SUFFIX}"], + } + ) + + # Create definiton for users with Z in the name + automember = automembers.create( + properties={ + "cn": "userswithZ", + "autoMemberScope": DEFAULT_SUFFIX, + "autoMemberFilter": "objectclass=posixAccount", + "autoMemberGroupingAttr": "member:dn", + } + ) + + # Create regex rule for users with Z in the name + automembers_regex_rule = AutoMembershipRegexRules(inst, f"{automember.dn}") + automembers_regex_rule.create( + properties={ + "cn": "userswithZ", + "autoMemberInclusiveRegex": ["cn=.*z.*"], + "autoMemberTargetGroup": [f"cn=userswithZ,ou=Groups,{DEFAULT_SUFFIX}"], + } + ) + + # Enable memberof plugin + memberof_plugin = MemberOfPlugin(inst) + memberof_plugin.enable() + + # Enable referential integrity plugin + referint_plugin = ReferentialIntegrityPlugin(inst) + referint_plugin.enable() + + # Restart the instance + inst.restart() + + # Create groups + groups = Groups(inst, DEFAULT_SUFFIX) + groupA = groups.create(properties={"cn": "userswithA"}) + groupZ = groups.create(properties={"cn": "userswithZ"}) + + # Create users + users = nsUserAccounts(inst, DEFAULT_SUFFIX) + + # userwitha + user_props = USER_PROPERTIES.copy() + user_props.update( + { + "uid": USER_PROPERTIES["uid"] + "a", + "cn": USER_PROPERTIES["cn"] + "a", + } + ) + user = users.create(properties=user_props) + + # userwithz + user_props.update( + { + "uid": USER_PROPERTIES["uid"] + "z", + "cn": USER_PROPERTIES["cn"] + "z", + } + ) + user = users.create(properties=user_props) + + # userwithaz + user_props.update( + { + "uid": USER_PROPERTIES["uid"] + "az", + "cn": USER_PROPERTIES["cn"] + "az", + } + ) + user = users.create(properties=user_props) + user_orig_dn = user.dn + + # Rename userwithaz + user.rename(new_rdn="uid=userwith") + user_new_dn = user.dn + + assert user.get_attr_val_utf8("uid") != "userwithaz" + + # Check groups contain renamed username + assert groupA.is_member(user_new_dn) + assert groupZ.is_member(user_new_dn) + + # Check groups dont contain original username + assert not groupA.is_member(user_orig_dn) + assert not groupZ.is_member(user_orig_dn) diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c index 38f817e5d..f900db7f2 100644 --- a/ldap/servers/plugins/automember/automember.c +++ b/ldap/servers/plugins/automember/automember.c @@ -1755,13 +1755,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char } mod_pb = slapi_pblock_new(); - slapi_modify_internal_set_pb(mod_pb, group_dn, - mods, 0, 0, automember_get_plugin_id(), 0); - slapi_modify_internal_pb(mod_pb); - slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &result); + /* Do a single mod with error overrides for DEL/ADD */ + result = slapi_single_modify_internal_override(mod_pb, 
slapi_sdn_new_dn_byval(group_dn), mods, + automember_get_plugin_id(), 0); if(add){ - if ((result != LDAP_SUCCESS) && (result != LDAP_TYPE_OR_VALUE_EXISTS)) { + if (result != LDAP_SUCCESS) { slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, "automember_update_member_value - Unable to add \"%s\" as " "a \"%s\" value to group \"%s\" (%s).\n", @@ -1771,7 +1770,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char } } else { /* delete value */ - if ((result != LDAP_SUCCESS) && (result != LDAP_NO_SUCH_ATTRIBUTE)) { + if (result != LDAP_SUCCESS) { slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, "automember_update_member_value - Unable to delete \"%s\" as " "a \"%s\" value from group \"%s\" (%s).\n", diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 45610b289..073d8d938 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -963,7 +963,6 @@ modify_need_fixup(int set) mod_pb, memberof_get_config_area(), mods, 0, 0, memberof_get_plugin_id(), SLAPI_OP_FLAG_FIXUP|SLAPI_OP_FLAG_BYPASS_REFERRALS); - slapi_modify_internal_pb(mod_pb); slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); slapi_pblock_destroy(mod_pb); @@ -1491,18 +1490,9 @@ memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data) mod.mod_op = LDAP_MOD_DELETE; mod.mod_type = ((memberof_del_dn_data *)callback_data)->type; mod.mod_values = val; - - slapi_modify_internal_set_pb_ext( - mod_pb, slapi_entry_get_sdn(e), - mods, 0, 0, - memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS); - - slapi_modify_internal_pb(mod_pb); - - slapi_pblock_get(mod_pb, - SLAPI_PLUGIN_INTOP_RESULT, - &rc); - + /* Internal mod with error overrides for DEL/ADD */ + rc = slapi_single_modify_internal_override(mod_pb, slapi_entry_get_sdn(e), mods, + memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS); slapi_pblock_destroy(mod_pb); if (rc == LDAP_NO_SUCH_ATTRIBUTE && val[0] == NULL) { @@ -1975,6 +1965,7 @@ memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data) return rc; } + LDAPMod ** my_copy_mods(LDAPMod **orig_mods) { @@ -2783,33 +2774,6 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o replace_mod.mod_values = replace_val; } rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); - if (rc == LDAP_NO_SUCH_ATTRIBUTE || rc == LDAP_TYPE_OR_VALUE_EXISTS) { - if (rc == LDAP_TYPE_OR_VALUE_EXISTS) { - /* - * For some reason the new modrdn value is present, so retry - * the delete by itself and ignore the add op by tweaking - * the mod array. - */ - mods[1] = NULL; - rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); - } else { - /* - * The memberof value to be replaced does not exist so just - * add the new value. Shuffle the mod array to apply only - * the add operation. - */ - mods[0] = mods[1]; - mods[1] = NULL; - rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); - if (rc == LDAP_TYPE_OR_VALUE_EXISTS) { - /* - * The entry already has the expected memberOf value, no - * problem just return success. 
- */ - rc = LDAP_SUCCESS; - } - } - } } } @@ -4470,43 +4434,57 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc) Slapi_PBlock *mod_pb = NULL; int added_oc = 0; int rc = 0; + LDAPMod *single_mod[2]; - while (1) { - mod_pb = slapi_pblock_new(); - slapi_modify_internal_set_pb( - mod_pb, dn, mods, 0, 0, - memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS); - slapi_modify_internal_pb(mod_pb); - - slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); - if (rc == LDAP_OBJECT_CLASS_VIOLATION) { - if (!add_oc || added_oc) { - /* - * We aren't auto adding an objectclass, or we already - * added the objectclass, and we are still failing. - */ + if (!dn || !mods) { + slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, + "Invalid argument: %s%s is NULL\n", + !dn ? "dn " : "", + !mods ? "mods " : ""); + return LDAP_PARAM_ERROR; + } + + + mod_pb = slapi_pblock_new(); + /* Split multiple mods into individual mod operations */ + for (size_t i = 0; (mods != NULL) && (mods[i] != NULL); i++) { + single_mod[0] = mods[i]; + single_mod[1] = NULL; + + while (1) { + slapi_pblock_init(mod_pb); + + /* Internal mod with error overrides for DEL/ADD */ + rc = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_normdn_byref(dn), single_mod, + memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS); + if (rc == LDAP_OBJECT_CLASS_VIOLATION) { + if (!add_oc || added_oc) { + /* + * We aren't auto adding an objectclass, or we already + * added the objectclass, and we are still failing. + */ + break; + } + rc = memberof_add_objectclass(add_oc, dn); + slapi_log_err(SLAPI_LOG_WARNING, MEMBEROF_PLUGIN_SUBSYSTEM, + "Entry %s - schema violation caught - repair operation %s\n", + dn ? dn : "unknown", + rc ? "failed" : "succeeded"); + if (rc) { + /* Failed to add objectclass */ + rc = LDAP_OBJECT_CLASS_VIOLATION; + break; + } + added_oc = 1; + } else if (rc) { + /* Some other fatal error */ + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, + "memberof_add_memberof_attr - Internal modify failed. rc=%d\n", rc); break; - } - rc = memberof_add_objectclass(add_oc, dn); - slapi_log_err(SLAPI_LOG_WARNING, MEMBEROF_PLUGIN_SUBSYSTEM, - "Entry %s - schema violation caught - repair operation %s\n", - dn ? dn : "unknown", - rc ? "failed" : "succeeded"); - if (rc) { - /* Failed to add objectclass */ - rc = LDAP_OBJECT_CLASS_VIOLATION; + } else { + /* success */ break; } - added_oc = 1; - slapi_pblock_destroy(mod_pb); - } else if (rc) { - /* Some other fatal error */ - slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, - "memberof_add_memberof_attr - Internal modify failed. 
rc=%d\n", rc); - break; - } else { - /* success */ - break; } } slapi_pblock_destroy(mod_pb); diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c index e7d95bfd9..5d7f9e5dd 100644 --- a/ldap/servers/plugins/referint/referint.c +++ b/ldap/servers/plugins/referint/referint.c @@ -712,19 +712,28 @@ static int _do_modify(Slapi_PBlock *mod_pb, Slapi_DN *entrySDN, LDAPMod **mods) { int rc = 0; + LDAPMod *mod[2]; - slapi_pblock_init(mod_pb); + /* Split multiple modifications into individual modify operations */ + for (size_t i = 0; (mods != NULL) && (mods[i] != NULL); i++) { + mod[0] = mods[i]; + mod[1] = NULL; - if (allow_repl) { - /* Must set as a replicated operation */ - slapi_modify_internal_set_pb_ext(mod_pb, entrySDN, mods, NULL, NULL, - referint_plugin_identity, OP_FLAG_REPLICATED); - } else { - slapi_modify_internal_set_pb_ext(mod_pb, entrySDN, mods, NULL, NULL, - referint_plugin_identity, 0); + slapi_pblock_init(mod_pb); + + /* Do a single mod with error overrides for DEL/ADD */ + if (allow_repl) { + rc = slapi_single_modify_internal_override(mod_pb, entrySDN, mod, + referint_plugin_identity, OP_FLAG_REPLICATED); + } else { + rc = slapi_single_modify_internal_override(mod_pb, entrySDN, mod, + referint_plugin_identity, 0); + } + + if (rc != LDAP_SUCCESS) { + return rc; + } } - slapi_modify_internal_pb(mod_pb); - slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); return rc; } @@ -924,7 +933,6 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ { Slapi_Mods *smods = NULL; char *newDN = NULL; - struct berval bv = {0}; char **dnParts = NULL; char *sval = NULL; char *newvalue = NULL; @@ -1027,30 +1035,21 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ } /* else: normalize_rc < 0) Ignore the DN normalization error for now. */ - bv.bv_val = newDN; - bv.bv_len = strlen(newDN); p = PL_strstr(sval, slapi_sdn_get_ndn(origDN)); if (p == sval) { /* (case 1) */ slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval); - /* Add only if the attr value does not exist */ - if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) { - slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN); - } + slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN); } else if (p) { /* (case 2) */ slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval); *p = '\0'; newvalue = slapi_ch_smprintf("%s%s", sval, newDN); - /* Add only if the attr value does not exist */ - if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) { - slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue); - } + slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue); slapi_ch_free_string(&newvalue); } /* else: value does not include the modified DN. Ignore it. */ slapi_ch_free_string(&sval); - bv = (struct berval){0}; } rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods)); if (rc) { diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 56ddae6cc..36dd1fbfb 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -489,6 +489,57 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod slapi_pblock_set(pb, SLAPI_PLUGIN_IDENTITY, plugin_identity); } +/* Performs a single LDAP modify operation with error overrides. 
+ * + * If specific errors occur, such as attempting to add an existing attribute or + * delete a non-existent one, the function overrides the error and returns success: + * - LDAP_MOD_ADD -> LDAP_TYPE_OR_VALUE_EXISTS (ignored) + * - LDAP_MOD_DELETE -> LDAP_NO_SUCH_ATTRIBUTE (ignored) + * + * Any other errors encountered during the operation will be returned as-is. + */ +int +slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_id, int op_flags) +{ + int rc = 0; + int result = 0; + int result_reset = 0; + int mod_op = 0; + + if (!pb || !sdn || !mod || !mod[0]) { + slapi_log_err(SLAPI_LOG_ERR, "slapi_single_modify_internal_override", + "Invalid argument: %s%s%s%s is NULL\n", + !pb ? "pb " : "", + !sdn ? "sdn " : "", + !mod ? "mod " : "", + !mod[0] ? "mod[0] " : ""); + + return LDAP_PARAM_ERROR; + } + + slapi_modify_internal_set_pb_ext(pb, sdn, mod, NULL, NULL, plugin_id, op_flags); + slapi_modify_internal_pb(pb); + slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &result); + + if (result != LDAP_SUCCESS) { + mod_op = mod[0]->mod_op & LDAP_MOD_OP; + if ((mod_op == LDAP_MOD_ADD && result == LDAP_TYPE_OR_VALUE_EXISTS) || + (mod_op == LDAP_MOD_DELETE && result == LDAP_NO_SUCH_ATTRIBUTE)) { + slapi_log_err(SLAPI_LOG_PLUGIN, "slapi_single_modify_internal_override", + "Overriding return code - plugin:%s dn:%s mod_op:%d result:%d\n", + plugin_id ? plugin_id->sci_component_name : "unknown", + sdn ? sdn->udn : "unknown", mod_op, result); + + slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &result_reset); + rc = LDAP_SUCCESS; + } else { + rc = result; + } + } + + return rc; +} + /* Helper functions */ static int diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 62393133d..415c81f0c 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -5956,6 +5956,7 @@ void slapi_add_entry_internal_set_pb(Slapi_PBlock *pb, Slapi_Entry *e, LDAPContr int slapi_add_internal_set_pb(Slapi_PBlock *pb, const char *dn, LDAPMod **attrs, LDAPControl **controls, Slapi_ComponentId *plugin_identity, int operation_flags); void slapi_modify_internal_set_pb(Slapi_PBlock *pb, const char *dn, LDAPMod **mods, LDAPControl **controls, const char *uniqueid, Slapi_ComponentId *plugin_identity, int operation_flags); void slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mods, LDAPControl **controls, const char *uniqueid, Slapi_ComponentId *plugin_identity, int operation_flags); +int slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_identity, int operation_flags); /** * Set \c Slapi_PBlock to perform modrdn/rename internally *
0
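The fix above centres on the new slapi_single_modify_internal_override() helper: it runs one internal modify and treats LDAP_TYPE_OR_VALUE_EXISTS on an ADD and LDAP_NO_SUCH_ATTRIBUTE on a DELETE as success, so the MO and RI plugins no longer fail a modrdn when the target attribute is already in the desired state. Below is a compact sketch of just that decision rule, assuming the standard result codes from OpenLDAP's <ldap.h>; override_mod_result() is a hypothetical name used only for this illustration, not part of the patch.

#include <ldap.h>
#include <stdio.h>

/* Map the result of a single internal mod to the value the plugins act on:
 * benign "already there" / "already gone" errors become LDAP_SUCCESS,
 * anything else is passed through unchanged. */
static int
override_mod_result(int mod_op, int result)
{
    int op = mod_op & LDAP_MOD_OP;

    if ((op == LDAP_MOD_ADD && result == LDAP_TYPE_OR_VALUE_EXISTS) ||
        (op == LDAP_MOD_DELETE && result == LDAP_NO_SUCH_ATTRIBUTE)) {
        return LDAP_SUCCESS; /* the directory already matches the intended state */
    }
    return result;
}

int
main(void)
{
    printf("%d\n", override_mod_result(LDAP_MOD_ADD, LDAP_TYPE_OR_VALUE_EXISTS));  /* 0 */
    printf("%d\n", override_mod_result(LDAP_MOD_DELETE, LDAP_NO_SUCH_ATTRIBUTE));  /* 0 */
    printf("%d\n", override_mod_result(LDAP_MOD_REPLACE, LDAP_NO_SUCH_ATTRIBUTE)); /* 16: not overridden */
    return 0;
}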
796dcfedee3d8e884dafb02ed0fce86cd95199b2
389ds/389-ds-base
Trac Ticket #46 - (additional) setup-ds-admin.pl does not like IPv6-only hostnames. Commit 850005499bd92c5b9b0027f944fcd33633c8db46 included a logic bug: the host name returned from getnameinfo needs to be checked against the original when the returned host name is not NULL.
commit 796dcfedee3d8e884dafb02ed0fce86cd95199b2 Author: Noriko Hosoi <[email protected]> Date: Thu Mar 29 15:38:16 2012 -0700 Trac Ticket #46 - (additional) setup-ds-admin.pl does not like ipv6 only hostnames commit 850005499bd92c5b9b0027f944fcd33633c8db46 included a logic bug. The host name returned from getnameinfo needs to be checked with the original when the host name is not NULL. diff --git a/ldap/admin/src/scripts/DSUtil.pm.in b/ldap/admin/src/scripts/DSUtil.pm.in index a3a8abfea..e84f482a5 100644 --- a/ldap/admin/src/scripts/DSUtil.pm.in +++ b/ldap/admin/src/scripts/DSUtil.pm.in @@ -269,7 +269,7 @@ sub checkHostname { debug(1, "ipaddr=", $ip, "\n"); # see if reverse resolution works my ($hn2, $service) = getnameinfo($saddr); - if (!$hn2) { + if ($hn2) { push @hostip, [$hn2, $ip]; if (lc($hn) eq lc($hn2)) { $found = 1;
0
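The one-line Perl fix above makes checkHostname compare the reverse-resolved name with the original host name only when getnameinfo actually returned a name (the previous code tested the negated condition). Below is a small C sketch of the same forward/reverse resolution check using getaddrinfo/getnameinfo; the NI_NAMEREQD flag, the buffer size, and the command-line host argument are choices made for this illustration, not details of DSUtil.pm.

#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <sys/socket.h>

int
main(int argc, char **argv)
{
    const char *hn = argc > 1 ? argv[1] : "localhost";
    struct addrinfo hints = {0}, *res = NULL;
    char hn2[256];
    int found = 0;

    hints.ai_family = AF_UNSPEC;      /* consider both IPv4 and IPv6 addresses */
    hints.ai_socktype = SOCK_STREAM;
    if (getaddrinfo(hn, NULL, &hints, &res) != 0) {
        fprintf(stderr, "cannot resolve %s\n", hn);
        return 1;
    }
    for (struct addrinfo *ai = res; ai; ai = ai->ai_next) {
        /* see if reverse resolution works for this address */
        if (getnameinfo(ai->ai_addr, ai->ai_addrlen, hn2, sizeof hn2,
                        NULL, 0, NI_NAMEREQD) == 0 && hn2[0] != '\0') {
            /* only compare when a name was actually returned */
            if (strcasecmp(hn, hn2) == 0) {
                found = 1;
            }
        }
    }
    freeaddrinfo(res);
    printf("%s %s\n", hn, found ? "matches reverse resolution" : "does not match");
    return 0;
}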
d4a676cff41fb8840d1ae9ceb9e3d6c9faf6b389
389ds/389-ds-base
Issue 49232 - Truncate the message when buffer capacity is exceeded Bug Description: When the access log buffer capacity is exceeded, we log an emergency error and the access log line is not logged at all. Fix Description: Log the error message to the errors log and still log the access log message, truncating its elements (for the search access log message) or simply logging what fits in the buffer in other cases. Add a CI test to the ds_logs test suite for basic feature testing. https://pagure.io/389-ds-base/issue/49232 Reviewed by: mreynolds, tbordaz, firstyear (Thanks!!)
commit d4a676cff41fb8840d1ae9ceb9e3d6c9faf6b389 Author: Simon Pichugin <[email protected]> Date: Fri May 31 14:50:12 2019 +0200 Issue 49232 - Truncate the message when buffer capacity is exceeded Bug Description: When the access log buffer capacity is exceeded we log an emergency error and the access log line is not logged at all. Fix Description: Log the error message to errors log and log the access log message but truncate its elements (for the search access log message). Or just log what is in the buffer in other cases. Add CI test to ds_logs test suite for the basic feature testing. https://pagure.io/389-ds-base/issue/49232 Reviewed by: mreynolds, tbordaz, firstyear (Thanks!!) diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py index e471ee537..d2c2579ac 100644 --- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -6,16 +6,16 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # -from random import sample - +import os +import logging import pytest -from lib389.tasks import * -from lib389.utils import * -from lib389.plugins import * from lib389.topologies import topology_st +from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL +import ldap pytestmark = pytest.mark.tier1 @@ -34,11 +34,11 @@ def add_users(topology_st, users_num): uid = 1000 + i users.create(properties={ 'uid': 'testuser%d' % uid, - 'cn' : 'testuser%d' % uid, - 'sn' : 'user', - 'uidNumber' : '%d' % uid, - 'gidNumber' : '%d' % uid, - 'homeDirectory' : '/home/testuser%d' % uid + 'cn': 'testuser%d' % uid, + 'sn': 'user', + 'uidNumber': '%d' % uid, + 'gidNumber': '%d' % uid, + 'homeDirectory': '/home/testuser%d' % uid }) @@ -80,7 +80,7 @@ def add_group_and_perform_user_operations(topology_st): assert test_user.dn in group.list_members() log.info('Renaming user') - test_user.rename('uid=new_test_user_777', newsuperior=SUFFIX) + test_user.rename('uid=new_test_user_777', newsuperior=DEFAULT_SUFFIX) log.info('Delete the user') delete_obj(test_user) @@ -110,28 +110,36 @@ def enable_plugins(topology_st): topo.restart() [email protected](scope="module") -def add_user_log_level_260(topology_st, enable_plugins): - log.info('Configure access log level to 260 (4 + 256)') - access_log_level = '260' - topology_st.standalone.config.set(LOG_ACCESS_LEVEL, access_log_level) +def add_user_log_level(topology_st, loglevel, request): + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + log.info(f'Configure access log level to {loglevel}') + topo.config.set(LOG_ACCESS_LEVEL, str(loglevel)) add_group_and_perform_user_operations(topology_st) + def fin(): + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + log.info('Delete the previous access logs for the next test') + topo.deleteAccessLogs() + request.addfinalizer(fin) [email protected](scope="module") -def add_user_log_level_516(topology_st, enable_plugins): - log.info('Configure access log level to 516 (4 + 512)') - access_log_level = '516' - topology_st.standalone.config.set(LOG_ACCESS_LEVEL, access_log_level) - add_group_and_perform_user_operations(topology_st) [email protected](scope="function") +def add_user_log_level_260(topology_st, enable_plugins, request): + 
access_log_level = 4 + 256 + add_user_log_level(topology_st, access_log_level, request) + + [email protected](scope="function") +def add_user_log_level_516(topology_st, enable_plugins, request): + access_log_level = 4 + 512 + add_user_log_level(topology_st, access_log_level, request) [email protected](scope="module") -def add_user_log_level_131076(topology_st, enable_plugins): - log.info('Configure access log level to 131076 (4 + 131072)') - access_log_level = '131076' - topology_st.standalone.config.set(LOG_ACCESS_LEVEL, access_log_level) - add_group_and_perform_user_operations(topology_st) + [email protected](scope="function") +def add_user_log_level_131076(topology_st, enable_plugins, request): + access_log_level = 4 + 131072 + add_user_log_level(topology_st, access_log_level, request) @pytest.mark.bz1273549 @@ -156,7 +164,7 @@ def test_check_default(topology_st): default = topology_st.standalone.config.get_attr_val_utf8(PLUGIN_TIMESTAMP) # Now check it should be ON by default - assert (default == "on") + assert default == "on" log.debug(default) @@ -283,6 +291,7 @@ def test_internal_log_server_level_0(topology_st): """ topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) log.info('Delete the previous access logs') topo.deleteAccessLogs() @@ -308,6 +317,7 @@ def test_internal_log_server_level_0(topology_st): # conn=Internal(0) op=0 assert not topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) log.info('Delete the previous access logs for the next test') topo.deleteAccessLogs() @@ -333,6 +343,7 @@ def test_internal_log_server_level_4(topology_st): """ topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) log.info('Delete the previous access logs for the next test') topo.deleteAccessLogs() @@ -358,6 +369,7 @@ def test_internal_log_server_level_4(topology_st): # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) log.info('Delete the previous access logs for the next test') topo.deleteAccessLogs() @@ -398,7 +410,7 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): # These comments contain lines we are trying to find without regex (the op numbers are just examples) log.info("Check the access logs for ADD operation of the user") - # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" + # op=10 ADD dn="uid=test_user_777,ou=topology_st, branch1,dc=example,dc=com" assert topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' @@ -441,9 +453,6 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') - log.info('Delete the previous access logs for the next test') - topo.deleteAccessLogs() - @pytest.mark.bz1358706 @pytest.mark.ds49029 @@ -525,9 +534,6 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') - log.info('Delete the previous access logs for the next test') - topo.deleteAccessLogs() - 
@pytest.mark.bz1358706 @pytest.mark.ds49029 @@ -618,6 +624,41 @@ def test_internal_log_level_516(topology_st, add_user_log_level_516): # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + [email protected] [email protected] +def test_access_log_truncated_search_message(topology_st): + """Tests that the access log message is properly truncated when the message is too long + + :id: 0a9af37d-3311-4a2f-ac0a-9a1c631aaf27 + :setup: Standalone instance + :steps: + 1. Make a search with a 2048+ characters basedn, filter and attribute list + 2. Check the access log has the message and it's truncated + :expectedresults: + 1. Operation should be successful + 2. Access log should contain truncated basedn, filter and attribute list + """ + + topo = topology_st.standalone + + large_str_base = "".join("cn=test," for _ in range(512)) + large_str_filter = "".join("(cn=test)" for _ in range(512)) + users = UserAccounts(topo, f'{large_str_base}dc=ending') + users._list_attrlist = [f'cn{i}' for i in range(512)] + log.info("Make a search") + users.filter(f'(|(objectclass=tester){large_str_filter}(cn=ending))') + + log.info('Restart the server to flush the logs') + topo.restart() + + assert topo.ds_access_log.match(r'.*cn=test,cn=test,.*') + assert topo.ds_access_log.match(r'.*objectClass=tester.*') + assert topo.ds_access_log.match(r'.*cn10.*') + assert not topo.ds_access_log.match(r'.*dc=ending.*') + assert not topo.ds_access_log.match(r'.*cn=ending.*') + assert not topo.ds_access_log.match(r'.*cn500.*') + log.info('Delete the previous access logs for the next test') topo.deleteAccessLogs() diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 2456abf1e..a8204fed4 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -81,8 +81,6 @@ static int slapi_log_map[] = { #define SLAPI_LOG_MIN SLAPI_LOG_FATAL /* from slapi-plugin.h */ #define SLAPI_LOG_MAX SLAPI_LOG_DEBUG /* from slapi-plugin.h */ -#define TBUFSIZE 50 /* size for time buffers */ -#define SLAPI_LOG_BUFSIZ 2048 /* size for data buffers */ /************************************************************************** * PROTOTYPES @@ -2553,8 +2551,9 @@ vslapd_log_access(char *fmt, va_list ap) { char buffer[SLAPI_LOG_BUFSIZ]; char vbuf[SLAPI_LOG_BUFSIZ]; - int blen = TBUFSIZE; - int vlen; + int32_t blen = TBUFSIZE; + int32_t vlen; + int32_t rc = LDAP_SUCCESS; time_t tnl; /* We do this sooner, because that we we can use the message in other calls */ @@ -2594,14 +2593,18 @@ vslapd_log_access(char *fmt, va_list ap) if (SLAPI_LOG_BUFSIZ - blen < vlen) { /* We won't be able to fit the message in! Uh-oh! */ - /* Should we actually just do the snprintf, and warn that message was truncated? */ - log__error_emergency("Insufficent buffer capacity to fit timestamp and message!", 1, 0); - return -1; + /* If the issue is not resolved during the fmt string creation (see op_shared_search()), + * we truncate the line and still log the message allowing the admin to check if + * someone is trying to do something bad. */ + vlen = strlen(vbuf); /* Truncated length */ + memcpy(&vbuf[vlen-4], "...\n", 4); /* Replace last characters with three dots and a new line character */ + slapi_log_err(SLAPI_LOG_ERR, "vslapd_log_access", "Insufficient buffer capacity to fit timestamp and message! 
The line in the access log was truncated\n"); + rc = -1; } log_append_buffer2(tnl, loginfo.log_access_buffer, buffer, blen, vbuf, vlen); - return (LDAP_SUCCESS); + return (rc); } int diff --git a/ldap/servers/slapd/log.h b/ldap/servers/slapd/log.h index a283b33b0..9fb4e7425 100644 --- a/ldap/servers/slapd/log.h +++ b/ldap/servers/slapd/log.h @@ -233,3 +233,8 @@ struct logging_opts #define LOG_AUDITFAIL_UNLOCK_READ() slapi_rwlock_unlock(loginfo.log_auditfail_rwlock) #define LOG_AUDITFAIL_LOCK_WRITE() slapi_rwlock_wrlock(loginfo.log_auditfail_rwlock) #define LOG_AUDITFAIL_UNLOCK_WRITE() slapi_rwlock_unlock(loginfo.log_auditfail_rwlock) + +/* For using with slapi_log_access */ +#define TBUFSIZE 50 /* size for time buffers */ +#define SLAPI_LOG_BUFSIZ 2048 /* size for data buffers */ +#define SLAPI_ACCESS_LOG_FMTBUF 128 /* size for access log formating line buffer */ diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index dd6917363..dbc069935 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -13,6 +13,7 @@ /* opshared.c - functions shared between regular and internal operations */ +#include "log.h" #include "slap.h" #define PAGEDRESULTS_PAGE_END 1 @@ -291,47 +292,16 @@ op_shared_search(Slapi_PBlock *pb, int send_result) proxy_err = proxyauth_get_dn(pb, &proxydn, &errtext); if (operation_is_flag_set(operation, OP_FLAG_ACTION_LOG_ACCESS)) { - char *fmtstr; + char fmtstr[SLAPI_ACCESS_LOG_FMTBUF]; uint64_t connid; int32_t op_id; int32_t op_internal_id; int32_t op_nested_count; -#define SLAPD_SEARCH_FMTSTR_BASE "conn=%" PRIu64 " op=%d SRCH base=\"%s\" scope=%d " -#define SLAPD_SEARCH_FMTSTR_BASE_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d) SRCH base=\"%s\" scope=%d " -#define SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d) SRCH base=\"%s\" scope=%d " -#define SLAPD_SEARCH_FMTSTR_REMAINDER " attrs=%s%s%s\n" - PR_ASSERT(fstr); if (internal_op) { get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); } - if (strlen(fstr) > 1024) { - /* - * slapi_log_access() throws away log lines that are longer than - * 2048 characters, so we limit the filter string to 1024 (better - * to log something rather than nothing) - */ - if (!internal_op) { - fmtstr = SLAPD_SEARCH_FMTSTR_BASE "filter=\"%.1024s...\"" SLAPD_SEARCH_FMTSTR_REMAINDER; - } else { - if (connid == 0) { - fmtstr = SLAPD_SEARCH_FMTSTR_BASE_INT_INT "filter=\"%.1024s...\"" SLAPD_SEARCH_FMTSTR_REMAINDER; - } else { - fmtstr = SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "filter=\"%.1024s...\"" SLAPD_SEARCH_FMTSTR_REMAINDER; - } - } - } else { - if (!internal_op) { - fmtstr = SLAPD_SEARCH_FMTSTR_BASE "filter=\"%s\"" SLAPD_SEARCH_FMTSTR_REMAINDER; - } else { - if (connid == 0) { - fmtstr = SLAPD_SEARCH_FMTSTR_BASE_INT_INT "filter=\"%s\"" SLAPD_SEARCH_FMTSTR_REMAINDER; - } else { - fmtstr = SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "filter=\"%s\"" SLAPD_SEARCH_FMTSTR_REMAINDER; - } - } - } if (NULL == attrs) { attrliststr = "ALL"; @@ -345,6 +315,37 @@ op_shared_search(Slapi_PBlock *pb, int send_result) proxystr = slapi_ch_smprintf(" authzid=\"%s\"", proxydn); } +#define SLAPD_SEARCH_FMTSTR_CONN_OP "conn=%" PRIu64 " op=%d" +#define SLAPD_SEARCH_FMTSTR_CONN_OP_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d)" +#define SLAPD_SEARCH_FMTSTR_CONN_OP_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d)" +#define SLAPD_SEARCH_FMTSTR_REMAINDER "%s%s\n" + +#define SLAPD_SEARCH_BUFPART 512 +#define LOG_ACCESS_FORMAT_BUFSIZ(arg, logstr, bufsiz) ((strlen(arg)) < (bufsiz) ? 
(logstr "%s") : \ + (logstr "%." STRINGIFYDEFINE(bufsiz) "s...")) +/* Define a separate macro for attributes because when we strip it we should take care of the quotes */ +#define LOG_ACCESS_FORMAT_ATTR_BUFSIZ(arg, logstr, bufsiz) ((strlen(arg)) < (bufsiz) ? (logstr "%s") : \ + (logstr "%." STRINGIFYDEFINE(bufsiz) "s...\"")) + + /* + * slapi_log_access() throws away log lines that are longer than + * 2048 characters, so we limit the filter, base and attrs strings to 512 + * (better to log something rather than nothing) + */ + if (!internal_op) { + strcpy(fmtstr, SLAPD_SEARCH_FMTSTR_CONN_OP); + } else { + if (connid == 0) { + strcpy(fmtstr, SLAPD_SEARCH_FMTSTR_CONN_OP_INT_INT); + } else { + strcpy(fmtstr, SLAPD_SEARCH_FMTSTR_CONN_OP_EXT_INT); + } + } + strcat(fmtstr, LOG_ACCESS_FORMAT_BUFSIZ(normbase, " SRCH base=\"", SLAPD_SEARCH_BUFPART)); + strcat(fmtstr, LOG_ACCESS_FORMAT_BUFSIZ(fstr, "\" scope=%d filter=\"", SLAPD_SEARCH_BUFPART)); + strcat(fmtstr, LOG_ACCESS_FORMAT_ATTR_BUFSIZ(attrliststr, "\" attrs=", SLAPD_SEARCH_BUFPART)); + strcat(fmtstr, SLAPD_SEARCH_FMTSTR_REMAINDER); + if (!internal_op) { slapi_log_access(LDAP_DEBUG_STATS, fmtstr, pb_conn->c_connid,
0
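The patch above stops dropping over-long access-log lines: op_shared_search() now assembles its format string with a 512-byte cap per element (SLAPD_SEARCH_BUFPART) and vslapd_log_access() truncates anything that still exceeds the 2048-byte buffer instead of discarding it. Below is a minimal sketch of the per-element truncation idea using plain snprintf precision limits; make_srch_line() is a hypothetical stand-in for the real format-string assembly, not code from the patch, though the 512/2048 sizes match the values it defines.

#include <stdio.h>
#include <string.h>

#define LOG_BUFSIZ 2048 /* matches SLAPI_LOG_BUFSIZ in the patch */
#define BUFPART 512     /* matches SLAPD_SEARCH_BUFPART in the patch */

/* Format a search log line, capping each element independently so one huge
 * base or filter cannot push the rest of the line past the log buffer;
 * a literal "..." marks each truncated element. */
static void
make_srch_line(char *out, size_t outlen, const char *base, const char *filter)
{
    snprintf(out, outlen, "SRCH base=\"%.*s%s\" filter=\"%.*s%s\"\n",
             BUFPART, base, strlen(base) > (size_t)BUFPART ? "..." : "",
             BUFPART, filter, strlen(filter) > (size_t)BUFPART ? "..." : "");
}

int
main(void)
{
    char line[LOG_BUFSIZ];
    char filter[4096];

    memset(filter, 'a', sizeof filter - 1); /* oversized filter, like the CI test's 512 clauses */
    filter[sizeof filter - 1] = '\0';
    make_srch_line(line, sizeof line, "dc=example,dc=com", filter);
    printf("%zu bytes: %.80s...\n", strlen(line), line);
    return 0;
}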
9a9b03fcc2c1afcc270aeb450346533dbd6c594a
389ds/389-ds-base
Resolves: 237356 Summary: Move DS Admin Code into Admin Server (Comment #62) Description: provide slapd.inf, which carries the DS static info used by setup/config
commit 9a9b03fcc2c1afcc270aeb450346533dbd6c594a Author: Noriko Hosoi <[email protected]> Date: Fri Jun 15 17:02:29 2007 +0000 Resolves: 237356 Summary: Move DS Admin Code into Admin Server (Comment #62) Description: providing slapd.inf having the DS static info for the setup/config diff --git a/Makefile.am b/Makefile.am index 3f0baf498..0f4027cd4 100644 --- a/Makefile.am +++ b/Makefile.am @@ -75,6 +75,7 @@ taskdir = $(datadir)@scripttemplatedir@ initdir = $(sysconfdir)@initdir@ instconfigdir = @instconfigdir@ perldir = $(libdir)@perldir@ +infdir = $(datadir)@infdir@ shared_lib_suffix = @shared_lib_suffix@ @@ -106,7 +107,7 @@ serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la \ libpassthru-plugin.la libpresence-plugin.la \ libpwdstorage-plugin.la libreferint-plugin.la libreplication-plugin.la \ libretrocl-plugin.la libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ - libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) + libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) nodist_property_DATA = ns-slapd.properties @@ -216,6 +217,8 @@ task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \ init_SCRIPTS = wrappers/$(PACKAGE_NAME) +inf_DATA = ldap/cm/newinst/slapd.inf + #//////////////////////////////////////////////////////////////// # # Server Strings @@ -1018,7 +1021,9 @@ fixupcmd = sed \ -e 's,@ECHO_C\@,$(ECHO_C),g' \ -e 's,@brand\@,$(brand),g' \ -e 's,@capbrand\@,$(capbrand),g' \ + -e 's,@vendor\@,$(vendor),g' \ -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ + -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ -e 's,@perldir\@,$(perldir),g' \ -e 's,@shared_lib_suffix\@,$(shared_lib_suffix),g' else @@ -1049,7 +1054,9 @@ fixupcmd = sed \ -e 's,@ECHO_C\@,$(ECHO_C),g' \ -e 's,@brand\@,$(brand),g' \ -e 's,@capbrand\@,$(capbrand),g' \ + -e 's,@vendor\@,$(vendor),g' \ -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ + -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ -e 's,@perldir\@,$(perldir),g' \ -e 's,@shared_lib_suffix\@,$(shared_lib_suffix),g' endif diff --git a/Makefile.in b/Makefile.in index 37d831c74..b93f8823d 100644 --- a/Makefile.in +++ b/Makefile.in @@ -96,8 +96,9 @@ am__installdirs = "$(DESTDIR)$(serverdir)" \ "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(bindir)" \ "$(DESTDIR)$(initdir)" "$(DESTDIR)$(perldir)" \ "$(DESTDIR)$(taskdir)" "$(DESTDIR)$(configdir)" \ - "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(propertydir)" \ - "$(DESTDIR)$(sampledatadir)" "$(DESTDIR)$(schemadir)" + "$(DESTDIR)$(infdir)" "$(DESTDIR)$(propertydir)" \ + "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(sampledatadir)" \ + "$(DESTDIR)$(schemadir)" serverLTLIBRARIES_INSTALL = $(INSTALL) serverpluginLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(server_LTLIBRARIES) $(serverplugin_LTLIBRARIES) @@ -333,6 +334,10 @@ am_libns_dshttpd_la_OBJECTS = \ lib/libsi18n/libns_dshttpd_la-reshash.lo \ lib/libsi18n/libns_dshttpd_la-txtfile.lo $(am__objects_1) libns_dshttpd_la_OBJECTS = $(am_libns_dshttpd_la_OBJECTS) +libothercrypto_plugin_la_LIBADD = +am_libothercrypto_plugin_la_OBJECTS = ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo +libothercrypto_plugin_la_OBJECTS = \ + $(am_libothercrypto_plugin_la_OBJECTS) libpam_passthru_plugin_la_DEPENDENCIES = $(am__DEPENDENCIES_1) am_libpam_passthru_plugin_la_OBJECTS = ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo \ ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptdebug.lo \ @@ -354,6 +359,9 @@ libpassthru_plugin_la_OBJECTS = $(am_libpassthru_plugin_la_OBJECTS) 
libpresence_plugin_la_LIBADD = am_libpresence_plugin_la_OBJECTS = ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo libpresence_plugin_la_OBJECTS = $(am_libpresence_plugin_la_OBJECTS) +libpwderror_plugin_la_LIBADD = +am_libpwderror_plugin_la_OBJECTS = ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo +libpwderror_plugin_la_OBJECTS = $(am_libpwderror_plugin_la_OBJECTS) libpwdstorage_plugin_la_LIBADD = am_libpwdstorage_plugin_la_OBJECTS = ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo \ ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-crypt_pwd.lo \ @@ -554,6 +562,10 @@ libviews_plugin_la_LIBADD = am_libviews_plugin_la_OBJECTS = \ ldap/servers/plugins/views/libviews_plugin_la-views.lo libviews_plugin_la_OBJECTS = $(am_libviews_plugin_la_OBJECTS) +libxor_plugin_la_LIBADD = +am_libxor_plugin_la_OBJECTS = \ + ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo +libxor_plugin_la_OBJECTS = $(am_libxor_plugin_la_OBJECTS) binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) $(sbin_PROGRAMS) @@ -748,19 +760,21 @@ SOURCES = $(libavl_a_SOURCES) $(libldaputil_a_SOURCES) \ $(libdna_plugin_la_SOURCES) $(libds_admin_la_SOURCES) \ $(libhttp_client_plugin_la_SOURCES) \ $(libns_dshttpd_la_SOURCES) \ + $(libothercrypto_plugin_la_SOURCES) \ $(libpam_passthru_plugin_la_SOURCES) \ $(libpassthru_plugin_la_SOURCES) \ $(libpresence_plugin_la_SOURCES) \ + $(libpwderror_plugin_la_SOURCES) \ $(libpwdstorage_plugin_la_SOURCES) \ $(libreferint_plugin_la_SOURCES) \ $(libreplication_plugin_la_SOURCES) \ $(libretrocl_plugin_la_SOURCES) $(libroles_plugin_la_SOURCES) \ $(libslapd_la_SOURCES) $(libstatechange_plugin_la_SOURCES) \ $(libsyntax_plugin_la_SOURCES) $(libviews_plugin_la_SOURCES) \ - $(dbscan_bin_SOURCES) $(ds_newinst_bin_SOURCES) \ - $(dsktune_bin_SOURCES) $(infadd_bin_SOURCES) \ - $(ldap_agent_bin_SOURCES) $(ldclt_bin_SOURCES) \ - $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \ + $(libxor_plugin_la_SOURCES) $(dbscan_bin_SOURCES) \ + $(ds_newinst_bin_SOURCES) $(dsktune_bin_SOURCES) \ + $(infadd_bin_SOURCES) $(ldap_agent_bin_SOURCES) \ + $(ldclt_bin_SOURCES) $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \ $(migratecred_bin_SOURCES) $(mmldif_bin_SOURCES) \ $(ns_slapd_SOURCES) $(pwdhash_bin_SOURCES) \ $(rsearch_bin_SOURCES) @@ -774,29 +788,32 @@ DIST_SOURCES = $(libavl_a_SOURCES) $(libldaputil_a_SOURCES) \ $(libdna_plugin_la_SOURCES) $(libds_admin_la_SOURCES) \ $(libhttp_client_plugin_la_SOURCES) \ $(libns_dshttpd_la_SOURCES) \ + $(libothercrypto_plugin_la_SOURCES) \ $(libpam_passthru_plugin_la_SOURCES) \ $(libpassthru_plugin_la_SOURCES) \ $(libpresence_plugin_la_SOURCES) \ + $(libpwderror_plugin_la_SOURCES) \ $(libpwdstorage_plugin_la_SOURCES) \ $(libreferint_plugin_la_SOURCES) \ $(libreplication_plugin_la_SOURCES) \ $(libretrocl_plugin_la_SOURCES) $(libroles_plugin_la_SOURCES) \ $(libslapd_la_SOURCES) $(libstatechange_plugin_la_SOURCES) \ $(libsyntax_plugin_la_SOURCES) $(libviews_plugin_la_SOURCES) \ - $(dbscan_bin_SOURCES) $(ds_newinst_bin_SOURCES) \ - $(dsktune_bin_SOURCES) $(infadd_bin_SOURCES) \ - $(ldap_agent_bin_SOURCES) $(am__ldclt_bin_SOURCES_DIST) \ - $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \ - $(migratecred_bin_SOURCES) $(mmldif_bin_SOURCES) \ - $(am__ns_slapd_SOURCES_DIST) $(pwdhash_bin_SOURCES) \ - $(rsearch_bin_SOURCES) + $(libxor_plugin_la_SOURCES) $(dbscan_bin_SOURCES) \ + $(ds_newinst_bin_SOURCES) $(dsktune_bin_SOURCES) \ + $(infadd_bin_SOURCES) $(ldap_agent_bin_SOURCES) 
\ + $(am__ldclt_bin_SOURCES_DIST) $(ldif_bin_SOURCES) \ + $(makstrdb_SOURCES) $(migratecred_bin_SOURCES) \ + $(mmldif_bin_SOURCES) $(am__ns_slapd_SOURCES_DIST) \ + $(pwdhash_bin_SOURCES) $(rsearch_bin_SOURCES) configDATA_INSTALL = $(INSTALL_DATA) +infDATA_INSTALL = $(INSTALL_DATA) nodist_propertyDATA_INSTALL = $(INSTALL_DATA) propertyDATA_INSTALL = $(INSTALL_DATA) sampledataDATA_INSTALL = $(INSTALL_DATA) schemaDATA_INSTALL = $(INSTALL_DATA) -DATA = $(config_DATA) $(nodist_property_DATA) $(property_DATA) \ - $(sampledata_DATA) $(schema_DATA) +DATA = $(config_DATA) $(inf_DATA) $(nodist_property_DATA) \ + $(property_DATA) $(sampledata_DATA) $(schema_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -874,7 +891,6 @@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ RANLIB = @RANLIB@ -SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOLARIS_FALSE = @SOLARIS_FALSE@ @@ -937,6 +953,7 @@ icu_bin = @icu_bin@ icu_inc = @icu_inc@ icu_lib = @icu_lib@ includedir = @includedir@ +infdir = $(datadir)@infdir@ infodir = @infodir@ initdir = $(sysconfdir)@initdir@ install_sh = @install_sh@ @@ -1037,13 +1054,17 @@ server_LTLIBRARIES = libslapd.la libds_admin.la libns-dshttpd.la @enable_pam_passthru_TRUE@LIBPAM_PASSTHRU_PLUGIN = libpam-passthru-plugin.la @enable_dna_TRUE@LIBDNA_PLUGIN = libdna-plugin.la @enable_bitwise_TRUE@LIBBITWISE_PLUGIN = libbitwise-plugin.la +LIBXOR_PLUGIN = libxor-plugin.la +LIBPWDERROR_PLUGIN = libpwderror-plugin.la +LIBOTHERCRYPTO_PLUGIN = libothercrypto-plugin.la serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la \ libback-ldbm.la libchainingdb-plugin.la libcos-plugin.la libdes-plugin.la \ libdistrib-plugin.la libhttp-client-plugin.la libcollation-plugin.la \ libpassthru-plugin.la libpresence-plugin.la \ libpwdstorage-plugin.la libreferint-plugin.la libreplication-plugin.la \ libretrocl-plugin.la libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ - libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) + libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) \ + $(LIBXOR_PLUGIN) $(LIBPWDERROR_PLUGIN) $(LIBOTHERCRYPTO_PLUGIN) nodist_property_DATA = ns-slapd.properties noinst_LIBRARIES = libavl.a libldaputil.a @@ -1148,6 +1169,7 @@ task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \ $(srcdir)/ldap/admin/src/scripts/template-migrateTo7 init_SCRIPTS = wrappers/$(PACKAGE_NAME) +inf_DATA = ldap/cm/newinst/slapd.inf #//////////////////////////////////////////////////////////////// # @@ -1729,6 +1751,27 @@ libbitwise_plugin_la_SOURCES = ldap/servers/plugins/bitwise/bitwise.c libbitwise_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) libbitwise_plugin_la_LDFLAGS = -avoid-version +#------------------------ +# libxor-plugin +#------------------------ +libxor_plugin_la_SOURCES = ldap/servers/plugins/xor/xorplugin.c +libxor_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) +libxor_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpwderror-plugin +#------------------------ +libpwderror_plugin_la_SOURCES = ldap/servers/plugins/pwderror/pwderror-plugin.c +libpwderror_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) +libpwderror_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libothercrypto-plugin +#------------------------ +libothercrypto_plugin_la_SOURCES = ldap/servers/plugins/othercrypto/othercrypto.c +libothercrypto_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) +libothercrypto_plugin_la_LDFLAGS = 
-avoid-version + #//////////////////////////////////////////////////////////////// # # Programs @@ -1912,7 +1955,9 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS @BUNDLE_FALSE@ -e 's,@ECHO_C\@,$(ECHO_C),g' \ @BUNDLE_FALSE@ -e 's,@brand\@,$(brand),g' \ @BUNDLE_FALSE@ -e 's,@capbrand\@,$(capbrand),g' \ +@BUNDLE_FALSE@ -e 's,@vendor\@,$(vendor),g' \ @BUNDLE_FALSE@ -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ +@BUNDLE_FALSE@ -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ @BUNDLE_FALSE@ -e 's,@perldir\@,$(perldir),g' \ @BUNDLE_FALSE@ -e 's,@shared_lib_suffix\@,$(shared_lib_suffix),g' @@ -1950,7 +1995,9 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS @BUNDLE_TRUE@ -e 's,@ECHO_C\@,$(ECHO_C),g' \ @BUNDLE_TRUE@ -e 's,@brand\@,$(brand),g' \ @BUNDLE_TRUE@ -e 's,@capbrand\@,$(capbrand),g' \ +@BUNDLE_TRUE@ -e 's,@vendor\@,$(vendor),g' \ @BUNDLE_TRUE@ -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ +@BUNDLE_TRUE@ -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ @BUNDLE_TRUE@ -e 's,@perldir\@,$(perldir),g' \ @BUNDLE_TRUE@ -e 's,@shared_lib_suffix\@,$(shared_lib_suffix),g' @@ -2766,6 +2813,17 @@ lib/ldaputil/libns_dshttpd_la-vtable.lo: lib/ldaputil/$(am__dirstamp) \ lib/ldaputil/$(DEPDIR)/$(am__dirstamp) libns-dshttpd.la: $(libns_dshttpd_la_OBJECTS) $(libns_dshttpd_la_DEPENDENCIES) $(CXXLINK) -rpath $(serverdir) $(libns_dshttpd_la_LDFLAGS) $(libns_dshttpd_la_OBJECTS) $(libns_dshttpd_la_LIBADD) $(LIBS) +ldap/servers/plugins/othercrypto/$(am__dirstamp): + @$(mkdir_p) ldap/servers/plugins/othercrypto + @: > ldap/servers/plugins/othercrypto/$(am__dirstamp) +ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp): + @$(mkdir_p) ldap/servers/plugins/othercrypto/$(DEPDIR) + @: > ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp) +ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo: \ + ldap/servers/plugins/othercrypto/$(am__dirstamp) \ + ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp) +libothercrypto-plugin.la: $(libothercrypto_plugin_la_OBJECTS) $(libothercrypto_plugin_la_DEPENDENCIES) + $(LINK) -rpath $(serverplugindir) $(libothercrypto_plugin_la_LDFLAGS) $(libothercrypto_plugin_la_OBJECTS) $(libothercrypto_plugin_la_LIBADD) $(LIBS) ldap/servers/plugins/pam_passthru/$(am__dirstamp): @$(mkdir_p) ldap/servers/plugins/pam_passthru @: > ldap/servers/plugins/pam_passthru/$(am__dirstamp) @@ -2823,6 +2881,17 @@ ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo: \ ldap/servers/plugins/presence/$(DEPDIR)/$(am__dirstamp) libpresence-plugin.la: $(libpresence_plugin_la_OBJECTS) $(libpresence_plugin_la_DEPENDENCIES) $(LINK) -rpath $(serverplugindir) $(libpresence_plugin_la_LDFLAGS) $(libpresence_plugin_la_OBJECTS) $(libpresence_plugin_la_LIBADD) $(LIBS) +ldap/servers/plugins/pwderror/$(am__dirstamp): + @$(mkdir_p) ldap/servers/plugins/pwderror + @: > ldap/servers/plugins/pwderror/$(am__dirstamp) +ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp): + @$(mkdir_p) ldap/servers/plugins/pwderror/$(DEPDIR) + @: > ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp) +ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo: \ + ldap/servers/plugins/pwderror/$(am__dirstamp) \ + ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp) +libpwderror-plugin.la: $(libpwderror_plugin_la_OBJECTS) $(libpwderror_plugin_la_DEPENDENCIES) + $(LINK) -rpath $(serverplugindir) $(libpwderror_plugin_la_LDFLAGS) $(libpwderror_plugin_la_OBJECTS) $(libpwderror_plugin_la_LIBADD) $(LIBS) 
ldap/servers/plugins/pwdstorage/$(am__dirstamp): @$(mkdir_p) ldap/servers/plugins/pwdstorage @: > ldap/servers/plugins/pwdstorage/$(am__dirstamp) @@ -3411,6 +3480,17 @@ ldap/servers/plugins/views/libviews_plugin_la-views.lo: \ ldap/servers/plugins/views/$(DEPDIR)/$(am__dirstamp) libviews-plugin.la: $(libviews_plugin_la_OBJECTS) $(libviews_plugin_la_DEPENDENCIES) $(LINK) -rpath $(serverplugindir) $(libviews_plugin_la_LDFLAGS) $(libviews_plugin_la_OBJECTS) $(libviews_plugin_la_LIBADD) $(LIBS) +ldap/servers/plugins/xor/$(am__dirstamp): + @$(mkdir_p) ldap/servers/plugins/xor + @: > ldap/servers/plugins/xor/$(am__dirstamp) +ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp): + @$(mkdir_p) ldap/servers/plugins/xor/$(DEPDIR) + @: > ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp) +ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo: \ + ldap/servers/plugins/xor/$(am__dirstamp) \ + ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp) +libxor-plugin.la: $(libxor_plugin_la_OBJECTS) $(libxor_plugin_la_DEPENDENCIES) + $(LINK) -rpath $(serverplugindir) $(libxor_plugin_la_LDFLAGS) $(libxor_plugin_la_OBJECTS) $(libxor_plugin_la_LIBADD) $(LIBS) install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" @@ -3960,6 +4040,8 @@ mostlyclean-compile: -rm -f ldap/servers/plugins/http/libhttp_client_plugin_la-http_client.lo -rm -f ldap/servers/plugins/http/libhttp_client_plugin_la-http_impl.$(OBJEXT) -rm -f ldap/servers/plugins/http/libhttp_client_plugin_la-http_impl.lo + -rm -f ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.$(OBJEXT) + -rm -f ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo -rm -f ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.$(OBJEXT) -rm -f ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo -rm -f ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptdebug.$(OBJEXT) @@ -3982,6 +4064,8 @@ mostlyclean-compile: -rm -f ldap/servers/plugins/passthru/libpassthru_plugin_la-ptutil.lo -rm -f ldap/servers/plugins/presence/libpresence_plugin_la-presence.$(OBJEXT) -rm -f ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo + -rm -f ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.$(OBJEXT) + -rm -f ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo -rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.$(OBJEXT) -rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo -rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-crypt_pwd.$(OBJEXT) @@ -4160,6 +4244,8 @@ mostlyclean-compile: -rm -f ldap/servers/plugins/uiduniq/libattr_unique_plugin_la-uid.lo -rm -f ldap/servers/plugins/views/libviews_plugin_la-views.$(OBJEXT) -rm -f ldap/servers/plugins/views/libviews_plugin_la-views.lo + -rm -f ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.$(OBJEXT) + -rm -f ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ancestorid.$(OBJEXT) -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ancestorid.lo -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-archive.$(OBJEXT) @@ -4714,6 +4800,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/dna/$(DEPDIR)/libdna_plugin_la-dna.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/http/$(DEPDIR)/libhttp_client_plugin_la-http_client.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@ldap/servers/plugins/http/$(DEPDIR)/libhttp_client_plugin_la-http_impl.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptdebug.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptimpl.Plo@am__quote@ @@ -4725,6 +4812,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/passthru/$(DEPDIR)/libpassthru_plugin_la-ptpreop.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/passthru/$(DEPDIR)/libpassthru_plugin_la-ptutil.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/presence/$(DEPDIR)/libpresence_plugin_la-presence.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-crypt_pwd.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-md5_pwd.Plo@am__quote@ @@ -4814,6 +4902,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/uiduniq/$(DEPDIR)/libattr_unique_plugin_la-7bit.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/uiduniq/$(DEPDIR)/libattr_unique_plugin_la-uid.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/views/$(DEPDIR)/libviews_plugin_la-views.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/ldap_agent_bin-agtmmap.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-add.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-agtmmap.Plo@am__quote@ @@ -6245,6 +6334,13 @@ lib/ldaputil/libns_dshttpd_la-vtable.lo: lib/ldaputil/vtable.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libns_dshttpd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lib/ldaputil/libns_dshttpd_la-vtable.lo `test -f 'lib/ldaputil/vtable.c' || echo '$(srcdir)/'`lib/ldaputil/vtable.c +ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo: ldap/servers/plugins/othercrypto/othercrypto.c +@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libothercrypto_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo -MD -MP -MF "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Tpo" -c -o ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo `test -f 'ldap/servers/plugins/othercrypto/othercrypto.c' || echo '$(srcdir)/'`ldap/servers/plugins/othercrypto/othercrypto.c; \ +@am__fastdepCC_TRUE@ then mv -f 
"ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Tpo" "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Plo"; else rm -f "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/othercrypto/othercrypto.c' object='ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libothercrypto_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo `test -f 'ldap/servers/plugins/othercrypto/othercrypto.c' || echo '$(srcdir)/'`ldap/servers/plugins/othercrypto/othercrypto.c + ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo: ldap/servers/plugins/pam_passthru/pam_ptconfig.c @am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpam_passthru_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo -MD -MP -MF "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Tpo" -c -o ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo `test -f 'ldap/servers/plugins/pam_passthru/pam_ptconfig.c' || echo '$(srcdir)/'`ldap/servers/plugins/pam_passthru/pam_ptconfig.c; \ @am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Tpo" "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Plo"; else rm -f "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Tpo"; exit 1; fi @@ -6322,6 +6418,13 @@ ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo: ldap/servers/pl @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpresence_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo `test -f 'ldap/servers/plugins/presence/presence.c' || echo '$(srcdir)/'`ldap/servers/plugins/presence/presence.c +ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo: ldap/servers/plugins/pwderror/pwderror-plugin.c +@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpwderror_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo -MD -MP -MF "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Tpo" -c -o ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo `test -f 'ldap/servers/plugins/pwderror/pwderror-plugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/pwderror/pwderror-plugin.c; \ +@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Tpo" "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Plo"; else rm -f "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ 
source='ldap/servers/plugins/pwderror/pwderror-plugin.c' object='ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpwderror_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo `test -f 'ldap/servers/plugins/pwderror/pwderror-plugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/pwderror/pwderror-plugin.c + ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo: ldap/servers/plugins/pwdstorage/clear_pwd.c @am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpwdstorage_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo -MD -MP -MF "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Tpo" -c -o ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo `test -f 'ldap/servers/plugins/pwdstorage/clear_pwd.c' || echo '$(srcdir)/'`ldap/servers/plugins/pwdstorage/clear_pwd.c; \ @am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Tpo" "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Plo"; else rm -f "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Tpo"; exit 1; fi @@ -7526,6 +7629,13 @@ ldap/servers/plugins/views/libviews_plugin_la-views.lo: ldap/servers/plugins/vie @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libviews_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/views/libviews_plugin_la-views.lo `test -f 'ldap/servers/plugins/views/views.c' || echo '$(srcdir)/'`ldap/servers/plugins/views/views.c +ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo: ldap/servers/plugins/xor/xorplugin.c +@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libxor_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo -MD -MP -MF "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Tpo" -c -o ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo `test -f 'ldap/servers/plugins/xor/xorplugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/xor/xorplugin.c; \ +@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Tpo" "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Plo"; else rm -f "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/xor/xorplugin.c' object='ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libxor_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo `test -f 'ldap/servers/plugins/xor/xorplugin.c' || echo 
'$(srcdir)/'`ldap/servers/plugins/xor/xorplugin.c + ldap/servers/slapd/tools/dbscan_bin-dbscan.o: ldap/servers/slapd/tools/dbscan.c @am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(dbscan_bin_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/tools/dbscan_bin-dbscan.o -MD -MP -MF "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Tpo" -c -o ldap/servers/slapd/tools/dbscan_bin-dbscan.o `test -f 'ldap/servers/slapd/tools/dbscan.c' || echo '$(srcdir)/'`ldap/servers/slapd/tools/dbscan.c; \ @am__fastdepCC_TRUE@ then mv -f "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Tpo" "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Po"; else rm -f "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Tpo"; exit 1; fi @@ -8783,9 +8893,11 @@ clean-libtool: -rm -rf ldap/servers/plugins/distrib/.libs ldap/servers/plugins/distrib/_libs -rm -rf ldap/servers/plugins/dna/.libs ldap/servers/plugins/dna/_libs -rm -rf ldap/servers/plugins/http/.libs ldap/servers/plugins/http/_libs + -rm -rf ldap/servers/plugins/othercrypto/.libs ldap/servers/plugins/othercrypto/_libs -rm -rf ldap/servers/plugins/pam_passthru/.libs ldap/servers/plugins/pam_passthru/_libs -rm -rf ldap/servers/plugins/passthru/.libs ldap/servers/plugins/passthru/_libs -rm -rf ldap/servers/plugins/presence/.libs ldap/servers/plugins/presence/_libs + -rm -rf ldap/servers/plugins/pwderror/.libs ldap/servers/plugins/pwderror/_libs -rm -rf ldap/servers/plugins/pwdstorage/.libs ldap/servers/plugins/pwdstorage/_libs -rm -rf ldap/servers/plugins/referint/.libs ldap/servers/plugins/referint/_libs -rm -rf ldap/servers/plugins/replication/.libs ldap/servers/plugins/replication/_libs @@ -8797,6 +8909,7 @@ clean-libtool: -rm -rf ldap/servers/plugins/syntaxes/.libs ldap/servers/plugins/syntaxes/_libs -rm -rf ldap/servers/plugins/uiduniq/.libs ldap/servers/plugins/uiduniq/_libs -rm -rf ldap/servers/plugins/views/.libs ldap/servers/plugins/views/_libs + -rm -rf ldap/servers/plugins/xor/.libs ldap/servers/plugins/xor/_libs -rm -rf ldap/servers/slapd/.libs ldap/servers/slapd/_libs -rm -rf ldap/servers/slapd/back-ldbm/.libs ldap/servers/slapd/back-ldbm/_libs -rm -rf lib/base/.libs lib/base/_libs @@ -8825,6 +8938,23 @@ uninstall-configDATA: echo " rm -f '$(DESTDIR)$(configdir)/$$f'"; \ rm -f "$(DESTDIR)$(configdir)/$$f"; \ done +install-infDATA: $(inf_DATA) + @$(NORMAL_INSTALL) + test -z "$(infdir)" || $(mkdir_p) "$(DESTDIR)$(infdir)" + @list='$(inf_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(infDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(infdir)/$$f'"; \ + $(infDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(infdir)/$$f"; \ + done + +uninstall-infDATA: + @$(NORMAL_UNINSTALL) + @list='$(inf_DATA)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(infdir)/$$f'"; \ + rm -f "$(DESTDIR)$(infdir)/$$f"; \ + done install-nodist_propertyDATA: $(nodist_property_DATA) @$(NORMAL_INSTALL) test -z "$(propertydir)" || $(mkdir_p) "$(DESTDIR)$(propertydir)" @@ -9075,7 +9205,7 @@ check: $(BUILT_SOURCES) all-am: Makefile $(LIBRARIES) $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) \ $(DATA) config.h installdirs: - for dir in "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(serverplugindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(initdir)" "$(DESTDIR)$(perldir)" "$(DESTDIR)$(taskdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(sampledatadir)" "$(DESTDIR)$(schemadir)"; do \ + for dir in 
"$(DESTDIR)$(serverdir)" "$(DESTDIR)$(serverplugindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(initdir)" "$(DESTDIR)$(perldir)" "$(DESTDIR)$(taskdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(infdir)" "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(sampledatadir)" "$(DESTDIR)$(schemadir)"; do \ test -z "$$dir" || $(mkdir_p) "$$dir"; \ done install: $(BUILT_SOURCES) @@ -9122,12 +9252,16 @@ distclean-generic: -rm -f ldap/servers/plugins/dna/$(am__dirstamp) -rm -f ldap/servers/plugins/http/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/http/$(am__dirstamp) + -rm -f ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp) + -rm -f ldap/servers/plugins/othercrypto/$(am__dirstamp) -rm -f ldap/servers/plugins/pam_passthru/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/pam_passthru/$(am__dirstamp) -rm -f ldap/servers/plugins/passthru/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/passthru/$(am__dirstamp) -rm -f ldap/servers/plugins/presence/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/presence/$(am__dirstamp) + -rm -f ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp) + -rm -f ldap/servers/plugins/pwderror/$(am__dirstamp) -rm -f ldap/servers/plugins/pwdstorage/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/pwdstorage/$(am__dirstamp) -rm -f ldap/servers/plugins/referint/$(DEPDIR)/$(am__dirstamp) @@ -9150,6 +9284,8 @@ distclean-generic: -rm -f ldap/servers/plugins/uiduniq/$(am__dirstamp) -rm -f ldap/servers/plugins/views/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/views/$(am__dirstamp) + -rm -f ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp) + -rm -f ldap/servers/plugins/xor/$(am__dirstamp) -rm -f ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/slapd/$(am__dirstamp) -rm -f ldap/servers/slapd/back-ldbm/$(DEPDIR)/$(am__dirstamp) @@ -9188,7 +9324,7 @@ clean-am: clean-binPROGRAMS clean-generic clean-libtool \ distclean: distclean-am -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) + -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) 
ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/othercrypto/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwderror/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/plugins/xor/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-hdr distclean-libtool distclean-tags @@ -9203,9 +9339,10 @@ info: info-am info-am: -install-data-am: install-configDATA install-initSCRIPTS \ - install-nodist_propertyDATA install-perlSCRIPTS \ - install-propertyDATA install-sampledataDATA install-schemaDATA \ +install-data-am: install-configDATA install-infDATA \ + install-initSCRIPTS install-nodist_propertyDATA \ + install-perlSCRIPTS install-propertyDATA \ + install-sampledataDATA install-schemaDATA \ install-serverLTLIBRARIES install-serverpluginLTLIBRARIES \ install-taskSCRIPTS @@ -9221,7 +9358,7 @@ installcheck-am: maintainer-clean: maintainer-clean-am -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) + -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) 
ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/othercrypto/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwderror/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/plugins/xor/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic @@ -9239,12 +9376,12 @@ ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-binSCRIPTS \ - uninstall-configDATA uninstall-info-am uninstall-initSCRIPTS \ - uninstall-nodist_propertyDATA uninstall-perlSCRIPTS \ - uninstall-propertyDATA uninstall-sampledataDATA \ - uninstall-sbinPROGRAMS uninstall-schemaDATA \ - uninstall-serverLTLIBRARIES uninstall-serverpluginLTLIBRARIES \ - uninstall-taskSCRIPTS + uninstall-configDATA uninstall-infDATA uninstall-info-am \ + uninstall-initSCRIPTS uninstall-nodist_propertyDATA \ + uninstall-perlSCRIPTS uninstall-propertyDATA \ + uninstall-sampledataDATA uninstall-sbinPROGRAMS \ + uninstall-schemaDATA uninstall-serverLTLIBRARIES \ + uninstall-serverpluginLTLIBRARIES uninstall-taskSCRIPTS .PHONY: CTAGS GTAGS all all-am am--refresh check check-am clean \ clean-binPROGRAMS clean-generic clean-libtool \ @@ -9257,22 +9394,22 @@ uninstall-am: uninstall-binPROGRAMS uninstall-binSCRIPTS \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-binSCRIPTS install-configDATA \ install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-initSCRIPTS install-man \ - install-nodist_propertyDATA install-perlSCRIPTS \ - install-propertyDATA install-sampledataDATA \ - install-sbinPROGRAMS install-schemaDATA \ + install-infDATA install-info install-info-am \ + install-initSCRIPTS install-man install-nodist_propertyDATA \ + install-perlSCRIPTS install-propertyDATA \ + install-sampledataDATA install-sbinPROGRAMS install-schemaDATA \ install-serverLTLIBRARIES install-serverpluginLTLIBRARIES \ install-strip install-taskSCRIPTS installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-binPROGRAMS uninstall-binSCRIPTS \ - uninstall-configDATA uninstall-info-am uninstall-initSCRIPTS \ - uninstall-nodist_propertyDATA uninstall-perlSCRIPTS \ - uninstall-propertyDATA uninstall-sampledataDATA \ - uninstall-sbinPROGRAMS uninstall-schemaDATA \ - uninstall-serverLTLIBRARIES uninstall-serverpluginLTLIBRARIES \ - uninstall-taskSCRIPTS + uninstall-configDATA uninstall-infDATA uninstall-info-am \ + uninstall-initSCRIPTS 
uninstall-nodist_propertyDATA \ + uninstall-perlSCRIPTS uninstall-propertyDATA \ + uninstall-sampledataDATA uninstall-sbinPROGRAMS \ + uninstall-schemaDATA uninstall-serverLTLIBRARIES \ + uninstall-serverpluginLTLIBRARIES uninstall-taskSCRIPTS dirver.h: Makefile diff --git a/aclocal.m4 b/aclocal.m4 index c7c1c6fbc..9064efa9b 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -1578,27 +1578,10 @@ linux*) # before this can be enabled. hardcode_into_libs=yes - # find out which ABI we are using - libsuff= - case "$host_cpu" in - x86_64*|s390x*|powerpc64*) - echo '[#]line __oline__ "configure"' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.$ac_objext` in - *64-bit*) - libsuff=64 - sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" - ;; - esac - fi - rm -rf conftest* - ;; - esac - # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on @@ -4305,9 +4288,6 @@ CC=$lt_[]_LT_AC_TAGVAR(compiler, $1) # Is the compiler the GNU C compiler? with_gcc=$_LT_AC_TAGVAR(GCC, $1) -gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` -gcc_ver=\`gcc -dumpversion\` - # An ERE matcher. EGREP=$lt_EGREP @@ -4441,11 +4421,11 @@ striplib=$lt_striplib # Dependencies to place before the objects being linked to create a # shared library. -predep_objects=\`echo $lt_[]_LT_AC_TAGVAR(predep_objects, $1) | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +predep_objects=$lt_[]_LT_AC_TAGVAR(predep_objects, $1) # Dependencies to place after the objects being linked to create a # shared library. -postdep_objects=\`echo $lt_[]_LT_AC_TAGVAR(postdep_objects, $1) | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +postdep_objects=$lt_[]_LT_AC_TAGVAR(postdep_objects, $1) # Dependencies to place before the objects being linked to create a # shared library. @@ -4457,7 +4437,7 @@ postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) # The library search path used internally by the compiler when linking # a shared library. -compiler_lib_search_path=\`echo $lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +compiler_lib_search_path=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) # Method to check whether dependent libraries are shared objects. 
deplibs_check_method=$lt_deplibs_check_method @@ -4537,7 +4517,7 @@ variables_saved_for_relink="$variables_saved_for_relink" link_all_deplibs=$_LT_AC_TAGVAR(link_all_deplibs, $1) # Compile-time system search path for libraries -sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec @@ -6373,7 +6353,6 @@ do done done done -IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris @@ -6406,7 +6385,6 @@ for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do done ]) SED=$lt_cv_path_SED -AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ]) diff --git a/configure b/configure index 67bdf5615..da588fb7d 100755 --- a/configure +++ b/configure @@ -465,7 +465,7 @@ ac_includes_default="\ #endif" ac_default_prefix=/opt/$PACKAGE_NAME -ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT build build_cpu build_vendor build_os host host_cpu host_vendor host_os CXX CXXFLAGS LDFLAGS CPPFLAGS ac_ct_CXX EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CC CFLAGS ac_ct_CC CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE SED EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL LIBOBJS debug_defs BUNDLE_TRUE BUNDLE_FALSE enable_pam_passthru_TRUE enable_pam_passthru_FALSE enable_dna_TRUE enable_dna_FALSE enable_ldapi_TRUE enable_ldapi_FALSE enable_bitwise_TRUE enable_bitwise_FALSE configdir sampledatadir propertydir schemadir serverdir serverplugindir scripttemplatedir perldir instconfigdir WINNT_TRUE WINNT_FALSE LIBSOCKET LIBNSL LIBDL LIBCSTD LIBCRUN initdir shared_lib_suffix HPUX_TRUE HPUX_FALSE SOLARIS_TRUE SOLARIS_FALSE PKG_CONFIG ICU_CONFIG NETSNMP_CONFIG nspr_inc nspr_lib nspr_libdir nss_inc nss_lib nss_libdir ldapsdk_inc ldapsdk_lib ldapsdk_libdir ldapsdk_bindir db_inc db_incdir db_lib db_libdir db_bindir db_libver sasl_inc sasl_lib sasl_libdir svrcore_inc svrcore_lib icu_lib icu_inc icu_bin netsnmp_inc netsnmp_lib netsnmp_libdir netsnmp_link brand capbrand vendor LTLIBOBJS' +ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT build build_cpu build_vendor build_os host host_cpu host_vendor host_os CXX CXXFLAGS 
LDFLAGS CPPFLAGS ac_ct_CXX EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CC CFLAGS ac_ct_CC CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL LIBOBJS debug_defs BUNDLE_TRUE BUNDLE_FALSE enable_pam_passthru_TRUE enable_pam_passthru_FALSE enable_dna_TRUE enable_dna_FALSE enable_ldapi_TRUE enable_ldapi_FALSE enable_bitwise_TRUE enable_bitwise_FALSE configdir sampledatadir propertydir schemadir serverdir serverplugindir scripttemplatedir perldir infdir instconfigdir WINNT_TRUE WINNT_FALSE LIBSOCKET LIBNSL LIBDL LIBCSTD LIBCRUN initdir shared_lib_suffix HPUX_TRUE HPUX_FALSE SOLARIS_TRUE SOLARIS_FALSE PKG_CONFIG ICU_CONFIG NETSNMP_CONFIG nspr_inc nspr_lib nspr_libdir nss_inc nss_lib nss_libdir ldapsdk_inc ldapsdk_lib ldapsdk_libdir ldapsdk_bindir db_inc db_incdir db_lib db_libdir db_bindir db_libver sasl_inc sasl_lib sasl_libdir svrcore_inc svrcore_lib icu_lib icu_inc icu_bin netsnmp_inc netsnmp_lib netsnmp_libdir netsnmp_link brand capbrand vendor LTLIBOBJS' ac_subst_files='' # Initialize some variables set by options. @@ -3836,7 +3836,6 @@ do done done done -IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris @@ -3871,7 +3870,6 @@ done fi SED=$lt_cv_path_SED - echo "$as_me:$LINENO: result: $SED" >&5 echo "${ECHO_T}$SED" >&6 @@ -4312,7 +4310,7 @@ ia64-*-hpux*) ;; *-*-irix6*) # Find out which ABI we are using. - echo '#line 4315 "configure"' > conftest.$ac_ext + echo '#line 4313 "configure"' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? @@ -5447,7 +5445,7 @@ fi # Provide some information about the compiler. -echo "$as_me:5450:" \ +echo "$as_me:5448:" \ "checking for Fortran 77 compiler version" >&5 ac_compiler=`set X $ac_compile; echo $2` { (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5 @@ -6510,11 +6508,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:6513: $lt_compile\"" >&5) + (eval echo "\"\$as_me:6511: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:6517: \$? = $ac_status" >&5 + echo "$as_me:6515: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -6778,11 +6776,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:6781: $lt_compile\"" >&5) + (eval echo "\"\$as_me:6779: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:6785: \$? = $ac_status" >&5 + echo "$as_me:6783: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -6882,11 +6880,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:6885: $lt_compile\"" >&5) + (eval echo "\"\$as_me:6883: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? 
cat out/conftest.err >&5 - echo "$as_me:6889: \$? = $ac_status" >&5 + echo "$as_me:6887: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -8347,31 +8345,10 @@ linux*) # before this can be enabled. hardcode_into_libs=yes - # find out which ABI we are using - libsuff= - case "$host_cpu" in - x86_64*|s390x*|powerpc64*) - echo '#line 8354 "configure"' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.$ac_objext` in - *64-bit*) - libsuff=64 - sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" - ;; - esac - fi - rm -rf conftest* - ;; - esac - # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on @@ -9248,7 +9225,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<EOF -#line 9251 "configure" +#line 9228 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -9348,7 +9325,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<EOF -#line 9351 "configure" +#line 9328 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -9679,9 +9656,6 @@ CC=$lt_compiler # Is the compiler the GNU C compiler? with_gcc=$GCC -gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` -gcc_ver=\`gcc -dumpversion\` - # An ERE matcher. EGREP=$lt_EGREP @@ -9815,11 +9789,11 @@ striplib=$lt_striplib # Dependencies to place before the objects being linked to create a # shared library. -predep_objects=\`echo $lt_predep_objects | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +predep_objects=$lt_predep_objects # Dependencies to place after the objects being linked to create a # shared library. -postdep_objects=\`echo $lt_postdep_objects | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +postdep_objects=$lt_postdep_objects # Dependencies to place before the objects being linked to create a # shared library. @@ -9831,7 +9805,7 @@ postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. -compiler_lib_search_path=\`echo $lt_compiler_lib_search_path | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +compiler_lib_search_path=$lt_compiler_lib_search_path # Method to check whether dependent libraries are shared objects. 
deplibs_check_method=$lt_deplibs_check_method @@ -9911,7 +9885,7 @@ variables_saved_for_relink="$variables_saved_for_relink" link_all_deplibs=$link_all_deplibs # Compile-time system search path for libraries -sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec @@ -11691,11 +11665,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:11694: $lt_compile\"" >&5) + (eval echo "\"\$as_me:11668: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:11698: \$? = $ac_status" >&5 + echo "$as_me:11672: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -11795,11 +11769,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:11798: $lt_compile\"" >&5) + (eval echo "\"\$as_me:11772: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:11802: \$? = $ac_status" >&5 + echo "$as_me:11776: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -12327,31 +12301,10 @@ linux*) # before this can be enabled. hardcode_into_libs=yes - # find out which ABI we are using - libsuff= - case "$host_cpu" in - x86_64*|s390x*|powerpc64*) - echo '#line 12334 "configure"' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.$ac_objext` in - *64-bit*) - libsuff=64 - sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" - ;; - esac - fi - rm -rf conftest* - ;; - esac - # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on @@ -12735,9 +12688,6 @@ CC=$lt_compiler_CXX # Is the compiler the GNU C compiler? with_gcc=$GCC_CXX -gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` -gcc_ver=\`gcc -dumpversion\` - # An ERE matcher. EGREP=$lt_EGREP @@ -12871,11 +12821,11 @@ striplib=$lt_striplib # Dependencies to place before the objects being linked to create a # shared library. 
-predep_objects=\`echo $lt_predep_objects_CXX | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +predep_objects=$lt_predep_objects_CXX # Dependencies to place after the objects being linked to create a # shared library. -postdep_objects=\`echo $lt_postdep_objects_CXX | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +postdep_objects=$lt_postdep_objects_CXX # Dependencies to place before the objects being linked to create a # shared library. @@ -12887,7 +12837,7 @@ postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. -compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_CXX | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method @@ -12967,7 +12917,7 @@ variables_saved_for_relink="$variables_saved_for_relink" link_all_deplibs=$link_all_deplibs_CXX # Compile-time system search path for libraries -sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec @@ -13389,11 +13339,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:13392: $lt_compile\"" >&5) + (eval echo "\"\$as_me:13342: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:13396: \$? = $ac_status" >&5 + echo "$as_me:13346: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -13493,11 +13443,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:13496: $lt_compile\"" >&5) + (eval echo "\"\$as_me:13446: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:13500: \$? = $ac_status" >&5 + echo "$as_me:13450: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -14938,31 +14888,10 @@ linux*) # before this can be enabled. hardcode_into_libs=yes - # find out which ABI we are using - libsuff= - case "$host_cpu" in - x86_64*|s390x*|powerpc64*) - echo '#line 14945 "configure"' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.$ac_objext` in - *64-bit*) - libsuff=64 - sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" - ;; - esac - fi - rm -rf conftest* - ;; - esac - # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on @@ -15346,9 +15275,6 @@ CC=$lt_compiler_F77 # Is the compiler the GNU C compiler? with_gcc=$GCC_F77 -gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` -gcc_ver=\`gcc -dumpversion\` - # An ERE matcher. EGREP=$lt_EGREP @@ -15482,11 +15408,11 @@ striplib=$lt_striplib # Dependencies to place before the objects being linked to create a # shared library. -predep_objects=\`echo $lt_predep_objects_F77 | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +predep_objects=$lt_predep_objects_F77 # Dependencies to place after the objects being linked to create a # shared library. -postdep_objects=\`echo $lt_postdep_objects_F77 | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +postdep_objects=$lt_postdep_objects_F77 # Dependencies to place before the objects being linked to create a # shared library. @@ -15498,7 +15424,7 @@ postdeps=$lt_postdeps_F77 # The library search path used internally by the compiler when linking # a shared library. -compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_F77 | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +compiler_lib_search_path=$lt_compiler_lib_search_path_F77 # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method @@ -15578,7 +15504,7 @@ variables_saved_for_relink="$variables_saved_for_relink" link_all_deplibs=$link_all_deplibs_F77 # Compile-time system search path for libraries -sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec @@ -15720,11 +15646,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15723: $lt_compile\"" >&5) + (eval echo "\"\$as_me:15649: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:15727: \$? = $ac_status" >&5 + echo "$as_me:15653: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. 
@@ -15988,11 +15914,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15991: $lt_compile\"" >&5) + (eval echo "\"\$as_me:15917: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:15995: \$? = $ac_status" >&5 + echo "$as_me:15921: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -16092,11 +16018,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:16095: $lt_compile\"" >&5) + (eval echo "\"\$as_me:16021: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:16099: \$? = $ac_status" >&5 + echo "$as_me:16025: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -17557,31 +17483,10 @@ linux*) # before this can be enabled. hardcode_into_libs=yes - # find out which ABI we are using - libsuff= - case "$host_cpu" in - x86_64*|s390x*|powerpc64*) - echo '#line 17564 "configure"' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.$ac_objext` in - *64-bit*) - libsuff=64 - sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" - ;; - esac - fi - rm -rf conftest* - ;; - esac - # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on @@ -17965,9 +17870,6 @@ CC=$lt_compiler_GCJ # Is the compiler the GNU C compiler? with_gcc=$GCC_GCJ -gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` -gcc_ver=\`gcc -dumpversion\` - # An ERE matcher. EGREP=$lt_EGREP @@ -18101,11 +18003,11 @@ striplib=$lt_striplib # Dependencies to place before the objects being linked to create a # shared library. -predep_objects=\`echo $lt_predep_objects_GCJ | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +predep_objects=$lt_predep_objects_GCJ # Dependencies to place after the objects being linked to create a # shared library. -postdep_objects=\`echo $lt_postdep_objects_GCJ | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +postdep_objects=$lt_postdep_objects_GCJ # Dependencies to place before the objects being linked to create a # shared library. @@ -18117,7 +18019,7 @@ postdeps=$lt_postdeps_GCJ # The library search path used internally by the compiler when linking # a shared library. 
-compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_GCJ | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +compiler_lib_search_path=$lt_compiler_lib_search_path_GCJ # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method @@ -18197,7 +18099,7 @@ variables_saved_for_relink="$variables_saved_for_relink" link_all_deplibs=$link_all_deplibs_GCJ # Compile-time system search path for libraries -sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec @@ -18449,9 +18351,6 @@ CC=$lt_compiler_RC # Is the compiler the GNU C compiler? with_gcc=$GCC_RC -gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` -gcc_ver=\`gcc -dumpversion\` - # An ERE matcher. EGREP=$lt_EGREP @@ -18585,11 +18484,11 @@ striplib=$lt_striplib # Dependencies to place before the objects being linked to create a # shared library. -predep_objects=\`echo $lt_predep_objects_RC | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +predep_objects=$lt_predep_objects_RC # Dependencies to place after the objects being linked to create a # shared library. -postdep_objects=\`echo $lt_postdep_objects_RC | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +postdep_objects=$lt_postdep_objects_RC # Dependencies to place before the objects being linked to create a # shared library. @@ -18601,7 +18500,7 @@ postdeps=$lt_postdeps_RC # The library search path used internally by the compiler when linking # a shared library. -compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_RC | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +compiler_lib_search_path=$lt_compiler_lib_search_path_RC # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method @@ -18681,7 +18580,7 @@ variables_saved_for_relink="$variables_saved_for_relink" link_all_deplibs=$link_all_deplibs_RC # Compile-time system search path for libraries -sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec @@ -23157,6 +23056,8 @@ if test "$with_fhs_opt" = "yes"; then serverdir= # relative to libdir serverplugindir=/plugins + # relative to datadir + infdir=/inf else if test "$with_fhs" = "yes"; then ac_default_prefix=/usr @@ -23173,6 +23074,8 @@ else serverdir=/$PACKAGE_NAME # relative to libdir serverplugindir=/$PACKAGE_NAME/plugins + # relative to datadir + infdir=/$PACKAGE_NAME/inf fi # Shared paths for all layouts @@ -23194,6 +23097,7 @@ perldir=/$PACKAGE_NAME/perl + # check for --with-instconfigdir echo "$as_me:$LINENO: checking for --with-instconfigdir" >&5 echo $ECHO_N "checking for --with-instconfigdir... 
$ECHO_C" >&6 @@ -25985,7 +25889,6 @@ s,@ac_ct_CC@,$ac_ct_CC,;t t s,@CCDEPMODE@,$CCDEPMODE,;t t s,@am__fastdepCC_TRUE@,$am__fastdepCC_TRUE,;t t s,@am__fastdepCC_FALSE@,$am__fastdepCC_FALSE,;t t -s,@SED@,$SED,;t t s,@EGREP@,$EGREP,;t t s,@LN_S@,$LN_S,;t t s,@ECHO@,$ECHO,;t t @@ -26019,6 +25922,7 @@ s,@serverdir@,$serverdir,;t t s,@serverplugindir@,$serverplugindir,;t t s,@scripttemplatedir@,$scripttemplatedir,;t t s,@perldir@,$perldir,;t t +s,@infdir@,$infdir,;t t s,@instconfigdir@,$instconfigdir,;t t s,@WINNT_TRUE@,$WINNT_TRUE,;t t s,@WINNT_FALSE@,$WINNT_FALSE,;t t diff --git a/configure.ac b/configure.ac index 5d6ab021a..c53241dee 100644 --- a/configure.ac +++ b/configure.ac @@ -163,6 +163,8 @@ if test "$with_fhs_opt" = "yes"; then serverdir= # relative to libdir serverplugindir=/plugins + # relative to datadir + infdir=/inf else if test "$with_fhs" = "yes"; then ac_default_prefix=/usr @@ -181,6 +183,8 @@ else serverdir=/$PACKAGE_NAME # relative to libdir serverplugindir=/$PACKAGE_NAME/plugins + # relative to datadir + infdir=/$PACKAGE_NAME/inf fi # Shared paths for all layouts @@ -201,6 +205,7 @@ AC_SUBST(serverdir) AC_SUBST(serverplugindir) AC_SUBST(scripttemplatedir) AC_SUBST(perldir) +AC_SUBST(infdir) # check for --with-instconfigdir AC_MSG_CHECKING(for --with-instconfigdir) diff --git a/ldap/cm/newinst/slapd.inf b/ldap/cm/newinst/slapd.inf.in similarity index 78% rename from ldap/cm/newinst/slapd.inf rename to ldap/cm/newinst/slapd.inf.in index 435faf19e..0393da74a 100644 --- a/ldap/cm/newinst/slapd.inf +++ b/ldap/cm/newinst/slapd.inf.in @@ -33,30 +33,27 @@ # # # Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. +# Copyright (C) 2007 Red Hat, Inc. # All rights reserved. # END COPYRIGHT BLOCK # [General] -Name=Fedora Directory Suite -Components=slapd, slapd-client +Name=@capbrand@ Directory Suite +Components=slapd [slapd] -Name= %%%SERVER_NAME%%% -InstanceNamePrefix= %%%INSTANCE_NAME_PREFIX%%% +Name= @capbrand@ Directory Server +InstanceNamePrefix= Directory Server NickName= slapd -Version= %%%SERVER_VERSION%%% +Version= @PACKAGE_VERSION@ Compatible= 1.0 -BuildNumber= %%%SERVER_BUILD_NUM%%% -Expires= %%%PUMPKIN_HOUR%%% -Security= %%%SECURITY%%% -Vendor= Fedora Project -Description= %%%SERVER_NAME%%% -Dependencies= base/1.0, svrcore/1.0 +BuildNumber= @BUILDNUM@ +Expires= 0 +Security= domestic +Vendor= @vendor@ +Description= @capbrand@ Directory Server ProductName=Directory Server -IsDirLite=%%%IS_DIR_LITE%%% -SourcePath=slapd -Archive= nsslapd.zip +IsDirLite=false PrePreInstall= dsktune PreInstall= ns-config PostInstall= bin/slapd/admin/bin/ns-update @@ -65,15 +62,3 @@ PostUninstall= Checked=True Mandatory=False IsLdap=True - -[slapd-client] -Name= Fedora Directory Server Console -NickName= slapd-client -Version= %%%SERVER_VERSION%%% -Compatible= 1.0 -Checked=True -Mandatory=False -IsLdap=False -SourcePath=slapd -IsMCC=True -Archive=slapd-client.zip
0
957bea3e270e3af820b1fe500017acde90e4356d
389ds/389-ds-base
Ticket 618 - Crash at shutdown while stopping replica agreements Bug Description: During shutdown, the replica agreement thread can take a long time to detect the shutdown. If the replica agreement thread fails to detect the shutdown before the timeout (default is 120s), the shutdown continues and deletes the replica agreement structure. Fix Description: The fix makes the main thread wait for the RA thread. https://fedorahosted.org/389/ticket/618 Reviewed by: Rich Megginson (thanks for the review Rich) Platforms tested: fedora 18 Flag Day: no Doc impact: no
commit 957bea3e270e3af820b1fe500017acde90e4356d Author: Thierry bordaz (tbordaz) <[email protected]> Date: Thu Mar 21 11:11:34 2013 +0100 Ticket 618 - Crash at shutdown while stopping replica agreements Bug Description: During shutdown, the replica agreement thread can be long to detect the shutdown. If replica agreements fails to detect shutdown before timeout (default is 120s), the shutdown continue and deletes replica agreement structure. Fix Description: The fix consist to make the main thread wait for the RA thread. https://fedorahosted.org/389/ticket/618 Reviewed by: Rich Megginson (thanks for the review Rich) Platforms tested: fedora 18 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/replication/repl5_protocol.c b/ldap/servers/plugins/replication/repl5_protocol.c index 250ce48aa..d9744fce4 100644 --- a/ldap/servers/plugins/replication/repl5_protocol.c +++ b/ldap/servers/plugins/replication/repl5_protocol.c @@ -72,6 +72,7 @@ typedef struct repl_protocol int state; int next_state; PRUint64 protocol_timeout; + PRThread *agmt_thread; PRLock *lock; } repl_protocol; @@ -366,12 +367,13 @@ prot_start(Repl_Protocol *rp) PR_ASSERT(NULL != rp); if (NULL != rp) { - if (PR_CreateThread(PR_USER_THREAD, prot_thread_main, (void *)rp, + rp->agmt_thread = PR_CreateThread(PR_USER_THREAD, prot_thread_main, (void *)rp, #if defined(__hpux) && defined(__ia64) - PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, 524288L ) == NULL) + PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, 524288L ); #else - PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) + PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE); #endif + if (rp->agmt_thread == NULL) { PRErrorCode prerr = PR_GetError(); @@ -426,6 +428,11 @@ prot_stop(Repl_Protocol *rp) slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Error: prot_stop() " " called on NULL protocol instance.\n"); } + + if (rp->agmt_thread != NULL) { + (void) PR_JoinThread(rp->agmt_thread); + rp->agmt_thread = NULL; + } } /*
0
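
The ticket 618 record above describes a classic shutdown race: the stop path must not free the replica agreement structures until the protocol thread has actually exited, so the fix keeps the thread handle and joins it in prot_stop(). The following is a minimal, hypothetical sketch of that join-on-shutdown pattern using NSPR; the my_protocol type and the proto_start/proto_stop names are placeholders for illustration, not the server's real repl5 code.

#include <nspr.h>   /* NSPR headers assumed on the include path (e.g. `pkg-config --cflags nspr`) */

/* Hypothetical protocol object -- only the fields this sketch needs. */
typedef struct my_protocol {
    PRThread *worker;      /* handle kept so the stop path can wait for the thread */
    int stop_requested;    /* polled by the worker; a real server would guard this with a lock */
} my_protocol;

static void worker_main(void *arg)
{
    my_protocol *p = (my_protocol *)arg;
    while (!p->stop_requested) {
        PR_Sleep(PR_MillisecondsToInterval(100));   /* stand-in for the replication work */
    }
}

static void proto_start(my_protocol *p)
{
    p->stop_requested = 0;
    /* PR_JOINABLE_THREAD is what makes the PR_JoinThread() below legal. */
    p->worker = PR_CreateThread(PR_USER_THREAD, worker_main, p,
                                PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                PR_JOINABLE_THREAD, 0 /* 0 = default stack size */);
}

static void proto_stop(my_protocol *p)
{
    p->stop_requested = 1;
    if (p->worker != NULL) {
        PR_JoinThread(p->worker);   /* block until worker_main returns, so nothing it
                                       still uses can be freed out from under it */
        p->worker = NULL;
    }
}

int main(void)
{
    my_protocol p = { NULL, 0 };
    proto_start(&p);
    PR_Sleep(PR_MillisecondsToInterval(250));
    proto_stop(&p);                 /* returns only after the worker has exited */
    return 0;
}
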
df3a0a2675fba52ec8bb5b9548b326f714814b56
389ds/389-ds-base
Ticket 403 - CLEANALLRUV amendment Bug Description: If the replica the task is run on doesn't have any agmts, it doesn't correctly finish the task. Also, on a second pass, if the rid was already cleaned, you could get stuck in a loop (waiting for all the deleted replica's changes to be applied). Fix Description: Check how many agmts we process; if it is zero, exit correctly. To fix the loop, use ruv_covers_csn_strict - this correctly handles rids that have already been cleaned. https://fedorahosted.org/389/ticket/403 Reviewed by: Noriko!
commit df3a0a2675fba52ec8bb5b9548b326f714814b56 Author: Mark Reynolds <[email protected]> Date: Tue Aug 14 19:03:31 2012 -0400 Ticket 403 - CLEANALLRUV amendment Bug Description: If the replica the task is run on doesn't have any agmts, it doesn't correctly finish the task. Also, on a second pass, if the rid was already cleaned, you could get stuck in a loop(waiting for all the deleted replica's changes to be applied). Fix Description: Check how many agmts we process, if its zero, exit correctly. To fix the loop use ruv_covers_csn_strict - this correctly handles rid's that have already been cleaned. https://fedorahosted.org/389/ticket/403 Reviewed by: Noriko! diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c index a03740b1e..65b060b56 100644 --- a/ldap/servers/plugins/replication/repl5_replica_config.c +++ b/ldap/servers/plugins/replication/repl5_replica_config.c @@ -1448,8 +1448,9 @@ replica_cleanallruv_thread(void *arg) char csnstr[CSN_STRSIZE]; char *returntext = NULL; char *rid_text = NULL; - int found_dirty_rid = 1; int agmt_not_notified = 1; + int found_dirty_rid = 1; + int agmt_count = 0; int interval = 10; int free_obj = 0; int rc = 0; @@ -1491,7 +1492,7 @@ replica_cleanallruv_thread(void *arg) ruv_obj = replica_get_ruv(data->replica); ruv = object_get_data (ruv_obj); while(data->maxcsn && !is_task_aborted(data->rid) && !is_cleaned_rid(data->rid) && !slapi_is_shutting_down()){ - if(csn_get_replicaid(data->maxcsn) == 0 || ruv_covers_csn(ruv,data->maxcsn)){ + if(csn_get_replicaid(data->maxcsn) == 0 || ruv_covers_csn_strict(ruv,data->maxcsn)){ /* We are caught up, now we can clean the ruv's */ break; } @@ -1587,6 +1588,7 @@ replica_cleanallruv_thread(void *arg) agmt_obj = agmtlist_get_next_agreement_for_replica (data->replica, agmt_obj); continue; } + agmt_count++; if(replica_cleanallruv_check_ruv(agmt, rid_text, data->task) == 0){ found_dirty_rid = 0; } else { @@ -1622,7 +1624,7 @@ done: /* * If the replicas are cleaned, release the rid, and trim the changelog */ - if(!found_dirty_rid){ + if(!found_dirty_rid || agmt_count == 0){ trigger_cl_trimming(data->rid); delete_cleaned_rid(data->replica, data->rid, data->maxcsn); cleanruv_log(data->task, CLEANALLRUV_ID, "Successfully cleaned rid(%d).", data->rid); diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c index db94ba7e9..ecf1f9304 100644 --- a/ldap/servers/plugins/replication/repl_extop.c +++ b/ldap/servers/plugins/replication/repl_extop.c @@ -1618,7 +1618,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb) break; } slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanAllRUV_task: checking if we're caught up...\n"); - if(ruv_covers_csn(ruv,maxcsn) || csn_get_replicaid(maxcsn) == 0){ + if(ruv_covers_csn_strict(ruv,maxcsn) || csn_get_replicaid(maxcsn) == 0){ /* We are caught up */ break; } else {
0
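An illustrative sketch of the completion logic the Ticket 403 fix adds (plain C, not the server code): count how many agreements were actually examined, and treat a replica with no agreements as already clean instead of leaving found_dirty_rid set and waiting forever. agmt_is_dirty() and NUM_AGMTS are made-up stand-ins for the real agreement iteration.

/* Toy model of the "no agreements" completion check (assumed helpers). */
#include <stdio.h>

#define NUM_AGMTS 0                    /* replica with no agreements */

static int agmt_is_dirty(int i) { (void)i; return 0; }

int main(void)
{
    int found_dirty_rid = 1;           /* starts "dirty", as in the patch */
    int agmt_count = 0;
    int i;

    for (i = 0; i < NUM_AGMTS; i++) {
        agmt_count++;                  /* new: remember we checked one */
        if (!agmt_is_dirty(i))
            found_dirty_rid = 0;
    }

    /* Same shape as the patched condition: done when nothing is dirty,
     * or when there were no agreements to check at all. */
    if (!found_dirty_rid || agmt_count == 0)
        printf("rid cleaned; release it and trim the changelog\n");
    else
        printf("still dirty; keep waiting\n");
    return 0;
}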
1e2745f8754a3a4eceded6ef173cf0721a6e1bef
389ds/389-ds-base
Ticket 511 - allow turning off vattr lookup in search entry return Bug Description: The functions vattr_map_namespace_sp_getlist and slapi_vattr_namespace_values_get_sp take a long time, even if no vattrs are being used. Fix Description: Added a new atomic config setting to ignore vattrs. When vattrs are ignored, we bypass the expensive vattr code. New config setting: nsslapd-ignore-virtual-attrs: on|off (default is "off") https://fedorahosted.org/389/ticket/511 Reviewed by: ?
commit 1e2745f8754a3a4eceded6ef173cf0721a6e1bef Author: Mark Reynolds <[email protected]> Date: Wed Dec 12 14:59:17 2012 -0500 Ticket 511 - allow turning off vattr lookup in search entry return Bug Description: Functions vattr_map_namespace_sp_getlist and slapi_vattr_namespace_values_get_sp, take a long time, even if there are no vattrs being used. Fix Description: Added a new atomic config setting to ignore vattrs. Then we bypass the expensive vattr code when disabled. New config setting: nsslapd-ignore-virtual-attrs: on|off default is "off" https://fedorahosted.org/389/ticket/511 Reviewed by: ? diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index ab366fc3a..3fcd8c9f4 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -1010,6 +1010,10 @@ static struct config_get_and_set { NULL, 0, (void**)&global_slapdFrontendConfig.allowed_sasl_mechs, CONFIG_STRING, (ConfigGetFunc)config_get_allowed_sasl_mechs, DEFAULT_ALLOWED_TO_DELETE_ATTRS}, + {CONFIG_IGNORE_VATTRS, config_set_ignore_vattrs, + NULL, 0, + (void**)&global_slapdFrontendConfig.ignore_vattrs, + CONFIG_STRING, (ConfigGetFunc)config_get_ignore_vattrs, DEFAULT_ALLOWED_TO_DELETE_ATTRS}, #ifdef MEMPOOL_EXPERIMENTAL ,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch, NULL, 0, @@ -1436,6 +1440,7 @@ FrontendConfig_init () { init_disk_logging_critical = cfg->disk_logging_critical = LDAP_OFF; init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_OFF; cfg->ndn_cache_max_size = NDN_DEFAULT_SIZE; + cfg->ignore_vattrs = slapi_counter_new(); #ifdef MEMPOOL_EXPERIMENTAL init_mempool_switch = cfg->mempool_switch = LDAP_ON; @@ -1561,6 +1566,20 @@ config_value_is_null( const char *attrname, const char *value, char *errorbuf, return 0; } +int +config_set_ignore_vattrs (const char *attrname, char *value, char *errorbuf, int apply ) +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal = LDAP_SUCCESS; + int val; + + retVal = config_set_onoff ( attrname, value, &val, errorbuf, apply); + if(retVal == LDAP_SUCCESS){ + slapi_counter_set_value(slapdFrontendConfig->ignore_vattrs, val); + } + return retVal; +} + int config_set_disk_monitoring( const char *attrname, char *value, char *errorbuf, int apply ) { @@ -4127,6 +4146,14 @@ config_get_port(){ } +int +config_get_ignore_vattrs() +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + return slapi_counter_get_value(slapdFrontendConfig->ignore_vattrs); +} + int config_get_disk_monitoring(){ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index b5940259a..3c83e3064 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -552,6 +552,8 @@ int config_get_ndn_cache_enabled(); char *config_get_allowed_sasl_mechs(); int config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, int apply); int config_get_schemamod(); +int config_set_ignore_vattrs(const char *attrname, char *value, char *errorbuf, int apply); +int config_get_ignore_vattrs(); PLHashNumber hashNocaseString(const void *key); PRIntn hashNocaseCompare(const void *v1, const void *v2); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 8797c0480..edb75b7c4 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -2053,6 +2053,7 @@ typedef struct _slapdEntryPoints { #define CONFIG_NDN_CACHE "nsslapd-ndn-cache-enabled" #define CONFIG_NDN_CACHE_SIZE 
"nsslapd-ndn-cache-max-size" #define CONFIG_ALLOWED_SASL_MECHS "nsslapd-allowed-sasl-mechanisms" +#define CONFIG_IGNORE_VATTRS "nsslapd-ignore-virtual-attrs" #ifdef MEMPOOL_EXPERIMENTAL #define CONFIG_MEMPOOL_SWITCH_ATTRIBUTE "nsslapd-mempool" @@ -2275,6 +2276,9 @@ typedef struct _slapdFrontendConfig { /* normalized dn cache */ int ndn_cache_enabled; size_t ndn_cache_max_size; + + /* atomic settings */ + Slapi_Counter *ignore_vattrs; } slapdFrontendConfig_t; /* possible values for slapdFrontendConfig_t.schemareplace */ diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c index e8b23422f..56c348d83 100644 --- a/ldap/servers/slapd/vattr.c +++ b/ldap/servers/slapd/vattr.c @@ -850,21 +850,26 @@ int slapi_vattr_namespace_values_get_sp(vattr_context *c, vattr_sp_handle_list *list = NULL; vattr_get_thang *my_get = NULL; int attr_count = 0; + int ignore_vattrs; - rc = vattr_context_grok(&c); - if (0 != rc) { - /* Print a handy error log message */ - if(!vattr_context_is_loop_msg_displayed(&c)) - { - LDAPDebug(LDAP_DEBUG_ANY,"Detected virtual attribute loop in get on entry %s, attribute %s\n", slapi_entry_get_dn_const(e), type, 0); - vattr_context_set_loop_msg_displayed(&c); + ignore_vattrs = config_get_ignore_vattrs(); + + if(!ignore_vattrs){ + rc = vattr_context_grok(&c); + if (0 != rc) { + /* Print a handy error log message */ + if(!vattr_context_is_loop_msg_displayed(&c)) + { + LDAPDebug(LDAP_DEBUG_ANY,"Detected virtual attribute loop in get on entry %s, attribute %s\n", slapi_entry_get_dn_const(e), type, 0); + vattr_context_set_loop_msg_displayed(&c); + } + return rc; } - return rc; } /* Having done that, we now consult the attribute map to find service providers who are interested */ /* Look for attribute in the map */ - if(!(flags & SLAPI_REALATTRS_ONLY)) + if(!(flags & SLAPI_REALATTRS_ONLY) && !ignore_vattrs) { /* we use the vattr namespace aware version of this */ list = vattr_map_namespace_sp_getlist(namespace_dn, type); @@ -2185,6 +2190,11 @@ vattr_sp_handle_list *vattr_map_namespace_sp_getlist(Slapi_DN *dn, const char *t vattr_map_entry *result = NULL; vattr_sp_handle_list* return_list = 0; + if(config_get_ignore_vattrs()){ + /* we don't care about vattrs */ + return NULL; + } + ret = vattr_map_lookup(type_to_find,&result); if (0 == ret) { return_list = (vattr_sp_handle_list*) result->sp_list;
0
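The Ticket 511 record above adds an atomic on/off toggle (held in a Slapi_Counter via slapi_counter_new/set_value/get_value) that is consulted at the top of the vattr lookup path so the expensive map walk can be skipped entirely. The sketch below shows only the shape of that fast path, using a plain C11 atomic in place of Slapi_Counter; ignore_vattrs mirrors the new nsslapd-ignore-virtual-attrs setting and lookup_virtual_attrs() is an illustrative stand-in, not a server function.

/* Fast-path sketch: an atomic config flag checked before the costly work. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ignore_vattrs = 0;      /* set from the config handler */

static const char *lookup_virtual_attrs(const char *type)
{
    if (atomic_load(&ignore_vattrs))
        return NULL;                      /* bypass the expensive path */
    printf("consulting vattr map for %s\n", type);
    return NULL;                          /* pretend nothing is registered */
}

int main(void)
{
    lookup_virtual_attrs("nsrole");       /* default: full lookup */
    atomic_store(&ignore_vattrs, 1);      /* nsslapd-ignore-virtual-attrs: on */
    lookup_virtual_attrs("nsrole");       /* now returns immediately */
    return 0;
}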
a4c4daaa54d1ba94e342fcbf9af13a547886f0e3
389ds/389-ds-base
Ticket 620 - Better logging of error messages for 389-ds-base Bug Description: In a replication environment, if one master has passwordIsGlobalPolicy set to on and another master does not, then the password policy updates are rejected. However, there is no clear logging to tell you why the operation was rejected. Fix Description: Write a clear message in the error log stating the cause of the failure. https://fedorahosted.org/389/ticket/620 Reviewed by: nhosoi & nkinder (Thanks!!)
commit a4c4daaa54d1ba94e342fcbf9af13a547886f0e3 Author: Mark Reynolds <[email protected]> Date: Wed Mar 27 17:23:10 2013 -0400 Ticket 620 - Better logging of error messages for 389-ds-base Bug Description: In a replication environment, if you have one master set with passwordIsGlobalpolicy to on, and another master does not, then the password policy updates are rejected. However, there is no clear logging to tell you why the operatoin was rejected. Fix Description: Write a clear message in the error log stating what was the cause of the failure. https://fedorahosted.org/389/ticket/620 Reviewed by: nhosoi & nkinder (Thanks!!) diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index d42ef2c1f..cc1621b10 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -128,22 +128,22 @@ static struct attr_value_check { void do_modify( Slapi_PBlock *pb ) { - Slapi_Operation *operation; - BerElement *ber; - char *last, *type = NULL; - ber_tag_t tag; - ber_len_t len; - LDAPMod *mod; - LDAPMod **mods; - Slapi_Mods smods; - int err; - int pw_change = 0; /* 0= no password change */ - int ignored_some_mods = 0; - int has_password_mod = 0; /* number of password mods */ - char *old_pw = NULL; /* remember the old password */ - char *rawdn = NULL; - int minssf_exclude_rootdse = 0; - LDAPMod **normalized_mods = NULL; + Slapi_Operation *operation; + Slapi_Mods smods; + BerElement *ber; + ber_tag_t tag; + ber_len_t len; + LDAPMod **normalized_mods = NULL; + LDAPMod *mod; + LDAPMod **mods; + char *last, *type = NULL; + char *old_pw = NULL; /* remember the old password */ + char *rawdn = NULL; + int minssf_exclude_rootdse = 0; + int ignored_some_mods = 0; + int has_password_mod = 0; /* number of password mods */ + int pw_change = 0; /* 0 = no password change */ + int err; LDAPDebug( LDAP_DEBUG_TRACE, "do_modify\n", 0, 0, 0 ); @@ -291,8 +291,10 @@ do_modify( Slapi_PBlock *pb ) /* check if user is allowed to modify the specified attribute */ if (!op_shared_is_allowed_attr (mod->mod_type, pb->pb_conn->c_isreplication_session)) { - /* for now we just ignore attributes that client is not allowed - to modify so not to break existing clients */ + /* + * For now we just ignore attributes that client is not allowed + * to modify so not to break existing clients + */ ++ignored_some_mods; ber_bvecfree(mod->mod_bvalues); slapi_ch_free((void **)&(mod->mod_type)); @@ -310,6 +312,14 @@ do_modify( Slapi_PBlock *pb ) } if (ignored_some_mods && (0 == smods.num_elements)) { + if(pb->pb_conn->c_isreplication_session){ + int connid, opid; + slapi_pblock_get(pb, SLAPI_CONN_ID, &connid); + slapi_pblock_get(pb, SLAPI_OPERATION_ID, &opid); + LDAPDebug( LDAP_DEBUG_ANY,"Rejecting replicated password policy operation(conn=%d op=%d) for " + "entry %s. To allow these changes to be accepted, set passwordIsGlobalPolicy to 'on' in " + "cn=config.\n", connid, opid, rawdn); + } send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL ); goto free_and_return; }
0
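For the Ticket 620 record above, the useful part is the diagnostic itself: before sending LDAP_UNWILLING_TO_PERFORM for a replicated password-policy modify, the server now logs which connection and operation were refused and that setting passwordIsGlobalPolicy to 'on' in cn=config would allow the change. The fragment below is a plain-C illustration of that log-then-reject pattern, not SLAPI code; reject_replicated_pw_mod() and its arguments are hypothetical, and in the server the ids come from the operation's pblock.

/* Illustrative log-then-reject helper (hypothetical names). */
#include <stdio.h>

#define LDAP_UNWILLING_TO_PERFORM 53    /* standard LDAP result code */

static int reject_replicated_pw_mod(int connid, int opid, const char *dn)
{
    fprintf(stderr,
            "Rejecting replicated password policy operation(conn=%d op=%d) for "
            "entry %s. To allow these changes to be accepted, set "
            "passwordIsGlobalPolicy to 'on' in cn=config.\n",
            connid, opid, dn);
    return LDAP_UNWILLING_TO_PERFORM;
}

int main(void)
{
    /* Example values; in the server they come from the operation's pblock. */
    reject_replicated_pw_mod(12, 3, "uid=jdoe,ou=people,dc=example,dc=com");
    return 0;
}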
465fa850af4658d18235f26f6e05c5a386e70e3d
389ds/389-ds-base
Ticket 123 - Enhancement request: "whoami" extended operation Description: Fixed a compiler warning
commit 465fa850af4658d18235f26f6e05c5a386e70e3d Author: Anupam Jain <[email protected]> Date: Mon Jun 24 16:36:24 2013 -0700 Ticket 123 - Enhancement request:"whoami" extended operation Description: Fixed compiler warning diff --git a/ldap/servers/plugins/whoami/whoami.c b/ldap/servers/plugins/whoami/whoami.c index f6f46bf2d..42df8c197 100644 --- a/ldap/servers/plugins/whoami/whoami.c +++ b/ldap/servers/plugins/whoami/whoami.c @@ -65,7 +65,6 @@ int whoami_exop( Slapi_PBlock *pb ) struct berval *bval; struct berval retbval; - Slapi_DN *dn = NULL; char *client_dn = NULL; char *fdn = NULL; char *oid = NULL;
0
67730e723afaf37c3153a1e93e7224e306f155d1
389ds/389-ds-base
Fixes for memory leaks in ntds
commit 67730e723afaf37c3153a1e93e7224e306f155d1 Author: David Boreham <[email protected]> Date: Wed May 11 21:00:08 2005 +0000 Fixes for memory leaks in ntds diff --git a/ldap/servers/ntds/netman/netman.cpp b/ldap/servers/ntds/netman/netman.cpp index 5d9447b9e..ac94415a4 100644 --- a/ldap/servers/ntds/netman/netman.cpp +++ b/ldap/servers/ntds/netman/netman.cpp @@ -41,203 +41,211 @@ #include "netman.h" // **************************************************************** -// UTF16ToUTF8 +// quickFree // **************************************************************** -char* UTF16ToUTF8(unsigned short* inString) +void quickFree(char** buf) { - int length = WideCharToMultiByte(CP_ACP, 0, inString, -1, 0, 0, 0, 0); - char* outString = NULL; - - outString = (char*)malloc(length); - - WideCharToMultiByte(CP_ACP, 0, inString, -1, outString, length, 0, 0); - - return outString; + if(*buf != NULL) + { + free(*buf); + *buf = NULL; + } } // **************************************************************** -// UTF8ToUTF16 +// UTF16ToUTF8 // **************************************************************** -unsigned short* UTF8ToUTF16(char* inString) +int UTF16ToUTF8(unsigned short* inStr, char* outStr, unsigned long* outStrBufLen) { - unsigned short* outString = NULL; - int length = MultiByteToWideChar(CP_ACP, 0, inString, -1, 0, 0); + int result = 0; + unsigned long length = WideCharToMultiByte(CP_ACP, 0, inStr, -1, 0, 0, 0, 0); - outString = (unsigned short*)malloc(length * 2); + if(outStr == NULL) + { + result = 0; + goto exit; + } + if(*outStrBufLen < length) + { + result = -1; + goto exit; + } - MultiByteToWideChar(CP_ACP, 0, inString, -1, outString, length); + WideCharToMultiByte(CP_ACP, 0, inStr, -1, outStr, length, 0, 0); + +exit: + *outStrBufLen = length; - return outString; + return result; } // **************************************************************** -// BinToHexStr +// UTF8ToUTF16 // **************************************************************** -int BinToHexStr(char* bin, unsigned long binLen, char** hexStr) +int UTF8ToUTF16(char* inStr, unsigned short* outStr, unsigned long* outStrBufLen) { - int hexStrLen = binLen * 2 + 1; - - *hexStr = (char*)calloc(hexStrLen, sizeof(char)); + int result = 0; + unsigned long length = MultiByteToWideChar(CP_ACP, 0, inStr, -1, 0, 0) * 2; - for(unsigned long i = 0; i < binLen; i++) + if(outStr == NULL) { - sprintf(&(*hexStr)[i * 2], "%02X", (unsigned char)bin[i]); + result = 0; + goto exit; } - - return 0; -} - -// **************************************************************** -// HexStrToBin -// **************************************************************** -int HexStrToBin(char* hexStr, char** bin, unsigned long* binLen) -{ - int temp; - *binLen = strlen(hexStr) / 2; - - *bin = (char*)malloc(*binLen); - - for(unsigned long i = 0; i < *binLen; i++) + if(*outStrBufLen < length) { - sscanf(&hexStr[i * 2], "%02X", &temp); - (*bin)[i] = (unsigned char)temp; + result = -1; + goto exit; } - return 0; + MultiByteToWideChar(CP_ACP, 0, inStr, -1, outStr, length); + +exit: + *outStrBufLen = length; + + return result; } // **************************************************************** -// GetSIDByAccountName +// BinToHexStr // **************************************************************** -int GetSIDByAccountName(char* accountName, char** sid) +int BinToHexStr(char* bin, unsigned long binLen, char* hexStr, unsigned long* hexStrBufLen) { int result = 0; - unsigned long sidLen = 0; - char* domain; - unsigned long domainLen = 0; - SID_NAME_USE testType; 
+ unsigned long length = binLen * 2 + 1; + unsigned long i; - if(LookupAccountName(NULL, accountName, NULL, &sidLen, NULL, &domainLen, &testType) == 0) + if(hexStr == NULL) { - result = GetLastError(); + result = 0; + goto exit; } - - *sid = (char*)malloc(sidLen); - domain = (char*)malloc(domainLen); - - if(LookupAccountName(NULL, accountName, *sid, &sidLen, domain, &domainLen, &testType) == 0) + if(*hexStrBufLen < length) { - result = GetLastError(); + result = -1; + goto exit; } - else + + for(i = 0; i < binLen; i++) { - result = 0; + sprintf(&hexStr[i * 2], "%02X", (unsigned char)bin[i]); } +exit: + *hexStrBufLen = length; + return result; } // **************************************************************** -// GetAccountNameBySID +// HexStrToBin // **************************************************************** -int GetAccountNameBySID(char* sid, char** accountName) +int HexStrToBin(char* hexStr, char* bin, unsigned long* binBufLen) { int result = 0; - unsigned long sidLen = 0; - char* domain; - unsigned long domainLen = 0; - SID_NAME_USE testType; - - unsigned long accountNameLen = 0; + unsigned long length = strlen(hexStr) / 2; + unsigned long i; + int temp; - if(LookupAccountSid(NULL, sid, NULL, &accountNameLen, NULL, &domainLen, &testType) == 0) + if(bin == NULL) { - result = GetLastError(); + result = 0; + goto exit; } - - domain = (char*)malloc(domainLen); - *accountName = (char*)calloc(accountNameLen, sizeof(char)); - - if(LookupAccountSid(NULL, sid, *accountName, &accountNameLen, domain, &domainLen, &testType) == 0) + if(*binBufLen < length) { - result = GetLastError(); + result = -1; + goto exit; } - else + + for(i = 0; i < length; i++) { - result = 0; + sscanf(&hexStr[i * 2], "%02X", &temp); + bin[i] = (unsigned char)temp; } +exit: + *binBufLen = length; + return result; } // **************************************************************** -// GetSIDHexStrByAccountName +// GetSIDByAccountName // **************************************************************** -int GetSIDHexStrByAccountName(char* accountName, char** sidHexStr) +int GetSIDByAccountName(char* accountName, char* sid, unsigned long* sidBufLen) { int result = 0; - char* sid; unsigned long sidLen = 0; - char* domain; unsigned long domainLen = 0; + char* domain = NULL; SID_NAME_USE testType; - if(LookupAccountName(NULL, accountName, NULL, &sidLen, NULL, &domainLen, &testType) == 0) + LookupAccountName(NULL, accountName, NULL, &sidLen, NULL, &domainLen, &testType); + + if(sid == NULL) { - result = GetLastError(); + result = 0; + goto exit; + } + if(*sidBufLen < sidLen) + { + result = -1; + goto exit; } - - sid = (char*)malloc(sidLen); domain = (char*)malloc(domainLen); - if(LookupAccountName(NULL, accountName, sid, &sidLen, domain, &domainLen, &testType) == 0) + if(LookupAccountName(NULL, accountName, (void*)sid, &sidLen, domain, &domainLen, &testType) == 0) { result = GetLastError(); - } - else - { - result = 0; + goto exit; } +exit: + *sidBufLen = sidLen; - BinToHexStr(sid, sidLen, sidHexStr); + quickFree(&domain); return result; } // **************************************************************** -// GetAccountNameBySIDHexStr +// GetAccountNameBySID // **************************************************************** -int GetAccountNameBySIDHexStr(char* sidHexStr, char** accountName) +int GetAccountNameBySID(char* sid, char* accountName, unsigned long* accountNameBufLen) { int result = 0; - char* sid; - unsigned long sidLen = 0; - char* domain; + unsigned long accountNameLen = 0; unsigned long domainLen = 0; + 
char* domain = NULL; SID_NAME_USE testType; - unsigned long accountNameLen = 0; + LookupAccountSid(NULL, sid, NULL, &accountNameLen, NULL, &domainLen, &testType); - HexStrToBin(sidHexStr, &sid, &sidLen); - if(LookupAccountSid(NULL, sid, NULL, &accountNameLen, NULL, &domainLen, &testType) == 0) + if(accountName == NULL) { - result = GetLastError(); + result = 0; + goto exit; + } + if(*accountNameBufLen < accountNameLen) + { + result = -1; + goto exit; } - domain = (char*)malloc(domainLen); - *accountName = (char*)calloc(accountNameLen, sizeof(char)); - if(LookupAccountSid(NULL, sid, *accountName, &accountNameLen, domain, &domainLen, &testType) == 0) + if(LookupAccountSid(NULL, sid, accountName, &accountNameLen, domain, &domainLen, &testType) == 0) { result = GetLastError(); - } - else - { - result = 0; + goto exit; } +exit: + *accountNameBufLen = accountNameLen; + + quickFree(&domain); + return result; } @@ -246,7 +254,7 @@ int GetAccountNameBySIDHexStr(char* sidHexStr, char** accountName) // **************************************************************** NTUser::NTUser() { - userInfo = NULL; + currentAccountName = NULL; groupsInfo = NULL; currentGroupEntry = 0; @@ -257,6 +265,8 @@ NTUser::NTUser() currentLocalGroupEntry = 0; localGroupEntriesRead = 0; localGroupEntriesTotal = 0; + + resultBuf = NULL; } // **************************************************************** @@ -264,11 +274,7 @@ NTUser::NTUser() // **************************************************************** NTUser::~NTUser() { - if(userInfo != NULL) - { - NetApiBufferFree(userInfo); - userInfo = NULL; - } + quickFree((char**)&currentAccountName); if(groupsInfo != NULL) { NetApiBufferFree(groupsInfo); @@ -279,35 +285,42 @@ NTUser::~NTUser() NetApiBufferFree(localGroupsInfo); localGroupsInfo = NULL; } + quickFree((char**)&resultBuf); } // **************************************************************** // NTUser::NewUser // **************************************************************** -void NTUser::NewUser(char* username) +int NTUser::NewUser(char* username) { - if(userInfo != NULL) - { - NetApiBufferFree(userInfo); - userInfo = NULL; - } + int result = 0; + unsigned long length; + PUSER_INFO_3 info = NULL; + DWORD badParam = 0; - NetApiBufferAllocate(sizeof(USER_INFO_3),(LPVOID*)&userInfo); - memset(userInfo, 0, sizeof(USER_INFO_3)); - userInfo->usri3_name = UTF8ToUTF16(username); + quickFree((char**)&currentAccountName); + UTF8ToUTF16(username, NULL, &length); + currentAccountName = (unsigned short*)malloc(length); + UTF8ToUTF16(username, currentAccountName, &length); - // Possible required inits for AddUser - //userInfo->usri3_priv = USER_PRIV_USER; - //userInfo->usri3_home_dir = NULL; - //userInfo->usri3_comment = NULL; - //userInfo->usri3_script_path = NULL; + info = (USER_INFO_3*)malloc(sizeof(USER_INFO_3)); + memset(info, 0, sizeof(USER_INFO_3)); + info->usri3_name = currentAccountName; // NT4 required inits for AddUser - userInfo->usri3_flags = UF_SCRIPT; - userInfo->usri3_primary_group_id = DOMAIN_GROUP_RID_USERS; + info->usri3_flags = UF_SCRIPT; + info->usri3_primary_group_id = DOMAIN_GROUP_RID_USERS; // Other inits - userInfo->usri3_acct_expires = (unsigned long)-1; + info->usri3_acct_expires = (unsigned long)-1; + + // Add user + result = NetUserAdd(NULL, USER_INFO_LEVEL, (unsigned char*)info, &badParam); + + // Free buffers + quickFree((char**)&info); + + return result; } // **************************************************************** @@ -316,14 +329,15 @@ void NTUser::NewUser(char* username) int 
NTUser::RetriveUserByAccountName(char* username) { int result; + unsigned long length = 0; + PUSER_INFO_3 info; - if(userInfo != NULL) - { - NetApiBufferFree(userInfo); - userInfo = NULL; - } + quickFree((char**)&currentAccountName); + UTF8ToUTF16(username, NULL, &length); + currentAccountName = (unsigned short*)malloc(length); + UTF8ToUTF16(username, currentAccountName, &length); - result = NetUserGetInfo(NULL, UTF8ToUTF16(username), USER_INFO_LEVEL, (unsigned char**)&userInfo); + result = NetUserGetInfo(NULL, currentAccountName, USER_INFO_LEVEL, (unsigned char**)&info); return result; } @@ -334,19 +348,23 @@ int NTUser::RetriveUserByAccountName(char* username) int NTUser::RetriveUserBySIDHexStr(char* sidHexStr) { int result = 0; + unsigned long length = 0; char* username; + char* sid; - if(userInfo != NULL) - { - NetApiBufferFree(userInfo); - userInfo = NULL; - } + quickFree((char**)&currentAccountName); + + HexStrToBin(sidHexStr, NULL, &length); + sid = (char*)malloc(length); + HexStrToBin(sidHexStr, sid, &length); - if(GetAccountNameBySIDHexStr(sidHexStr, &username) != 0) + if(GetAccountNameBySID(sid, NULL, &length) != 0) { result = -1; goto exit; } + username = (char*)malloc(length); + GetAccountNameBySID(sid, username, &length); if(RetriveUserByAccountName(username) != 0) { @@ -355,37 +373,8 @@ int NTUser::RetriveUserBySIDHexStr(char* sidHexStr) } exit: - return result; -} - -// **************************************************************** -// NTUser::StoreUser -// **************************************************************** -int NTUser::StoreUser() -{ - int result = 0; - - if(userInfo != NULL) - { - result = NetUserSetInfo(NULL, userInfo->usri3_name, USER_INFO_LEVEL, (unsigned char*)userInfo, NULL); - } - else - { - result = -1; - } - - return result; -} - -// **************************************************************** -// NTUser::AddUser -// **************************************************************** -int NTUser::AddUser() -{ - int result; - DWORD badParam = 0; - - result = NetUserAdd(NULL, USER_INFO_LEVEL, (unsigned char*)userInfo, &badParam); + quickFree(&sid); + quickFree(&username); return result; } @@ -393,34 +382,21 @@ int NTUser::AddUser() // **************************************************************** // NTUser::DeleteUser // **************************************************************** -int NTUser::DeleteUser(char* username) -{ - int result; - - result = NetUserDel(NULL, UTF8ToUTF16(username)); - - return result; -} - -// **************************************************************** -// NTUser::ChangeUsername -// **************************************************************** -int NTUser::ChangeUsername(char* oldUsername, char* newUsername) +int NTUser::DeleteUser() { int result; - if((result = RetriveUserByAccountName(oldUsername)) == 0) + if(currentAccountName == NULL) { - userInfo->usri3_name = UTF8ToUTF16(newUsername); - if((result = AddUser()) == 0) - { - if((result = DeleteUser(oldUsername)) != 0) - { - DeleteUser(newUsername); - } - } + result = -1; + goto exit; } + result = NetUserDel(NULL, currentAccountName); + + quickFree((char**)&currentAccountName); + +exit: return result; } @@ -430,12 +406,21 @@ int NTUser::ChangeUsername(char* oldUsername, char* newUsername) char* NTUser::GetAccountName() { char* result = NULL; + unsigned long length = 0; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(userInfo->usri3_name); + goto exit; + result = NULL; } + quickFree(&resultBuf); + UTF16ToUTF8(currentAccountName, 
NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(currentAccountName, resultBuf, &length); + result = resultBuf; + +exit: return result; } @@ -445,12 +430,39 @@ char* NTUser::GetAccountName() char* NTUser::GetSIDHexStr() { char* result = NULL; + unsigned long length = 0; + unsigned long binLength = 0; + char* username = NULL; + char* sid = NULL; - if(userInfo != NULL) + if(currentAccountName == NULL) { - GetSIDHexStrByAccountName(UTF16ToUTF8(userInfo->usri3_name), &result); + result = NULL; + goto exit; } + UTF16ToUTF8(currentAccountName, NULL, &length); + username = (char*)malloc(length); + UTF16ToUTF8(currentAccountName, username, &length); + + if(GetSIDByAccountName(username, NULL, &binLength) != 0) + { + result = NULL; + goto exit; + } + sid = (char*)malloc(binLength); + GetSIDByAccountName(username, sid, &binLength); + + quickFree(&resultBuf); + BinToHexStr(sid, binLength, NULL, &length); + resultBuf = (char*)malloc(length); + BinToHexStr(sid, binLength, resultBuf, &length); + result = resultBuf; + +exit: + quickFree(&username); + quickFree(&sid); + return result; } @@ -460,12 +472,25 @@ char* NTUser::GetSIDHexStr() unsigned long NTUser::GetAccountExpires() { unsigned long result = 0; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = 0; + goto exit; + } - if(userInfo != NULL) + if((result = NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info)) != NERR_Success) { - result = userInfo->usri3_acct_expires; + result = 0; + goto exit; } + result = info->usri3_acct_expires; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -475,16 +500,18 @@ unsigned long NTUser::GetAccountExpires() int NTUser::SetAccountExpires(unsigned long accountExpires) { int result = 0; + USER_INFO_1017 info; - if(userInfo != NULL) - { - userInfo->usri3_acct_expires = accountExpires; - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } - + + info.usri1017_acct_expires = accountExpires; + result = NetUserSetInfo(NULL, currentAccountName, 1017, (unsigned char*)&info, NULL); + +exit: return result; } @@ -494,12 +521,25 @@ int NTUser::SetAccountExpires(unsigned long accountExpires) unsigned long NTUser::GetBadPasswordCount() { unsigned long result = 0; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = userInfo->usri3_bad_pw_count; + result = 0; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = 0; + goto exit; + } + + result = info->usri3_bad_pw_count; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -509,12 +549,25 @@ unsigned long NTUser::GetBadPasswordCount() unsigned long NTUser::GetCodePage() { unsigned long result = 0; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = userInfo->usri3_code_page; + result = 0; + goto exit; + } + + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = 0; + goto exit; } + result = info->usri3_code_page; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -524,16 +577,18 @@ unsigned long NTUser::GetCodePage() int NTUser::SetCodePage(unsigned long codePage) { int result = 0; + USER_INFO_1025 info; - if(userInfo != NULL) - { - userInfo->usri3_code_page = codePage; - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + info.usri1025_code_page = codePage; + result = NetUserSetInfo(NULL, currentAccountName, 1025, (unsigned char*)&info, NULL); + +exit: return result; 
} @@ -543,12 +598,30 @@ int NTUser::SetCodePage(unsigned long codePage) char* NTUser::GetComment() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = NULL; + goto exit; + } - if(userInfo != NULL) + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) { - result = UTF16ToUTF8(userInfo->usri3_comment); + result = NULL; + goto exit; } + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_comment, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_comment, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -558,16 +631,26 @@ char* NTUser::GetComment() int NTUser::SetComment(char* comment) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1007 info; - if(userInfo != NULL) - { - userInfo->usri3_comment = UTF8ToUTF16(comment); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(comment, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(comment, wideStr, &length); + + info.usri1007_comment = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1007, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -577,12 +660,25 @@ int NTUser::SetComment(char* comment) unsigned long NTUser::GetCountryCode() { unsigned long result = 0; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = 0; + goto exit; + } - if(userInfo != NULL) + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) { - result = userInfo->usri3_country_code; + result = 0; + goto exit; } + result = info->usri3_country_code; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -592,16 +688,18 @@ unsigned long NTUser::GetCountryCode() int NTUser::SetCountryCode(unsigned long countryCode) { int result = 0; + USER_INFO_1024 info; - if(userInfo != NULL) - { - userInfo->usri3_country_code = countryCode; - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + info.usri1024_country_code = countryCode; + result = NetUserSetInfo(NULL, currentAccountName, 1024, (unsigned char*)&info, NULL); + +exit: return result; } @@ -611,12 +709,25 @@ int NTUser::SetCountryCode(unsigned long countryCode) unsigned long NTUser::GetFlags() { unsigned long result = 0; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = userInfo->usri3_flags; + result = 0; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = 0; + goto exit; + } + + result = info->usri3_flags; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -626,16 +737,18 @@ unsigned long NTUser::GetFlags() int NTUser::SetFlags(unsigned long flags) { int result = 0; + USER_INFO_1008 info; - if(userInfo != NULL) - { - userInfo->usri3_flags = flags; - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } - + + info.usri1008_flags = flags; + result = NetUserSetInfo(NULL, currentAccountName, 1008, (unsigned char*)&info, NULL); + +exit: return result; } @@ -645,12 +758,30 @@ int NTUser::SetFlags(unsigned long flags) char* NTUser::GetHomeDir() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(userInfo->usri3_home_dir); + result = NULL; + goto exit; } + if(NetUserGetInfo(NULL, 
currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = NULL; + goto exit; + } + + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_home_dir, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_home_dir, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -660,16 +791,26 @@ char* NTUser::GetHomeDir() int NTUser::SetHomeDir(char* path) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1006 info; - if(userInfo != NULL) - { - userInfo->usri3_home_dir = UTF8ToUTF16(path); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(path, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(path, wideStr, &length); + + info.usri1006_home_dir = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1006, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -679,12 +820,30 @@ int NTUser::SetHomeDir(char* path) char* NTUser::GetHomeDirDrive() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(userInfo->usri3_home_dir_drive); + result = NULL; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = NULL; + goto exit; + } + + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_home_dir_drive, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_home_dir_drive, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -694,16 +853,26 @@ char* NTUser::GetHomeDirDrive() int NTUser::SetHomeDirDrive(char* path) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1053 info; - if(userInfo != NULL) - { - userInfo->usri3_home_dir_drive = UTF8ToUTF16(path); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(path, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(path, wideStr, &length); + + info.usri1053_home_dir_drive = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1053, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -713,12 +882,25 @@ int NTUser::SetHomeDirDrive(char* path) unsigned long NTUser::GetLastLogoff() { unsigned long result = 0; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = 0; + goto exit; + } - if(userInfo != NULL) + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) { - result = userInfo->usri3_last_logoff; + result = 0; + goto exit; } + result = info->usri3_last_logoff; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -728,12 +910,25 @@ unsigned long NTUser::GetLastLogoff() unsigned long NTUser::GetLastLogon() { unsigned long result = 0; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = userInfo->usri3_last_logon; + result = 0; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = 0; + goto exit; + } + + result = info->usri3_last_logon; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -743,12 +938,30 @@ unsigned long NTUser::GetLastLogon() char* NTUser::GetLogonHours() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == 
NULL) { - BinToHexStr((char*)userInfo->usri3_logon_hours, 21, &result); + result = NULL; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = NULL; + goto exit; + } + + quickFree(&resultBuf); + BinToHexStr((char*)info->usri3_logon_hours, 21, NULL, &length); + resultBuf = (char*)malloc(length); + BinToHexStr((char*)info->usri3_script_path, 21, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -758,19 +971,26 @@ char* NTUser::GetLogonHours() int NTUser::SetLogonHours(char* logonHours) { int result = 0; - char* binValue; - unsigned long binLen = 0; + unsigned long length; + char* bin = NULL; + USER_INFO_1020 info; - if(userInfo != NULL) - { - HexStrToBin(logonHours, &binValue, &binLen); - userInfo->usri3_logon_hours = (unsigned char*)binValue; - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + HexStrToBin(logonHours, NULL, &length); + bin = (char*)malloc(length); + HexStrToBin(logonHours, bin, &length); + + info.usri1020_logon_hours = (unsigned char*)bin; + result = NetUserSetInfo(NULL, currentAccountName, 1020, (unsigned char*)&info, NULL); + +exit: + quickFree(&bin); + return result; } @@ -780,12 +1000,25 @@ int NTUser::SetLogonHours(char* logonHours) unsigned long NTUser::GetMaxStorage() { unsigned long result = 0; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = userInfo->usri3_max_storage; + result = 0; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = 0; + goto exit; + } + + result = info->usri3_max_storage; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -795,16 +1028,18 @@ unsigned long NTUser::GetMaxStorage() int NTUser::SetMaxStorage(unsigned long maxStorage) { int result = 0; + USER_INFO_1018 info; - if(userInfo != NULL) - { - userInfo->usri3_max_storage = maxStorage; - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + info.usri1018_max_storage = maxStorage; + result = NetUserSetInfo(NULL, currentAccountName, 1018, (unsigned char*)&info, NULL); + +exit: return result; } @@ -814,12 +1049,25 @@ int NTUser::SetMaxStorage(unsigned long maxStorage) unsigned long NTUser::GetNumLogons() { unsigned long result = 0; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = 0; + goto exit; + } - if(userInfo != NULL) + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) { - result = userInfo->usri3_num_logons; + result = 0; + goto exit; } + result = info->usri3_num_logons; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -829,12 +1077,30 @@ unsigned long NTUser::GetNumLogons() char* NTUser::GetProfile() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(userInfo->usri3_profile); + result = NULL; + goto exit; + } + + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = NULL; + goto exit; } + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_profile, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_profile, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -844,16 +1110,26 @@ char* NTUser::GetProfile() int NTUser::SetProfile(char* path) { int result = 0; + unsigned long length; + unsigned short* wideStr = 
NULL; + USER_INFO_1052 info; - if(userInfo != NULL) - { - userInfo->usri3_profile = UTF8ToUTF16(path); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(path, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(path, wideStr, &length); + + info.usri1052_profile = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1052, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -863,12 +1139,30 @@ int NTUser::SetProfile(char* path) char* NTUser::GetScriptPath() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(userInfo->usri3_script_path); + result = NULL; + goto exit; } + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + { + result = NULL; + goto exit; + } + + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_script_path, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_script_path, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -878,16 +1172,26 @@ char* NTUser::GetScriptPath() int NTUser::SetScriptPath(char* path) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1009 info; - if(userInfo != NULL) - { - userInfo->usri3_script_path = UTF8ToUTF16(path); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(path, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(path, wideStr, &length); + + info.usri1009_script_path = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1009, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -897,12 +1201,30 @@ int NTUser::SetScriptPath(char* path) char* NTUser::GetWorkstations() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = NULL; + goto exit; + } - if(userInfo != NULL) + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) { - result = UTF16ToUTF8(userInfo->usri3_workstations); + result = NULL; + goto exit; } + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_workstations, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_workstations, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -912,16 +1234,26 @@ char* NTUser::GetWorkstations() int NTUser::SetWorkstations(char* workstations) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1014 info; - if(userInfo != NULL) - { - userInfo->usri3_workstations = UTF8ToUTF16(workstations); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(workstations, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(workstations, wideStr, &length); + + info.usri1014_workstations = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1014, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -931,12 +1263,30 @@ int NTUser::SetWorkstations(char* workstations) char* NTUser::GetFullname() { char* result = NULL; + unsigned long length; + PUSER_INFO_3 info; + + if(currentAccountName == NULL) + { + result = NULL; + goto exit; + } - if(userInfo != NULL) + if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != 
NERR_Success) { - result = UTF16ToUTF8(userInfo->usri3_full_name); + result = NULL; + goto exit; } + quickFree(&resultBuf); + UTF16ToUTF8(info->usri3_full_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->usri3_full_name, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -946,16 +1296,26 @@ char* NTUser::GetFullname() int NTUser::SetFullname(char* fullname) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1011 info; - if(userInfo != NULL) - { - userInfo->usri3_full_name = UTF8ToUTF16(fullname); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(fullname, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(fullname, wideStr, &length); + + info.usri1011_full_name = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1011, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -965,16 +1325,26 @@ int NTUser::SetFullname(char* fullname) int NTUser::SetPassword(char* password) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + USER_INFO_1003 info; - if(userInfo != NULL) - { - userInfo->usri3_password = UTF8ToUTF16(password); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(password, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(password, wideStr, &length); + + info.usri1003_password = wideStr; + result = NetUserSetInfo(NULL, currentAccountName, 1003, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -984,16 +1354,24 @@ int NTUser::SetPassword(char* password) int NTUser::AddToGroup(char* groupName) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; - if(userInfo != NULL) - { - result = NetGroupAddUser(NULL, UTF8ToUTF16(groupName), userInfo->usri3_name); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(groupName, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(groupName, wideStr, &length); + + result = NetGroupAddUser(NULL, wideStr, currentAccountName); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -1003,16 +1381,24 @@ int NTUser::AddToGroup(char* groupName) int NTUser::RemoveFromGroup(char* groupName) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; - if(userInfo != NULL) - { - result = NetGroupDelUser(NULL, UTF8ToUTF16(groupName), userInfo->usri3_name); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(groupName, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(groupName, wideStr, &length); + + result = NetGroupDelUser(NULL, wideStr, currentAccountName); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -1023,23 +1409,24 @@ int NTUser::LoadGroups() { int result = 0; - if(userInfo != NULL) + if(currentAccountName == NULL) { - if(groupsInfo != NULL) - { - NetApiBufferFree(groupsInfo); - groupsInfo = NULL; - currentGroupEntry = 0; - groupEntriesRead = 0; - groupEntriesTotal = 0; - } - result = NetUserGetGroups(NULL, userInfo->usri3_name, USER_GROUPS_INFO_LEVEL, (unsigned char**)&groupsInfo, MAX_PREFERRED_LENGTH, &groupEntriesRead, &groupEntriesTotal); + result = -1; + goto exit; } - else + + if(groupsInfo != NULL) { - result = -1; + NetApiBufferFree(groupsInfo); + groupsInfo = NULL; + currentGroupEntry = 0; + groupEntriesRead = 0; 
+ groupEntriesTotal = 0; } + result = NetUserGetGroups(NULL, currentAccountName, USER_GROUPS_INFO_LEVEL, (unsigned char**)&groupsInfo, MAX_PREFERRED_LENGTH, &groupEntriesRead, &groupEntriesTotal); + +exit: return result; } @@ -1067,17 +1454,21 @@ bool NTUser::HasMoreGroups() // **************************************************************** char* NTUser::NextGroupName() { - char* groupName = NULL; - GROUP_USERS_INFO_0* thisEntry; + char* result = NULL; + unsigned long length; if(currentGroupEntry < groupEntriesRead) { - thisEntry = &(groupsInfo[currentGroupEntry]); - groupName = UTF16ToUTF8(thisEntry->grui0_name); + quickFree(&resultBuf); + UTF16ToUTF8(groupsInfo[currentGroupEntry].grui0_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(groupsInfo[currentGroupEntry].grui0_name, resultBuf, &length); + result = resultBuf; + currentGroupEntry++; } - return groupName; + return result; } // **************************************************************** @@ -1086,6 +1477,8 @@ char* NTUser::NextGroupName() int NTUser::AddToLocalGroup(char* localGroupName) { int result = 0; + unsigned long length; + unsigned short* wideStr; char userSID[256]; wchar_t domain[256]; DWORD SIDLen = sizeof(userSID); @@ -1096,21 +1489,27 @@ int NTUser::AddToLocalGroup(char* localGroupName) memset(&domain, 0, sizeof(domain)); memset(&userSID, 0, sizeof(userSID)); - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = LookupAccountName(NULL, UTF16ToUTF8(userInfo->usri3_name), &userSID, &SIDLen, (LPTSTR)&domain, &domainLen, &SIDUseIndicator); - - if(result != 0) - { - membersbuf[0].lgrmi0_sid = &userSID; - result = NetLocalGroupAddMembers(NULL, UTF8ToUTF16(localGroupName), 0, (LPBYTE)&membersbuf, 1); - } + result = -1; + goto exit; } - else + + result = LookupAccountName(NULL, GetAccountName(), &userSID, &SIDLen, (LPTSTR)&domain, &domainLen, &SIDUseIndicator); + + if(result != 0) { - result = -1; + membersbuf[0].lgrmi0_sid = &userSID; + + UTF8ToUTF16(localGroupName, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(localGroupName, wideStr, &length); + result = NetLocalGroupAddMembers(NULL, wideStr, 0, (LPBYTE)&membersbuf, 1); } +exit: + quickFree((char**)&wideStr); + return result; } @@ -1120,6 +1519,8 @@ int NTUser::AddToLocalGroup(char* localGroupName) int NTUser::RemoveFromLocalGroup(char* localGroupName) { int result = 0; + unsigned long length; + unsigned short* wideStr; char userSID[256]; wchar_t domain[256]; DWORD SIDLen = sizeof(userSID); @@ -1130,21 +1531,27 @@ int NTUser::RemoveFromLocalGroup(char* localGroupName) memset(&domain, 0, sizeof(domain)); memset(&userSID, 0, sizeof(userSID)); - if(userInfo != NULL) + if(currentAccountName == NULL) { - result = LookupAccountName(NULL, UTF16ToUTF8(userInfo->usri3_name), &userSID, &SIDLen, (LPTSTR)&domain, &domainLen, &SIDUseIndicator); - - if(result != 0) - { - membersbuf[0].lgrmi0_sid = &userSID; - result = NetLocalGroupDelMembers(NULL, UTF8ToUTF16(localGroupName), 0, (LPBYTE)&membersbuf, 1); - } + result = -1; + goto exit; } - else + + result = LookupAccountName(NULL, GetAccountName(), &userSID, &SIDLen, (LPTSTR)&domain, &domainLen, &SIDUseIndicator); + + if(result != 0) { - result = -1; + membersbuf[0].lgrmi0_sid = &userSID; + + UTF8ToUTF16(localGroupName, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(localGroupName, wideStr, &length); + result = NetLocalGroupDelMembers(NULL, wideStr, 0, (LPBYTE)&membersbuf, 1); } +exit: + quickFree((char**)&wideStr); + return result; } @@ 
-1155,24 +1562,24 @@ int NTUser::LoadLocalGroups() { int result = 0; - if(userInfo != NULL) + if(currentAccountName == NULL) { - if(localGroupsInfo != NULL) - { - NetApiBufferFree(localGroupsInfo); - localGroupsInfo = NULL; - currentLocalGroupEntry = 0; - localGroupEntriesRead = 0; - localGroupEntriesTotal = 0; - } - - result = NetUserGetLocalGroups(NULL, userInfo->usri3_name, 0, USER_LOCALGROUPS_INFO_LEVEL, (unsigned char**)&localGroupsInfo, MAX_PREFERRED_LENGTH, &localGroupEntriesRead, &localGroupEntriesTotal); + result = -1; + goto exit; } - else + + if(localGroupsInfo != NULL) { - result = -1; + NetApiBufferFree(localGroupsInfo); + localGroupsInfo = NULL; + currentLocalGroupEntry = 0; + localGroupEntriesRead = 0; + localGroupEntriesTotal = 0; } + result = NetUserGetLocalGroups(NULL, currentAccountName, 0, USER_LOCALGROUPS_INFO_LEVEL, (unsigned char**)&localGroupsInfo, MAX_PREFERRED_LENGTH, &localGroupEntriesRead, &localGroupEntriesTotal); + +exit: return result; } @@ -1200,17 +1607,21 @@ bool NTUser::HasMoreLocalGroups() // **************************************************************** char* NTUser::NextLocalGroupName() { - char* localGroupName = NULL; - LOCALGROUP_USERS_INFO_0* thisEntry; + char* result = NULL; + unsigned long length; if(currentLocalGroupEntry < localGroupEntriesRead) { - thisEntry = &(localGroupsInfo[currentLocalGroupEntry]); - localGroupName = UTF16ToUTF8(thisEntry->lgrui0_name); + quickFree(&resultBuf); + UTF16ToUTF8(localGroupsInfo[currentLocalGroupEntry].lgrui0_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(localGroupsInfo[currentLocalGroupEntry].lgrui0_name, resultBuf, &length); + result = resultBuf; + currentLocalGroupEntry++; } - return localGroupName; + return result; } // **************************************************************** @@ -1218,11 +1629,10 @@ char* NTUser::NextLocalGroupName() // **************************************************************** NTUserList::NTUserList() { - entriesRead = 0; - totalEntries = 0; bufptr = NULL; currentEntry = 0; resumeHandle = 0; + resultBuf = NULL; } // **************************************************************** @@ -1235,6 +1645,7 @@ NTUserList::~NTUserList() NetApiBufferFree(bufptr); bufptr = NULL; } + quickFree(&resultBuf); } // **************************************************************** @@ -1273,17 +1684,21 @@ bool NTUserList::hasMore() // **************************************************************** char* NTUserList::nextUsername() { - char* username = NULL; - USER_INFO_3* thisEntry; + char* result = NULL; + unsigned long length; if(currentEntry < entriesRead) { - thisEntry = &(bufptr[currentEntry]); - username = UTF16ToUTF8(thisEntry->usri3_name); + quickFree(&resultBuf); + UTF16ToUTF8(bufptr[currentEntry].usri3_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(bufptr[currentEntry].usri3_name, resultBuf, &length); + result = resultBuf; + currentEntry++; } - return username; + return result; } // **************************************************************** @@ -1291,11 +1706,12 @@ char* NTUserList::nextUsername() // **************************************************************** NTGroup::NTGroup() { - groupInfo = NULL; + currentAccountName = NULL; usersInfo = NULL; currentUserEntry = 0; userEntriesRead = 0; userEntriesTotal = 0; + resultBuf = NULL; } // **************************************************************** @@ -1303,60 +1719,59 @@ NTGroup::NTGroup() // **************************************************************** 
NTGroup::~NTGroup() { - if(groupInfo != NULL) - { - NetApiBufferFree(groupInfo); - groupInfo = NULL; - } + quickFree((char**)&currentAccountName); if(usersInfo != NULL) { NetApiBufferFree(usersInfo); usersInfo = NULL; } + quickFree(&resultBuf); } // **************************************************************** // NTGroup::NewGroup // **************************************************************** -void NTGroup::NewGroup(char* groupName) +int NTGroup::NewGroup(char* groupName) { - if(groupInfo != NULL) - { - NetApiBufferFree(groupInfo); - groupInfo = NULL; - } + int result = 0; + unsigned long length; + PGROUP_INFO_2 info = NULL; + DWORD badParam = 0; - NetApiBufferAllocate(sizeof(GROUP_INFO_2),(LPVOID*)&groupInfo); - memset(groupInfo, 0, sizeof(GROUP_INFO_2)); - groupInfo->grpi2_name = UTF8ToUTF16(groupName); -} + quickFree((char**)&currentAccountName); -// **************************************************************** -// NTGroup::DeleteGroup -// **************************************************************** -int NTGroup::DeleteGroup(char* groupName) -{ - int result; + UTF8ToUTF16(groupName, NULL, &length); + currentAccountName = (unsigned short*)malloc(length); + UTF8ToUTF16(groupName, currentAccountName, &length); + + info = (PGROUP_INFO_2)malloc(sizeof(GROUP_INFO_2)); + memset(info, 0, sizeof(GROUP_INFO_2)); + info->grpi2_name = currentAccountName; - result = NetGroupDel(NULL, UTF8ToUTF16(groupName)); + // Add group + result = NetGroupAdd(NULL, GROUP_INFO_LEVEL, (unsigned char*)info, &badParam); + + // Free buffers + quickFree((char**)&info); return result; } // **************************************************************** -// NTGroup::RetriveGroup +// NTGroup::RetriveGroupByAccountName // **************************************************************** int NTGroup::RetriveGroupByAccountName(char* groupName) { int result; + unsigned long length; + PGROUP_INFO_2 info = NULL; - if(groupInfo != NULL) - { - NetApiBufferFree(groupInfo); - groupInfo = NULL; - } + quickFree((char**)&currentAccountName); + UTF8ToUTF16(groupName, NULL, &length); + currentAccountName = (unsigned short*)malloc(length); + UTF8ToUTF16(groupName, currentAccountName, &length); - result = NetGroupGetInfo(NULL, UTF8ToUTF16(groupName), GROUP_INFO_LEVEL, (unsigned char**)&groupInfo); + result = NetGroupGetInfo(NULL, currentAccountName, GROUP_INFO_LEVEL, (unsigned char**)&info); return result; } @@ -1367,19 +1782,23 @@ int NTGroup::RetriveGroupByAccountName(char* groupName) int NTGroup::RetriveGroupBySIDHexStr(char* sidHexStr) { int result = 0; + unsigned long length; char* groupName; + char* sid; - if(groupInfo != NULL) - { - NetApiBufferFree(groupInfo); - groupInfo = NULL; - } + quickFree((char**)&currentAccountName); - if(GetAccountNameBySIDHexStr(sidHexStr, &groupName) != 0) + HexStrToBin(sidHexStr, NULL, &length); + sid = (char*)malloc(length); + HexStrToBin(sidHexStr, sid, &length); + + if(GetAccountNameBySID(sid, NULL, &length) != 0) { result = -1; goto exit; } + groupName = (char*)malloc(length); + GetAccountNameBySID(sid, groupName, &length); if(RetriveGroupByAccountName(groupName) != 0) { @@ -1388,34 +1807,30 @@ int NTGroup::RetriveGroupBySIDHexStr(char* sidHexStr) } exit: - return result; -} - -// **************************************************************** -// NTGroup::AddGroup -// **************************************************************** -int NTGroup::AddGroup() -{ - int result; - DWORD badParam = 0; - - result = NetGroupAdd(NULL, GROUP_INFO_LEVEL, (unsigned char*)groupInfo, &badParam); 
+ quickFree(&sid); + quickFree(&groupName); return result; } // **************************************************************** -// NTGroup::StoreGroup +// NTGroup::DeleteGroup // **************************************************************** -int NTGroup::StoreGroup() +int NTGroup::DeleteGroup() { - int result = -1; + int result; - if(groupInfo != NULL) + if(currentAccountName == NULL) { - result = NetGroupSetInfo(NULL, groupInfo->grpi2_name, GROUP_INFO_LEVEL, (unsigned char*)groupInfo, NULL); + result = -1; + goto exit; } + result = NetGroupDel(NULL, currentAccountName); + + quickFree((char**)&currentAccountName); + +exit: return result; } @@ -1425,12 +1840,21 @@ int NTGroup::StoreGroup() char* NTGroup::GetAccountName() { char* result = NULL; + unsigned long length = 0; - if(groupInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(groupInfo->grpi2_name); + goto exit; + result = NULL; } + quickFree(&resultBuf); + UTF16ToUTF8(currentAccountName, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(currentAccountName, resultBuf, &length); + result = resultBuf; + +exit: return result; } @@ -1438,13 +1862,40 @@ char* NTGroup::GetAccountName() // NTGroup::GetSIDHexStr // **************************************************************** char* NTGroup::GetSIDHexStr() -{ +{ char* result = NULL; + unsigned long length = 0; + unsigned long binLength = 0; + char* groupName = NULL; + char* sid = NULL; - if(groupInfo != NULL) + if(currentAccountName == NULL) { - GetSIDHexStrByAccountName(UTF16ToUTF8(groupInfo->grpi2_name), &result); + result = NULL; + goto exit; + } + + UTF16ToUTF8(currentAccountName, NULL, &length); + groupName = (char*)malloc(length); + UTF16ToUTF8(currentAccountName, groupName, &length); + + if(GetSIDByAccountName(groupName, NULL, &binLength) != 0) + { + result = NULL; + goto exit; } + sid = (char*)malloc(binLength); + GetSIDByAccountName(groupName, sid, &binLength); + + quickFree(&resultBuf); + BinToHexStr(sid, binLength, NULL, &length); + resultBuf = (char*)malloc(length); + BinToHexStr(sid, binLength, resultBuf, &length); + result = resultBuf; + +exit: + quickFree(&groupName); + quickFree(&sid); return result; } @@ -1455,12 +1906,30 @@ char* NTGroup::GetSIDHexStr() char* NTGroup::GetComment() { char* result = NULL; + unsigned long length; + PGROUP_INFO_2 info; + + if(currentAccountName == NULL) + { + result = NULL; + goto exit; + } - if(groupInfo != NULL) + if(NetGroupGetInfo(NULL, currentAccountName, 2, (unsigned char**)&info) != NERR_Success) { - result = UTF16ToUTF8(groupInfo->grpi2_comment); + result = NULL; + goto exit; } + quickFree(&resultBuf); + UTF16ToUTF8(info->grpi2_comment, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->grpi2_comment, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -1470,16 +1939,26 @@ char* NTGroup::GetComment() int NTGroup::SetComment(char* comment) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + GROUP_INFO_1002 info; - if(groupInfo != NULL) - { - groupInfo->grpi2_comment = UTF8ToUTF16(comment); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(comment, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(comment, wideStr, &length); + + info.grpi1002_comment = wideStr; + result = NetGroupSetInfo(NULL, currentAccountName, 1002, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -1489,16 +1968,24 @@ int 
NTGroup::SetComment(char* comment) int NTGroup::AddUser(char* userName) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; - if(groupInfo != NULL) - { - result = NetGroupAddUser(NULL, groupInfo->grpi2_name, UTF8ToUTF16(userName)); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(userName, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(userName, wideStr, &length); + + result = NetGroupAddUser(NULL, currentAccountName, wideStr); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -1508,16 +1995,24 @@ int NTGroup::AddUser(char* userName) int NTGroup::RemoveUser(char* userName) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; - if(groupInfo != NULL) - { - result = NetGroupDelUser(NULL, groupInfo->grpi2_name, UTF8ToUTF16(userName)); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(userName, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(userName, wideStr, &length); + + result = NetGroupDelUser(NULL, currentAccountName, wideStr); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -1528,23 +2023,23 @@ int NTGroup::LoadUsers() { int result = 0; - if(groupInfo != NULL) + if(currentAccountName == NULL) { - if(usersInfo != NULL) - { - NetApiBufferFree(usersInfo); - usersInfo = NULL; - currentUserEntry = 0; - userEntriesRead = 0; - userEntriesTotal = 0; - } - result = NetGroupGetUsers(NULL, groupInfo->grpi2_name, GROUP_USERS_INFO_LEVEL, (unsigned char**)&usersInfo, MAX_PREFERRED_LENGTH, &userEntriesRead, &userEntriesTotal, NULL); + result = -1; + goto exit; } - else + + if(usersInfo != NULL) { - result = -1; + NetApiBufferFree(usersInfo); + usersInfo = NULL; + currentUserEntry = 0; + userEntriesRead = 0; + userEntriesTotal = 0; } + result = NetGroupGetUsers(NULL, currentAccountName, GROUP_USERS_INFO_LEVEL, (unsigned char**)&usersInfo, MAX_PREFERRED_LENGTH, &userEntriesRead, &userEntriesTotal, NULL); +exit: return result; } @@ -1572,17 +2067,21 @@ bool NTGroup::HasMoreUsers() // **************************************************************** char* NTGroup::NextUserName() { - char* userName = NULL; - LOCALGROUP_USERS_INFO_0* thisEntry; + char* result = NULL; + unsigned long length; if(currentUserEntry < userEntriesRead) { - thisEntry = &(usersInfo[currentUserEntry]); - userName = UTF16ToUTF8(thisEntry->lgrui0_name); + quickFree(&resultBuf); + UTF16ToUTF8(usersInfo[currentUserEntry].lgrui0_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(usersInfo[currentUserEntry].lgrui0_name, resultBuf, &length); + result = resultBuf; + currentUserEntry++; } - return userName; + return result; } // **************************************************************** @@ -1590,11 +2089,10 @@ char* NTGroup::NextUserName() // **************************************************************** NTGroupList::NTGroupList() { - entriesRead = 0; - totalEntries = 0; bufptr = NULL; currentEntry = 0; resumeHandle = 0; + resultBuf = NULL; } // **************************************************************** @@ -1607,6 +2105,7 @@ NTGroupList::~NTGroupList() NetApiBufferFree(bufptr); bufptr = NULL; } + quickFree(&resultBuf); } // **************************************************************** @@ -1645,17 +2144,21 @@ bool NTGroupList::hasMore() // **************************************************************** char* NTGroupList::nextGroupName() { - char* groupName = NULL; - GROUP_INFO_2* thisEntry; + char* result 
= NULL; + unsigned long length; if(currentEntry < entriesRead) { - thisEntry = &(bufptr[currentEntry]); - groupName = UTF16ToUTF8(thisEntry->grpi2_name); + quickFree(&resultBuf); + UTF16ToUTF8(bufptr[currentEntry].grpi2_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(bufptr[currentEntry].grpi2_name, resultBuf, &length); + result = resultBuf; + currentEntry++; } - return groupName; + return result; } // **************************************************************** @@ -1663,11 +2166,12 @@ char* NTGroupList::nextGroupName() // **************************************************************** NTLocalGroup::NTLocalGroup() { - localGroupInfo = NULL; + currentAccountName = NULL; usersInfo = NULL; currentUserEntry = 0; userEntriesRead = 0; userEntriesTotal = 0; + resultBuf = NULL; } // **************************************************************** @@ -1675,60 +2179,59 @@ NTLocalGroup::NTLocalGroup() // **************************************************************** NTLocalGroup::~NTLocalGroup() { - if(localGroupInfo != NULL) - { - NetApiBufferFree(localGroupInfo); - localGroupInfo = NULL; - } + quickFree((char**)&currentAccountName); if(usersInfo != NULL) { NetApiBufferFree(usersInfo); usersInfo = NULL; } + quickFree(&resultBuf); } // **************************************************************** // NTLocalGroup::NewLocalGroup // **************************************************************** -void NTLocalGroup::NewLocalGroup(char* localGroupName) +int NTLocalGroup::NewLocalGroup(char* localGroupName) { - if(localGroupInfo != NULL) - { - NetApiBufferFree(localGroupInfo); - localGroupInfo = NULL; - } - NetApiBufferAllocate(sizeof(LOCALGROUP_INFO_1),(LPVOID*)&localGroupInfo); - memset(localGroupInfo, 0, sizeof(LOCALGROUP_INFO_1)); - localGroupInfo->lgrpi1_name = UTF8ToUTF16(localGroupName); -} + int result = 0; + unsigned long length; + PLOCALGROUP_INFO_1 info = NULL; + DWORD badParam = 0; -// **************************************************************** -// NTLocalGroup::DeleteLocalGroup -// **************************************************************** -int NTLocalGroup::DeleteLocalGroup(char* localGroupName) -{ - int result; + quickFree((char**)&currentAccountName); + UTF8ToUTF16(localGroupName, NULL, &length); + currentAccountName = (unsigned short*)malloc(length); + UTF8ToUTF16(localGroupName, currentAccountName, &length); + + info = (PLOCALGROUP_INFO_1)malloc(sizeof(LOCALGROUP_INFO_1)); + memset(info, 0, sizeof(LOCALGROUP_INFO_1)); + info->lgrpi1_name = currentAccountName; - result = NetLocalGroupDel(NULL, UTF8ToUTF16(localGroupName)); + // Add group + result = NetLocalGroupAdd(NULL, LOCALGROUP_INFO_LEVEL, (unsigned char*)info, &badParam); + + // Free buffers + quickFree((char**)&info); return result; } // **************************************************************** -// NTLocalGroup::RetriveLocalGroup +// NTLocalGroup::RetriveLocalGroupByAccountName // **************************************************************** int NTLocalGroup::RetriveLocalGroupByAccountName(char* localGroupName) { int result; + unsigned long length; + PLOCALGROUP_INFO_1 info = NULL; - if(localGroupInfo != NULL) - { - NetApiBufferFree(localGroupInfo); - localGroupInfo = NULL; - } + quickFree((char**)&currentAccountName); + UTF8ToUTF16(localGroupName, NULL, &length); + currentAccountName = (unsigned short*)malloc(length); + UTF8ToUTF16(localGroupName, currentAccountName, &length); - result = NetLocalGroupGetInfo(NULL, UTF8ToUTF16(localGroupName), LOCALGROUP_INFO_LEVEL, (unsigned 
char**)&localGroupInfo); + result = NetLocalGroupGetInfo(NULL, currentAccountName, LOCALGROUP_INFO_LEVEL, (unsigned char**)&info); return result; } @@ -1739,19 +2242,23 @@ int NTLocalGroup::RetriveLocalGroupByAccountName(char* localGroupName) int NTLocalGroup::RetriveLocalGroupBySIDHexStr(char* sidHexStr) { int result = 0; + unsigned long length; char* localGroupName; + char* sid; - if(localGroupInfo != NULL) - { - NetApiBufferFree(localGroupInfo); - localGroupInfo = NULL; - } + quickFree((char**)&currentAccountName); - if(GetAccountNameBySIDHexStr(sidHexStr, &localGroupName) != 0) + HexStrToBin(sidHexStr, NULL, &length); + sid = (char*)malloc(length); + HexStrToBin(sidHexStr, sid, &length); + + if(GetAccountNameBySID(sid, NULL, &length) != 0) { result = -1; goto exit; } + localGroupName = (char*)malloc(length); + GetAccountNameBySID(sid, localGroupName, &length); if(RetriveLocalGroupByAccountName(localGroupName) != 0) { @@ -1760,38 +2267,30 @@ int NTLocalGroup::RetriveLocalGroupBySIDHexStr(char* sidHexStr) } exit: - return result; -} - -// **************************************************************** -// NTLocalGroup::AddLocalGroup -// **************************************************************** -int NTLocalGroup::AddLocalGroup() -{ - int result; - DWORD badParam = 0; - - result = NetLocalGroupAdd(NULL, LOCALGROUP_INFO_LEVEL, (unsigned char*)localGroupInfo, &badParam); + quickFree(&sid); + quickFree(&localGroupName); return result; } // **************************************************************** -// NTLocalGroup::StoreLocalGroup +// NTLocalGroup::DeleteLocalGroup // **************************************************************** -int NTLocalGroup::StoreLocalGroup() +int NTLocalGroup::DeleteLocalGroup() { - int result = 0; + int result; - if(localGroupInfo != NULL) - { - result = NetLocalGroupSetInfo(NULL, localGroupInfo->lgrpi1_name, LOCALGROUP_INFO_LEVEL, (unsigned char*)localGroupInfo, NULL); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + result = NetLocalGroupDel(NULL, currentAccountName); + + quickFree((char**)&currentAccountName); + +exit: return result; } @@ -1801,12 +2300,21 @@ int NTLocalGroup::StoreLocalGroup() char* NTLocalGroup::GetAccountName() { char* result = NULL; + unsigned long length = 0; - if(localGroupInfo != NULL) + if(currentAccountName == NULL) { - result = UTF16ToUTF8(localGroupInfo->lgrpi1_name); + goto exit; + result = NULL; } + quickFree((char**)&resultBuf); + UTF16ToUTF8(currentAccountName, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(currentAccountName, resultBuf, &length); + result = resultBuf; + +exit: return result; } @@ -1816,11 +2324,38 @@ char* NTLocalGroup::GetAccountName() char* NTLocalGroup::GetSIDHexStr() { char* result = NULL; + unsigned long length = 0; + unsigned long binLength = 0; + char* localGroupName = NULL; + char* sid = NULL; + + if(currentAccountName == NULL) + { + result = NULL; + goto exit; + } - if(localGroupInfo != NULL) + UTF16ToUTF8(currentAccountName, NULL, &length); + localGroupName = (char*)malloc(length); + UTF16ToUTF8(currentAccountName, localGroupName, &length); + + if(GetSIDByAccountName(localGroupName, NULL, &binLength) != 0) { - GetSIDHexStrByAccountName(UTF16ToUTF8(localGroupInfo->lgrpi1_name), &result); + result = NULL; + goto exit; } + sid = (char*)malloc(binLength); + GetSIDByAccountName(localGroupName, sid, &binLength); + + quickFree(&resultBuf); + BinToHexStr(sid, binLength, NULL, &length); + resultBuf = (char*)malloc(length); + BinToHexStr(sid, binLength, 
resultBuf, &length); + result = resultBuf; + +exit: + quickFree(&localGroupName); + quickFree(&sid); return result; } @@ -1831,12 +2366,30 @@ char* NTLocalGroup::GetSIDHexStr() char* NTLocalGroup::GetComment() { char* result = NULL; + unsigned long length; + PLOCALGROUP_INFO_1 info; + + if(currentAccountName == NULL) + { + result = NULL; + goto exit; + } - if(localGroupInfo != NULL) + if(NetLocalGroupGetInfo(NULL, currentAccountName, 1, (unsigned char**)&info) != NERR_Success) { - result = UTF16ToUTF8(localGroupInfo->lgrpi1_comment); + result = NULL; + goto exit; } + quickFree(&resultBuf); + UTF16ToUTF8(info->lgrpi1_comment, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(info->lgrpi1_comment, resultBuf, &length); + result = resultBuf; + +exit: + NetApiBufferFree((void*)info); + return result; } @@ -1846,16 +2399,26 @@ char* NTLocalGroup::GetComment() int NTLocalGroup::SetComment(char* comment) { int result = 0; + unsigned long length; + unsigned short* wideStr = NULL; + LOCALGROUP_INFO_1002 info; - if(localGroupInfo != NULL) - { - localGroupInfo->lgrpi1_comment = UTF8ToUTF16(comment); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + UTF8ToUTF16(comment, NULL, &length); + wideStr = (unsigned short*)malloc(length); + UTF8ToUTF16(comment, wideStr, &length); + + info.lgrpi1002_comment = wideStr; + result = NetLocalGroupSetInfo(NULL, currentAccountName, 1002, (unsigned char*)&info, NULL); + +exit: + quickFree((char**)&wideStr); + return result; } @@ -1865,19 +2428,26 @@ int NTLocalGroup::SetComment(char* comment) int NTLocalGroup::AddUser(char* username) { int result = 0; + unsigned long length; + char* sid; LOCALGROUP_MEMBERS_INFO_0 members[1]; - if(localGroupInfo != NULL) - { - GetSIDByAccountName(username, (char**)&members[0].lgrmi0_sid); - result = NetLocalGroupAddMembers(NULL, localGroupInfo->lgrpi1_name, 0, (unsigned char*)&members, 1); - free(members[0].lgrmi0_sid); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + GetSIDByAccountName(username, NULL, &length); + sid = (char*)malloc(length); + GetSIDByAccountName(username, sid, &length); + members[0].lgrmi0_sid = sid; + + result = NetLocalGroupAddMembers(NULL, currentAccountName, 0, (unsigned char*)&members, 1); + +exit: + quickFree(&sid); + return result; } @@ -1887,19 +2457,26 @@ int NTLocalGroup::AddUser(char* username) int NTLocalGroup::RemoveUser(char* username) { int result = 0; + unsigned long length; + char* sid; LOCALGROUP_MEMBERS_INFO_0 members[1]; - if(localGroupInfo != NULL) - { - GetSIDByAccountName(username, (char**)&members[0].lgrmi0_sid); - result = NetLocalGroupDelMembers(NULL, localGroupInfo->lgrpi1_name, 0, (unsigned char*)&members, 1); - free(members[0].lgrmi0_sid); - } - else + if(currentAccountName == NULL) { result = -1; + goto exit; } + GetSIDByAccountName(username, NULL, &length); + sid = (char*)malloc(length); + GetSIDByAccountName(username, sid, &length); + members[0].lgrmi0_sid = sid; + + result = NetLocalGroupDelMembers(NULL, currentAccountName, 0, (unsigned char*)&members, 1); + +exit: + quickFree(&sid); + return result; } @@ -1910,23 +2487,23 @@ int NTLocalGroup::LoadUsers() { int result = 0; - if(localGroupInfo != NULL) + if(currentAccountName == NULL) { - if(usersInfo != NULL) - { - NetApiBufferFree(usersInfo); - usersInfo = NULL; - currentUserEntry = 0; - userEntriesRead = 0; - userEntriesTotal = 0; - } - result = NetLocalGroupGetMembers(NULL, localGroupInfo->lgrpi1_name, LOCALGROUP_USERS_INFO_LEVEL, (unsigned char**)&usersInfo, 
MAX_PREFERRED_LENGTH, &userEntriesRead, &userEntriesTotal, NULL); + result = -1; + goto exit; } - else + + if(usersInfo != NULL) { - result = -1; + NetApiBufferFree(usersInfo); + usersInfo = NULL; + currentUserEntry = 0; + userEntriesRead = 0; + userEntriesTotal = 0; } + result = NetLocalGroupGetMembers(NULL, currentAccountName, LOCALGROUP_USERS_INFO_LEVEL, (unsigned char**)&usersInfo, MAX_PREFERRED_LENGTH, &userEntriesRead, &userEntriesTotal, NULL); +exit: return result; } @@ -1954,17 +2531,21 @@ bool NTLocalGroup::HasMoreUsers() // **************************************************************** char* NTLocalGroup::NextUserName() { - char* username = NULL; - LOCALGROUP_MEMBERS_INFO_0* thisEntry; + char* result = NULL; + unsigned long length; if(currentUserEntry < userEntriesRead) { - thisEntry = &(usersInfo[currentUserEntry]); - GetAccountNameBySID((char*)thisEntry->lgrmi0_sid, &username); + quickFree(&resultBuf); + GetAccountNameBySID((char*)usersInfo[currentUserEntry].lgrmi0_sid, NULL, &length); + resultBuf = (char*)malloc(length); + GetAccountNameBySID((char*)usersInfo[currentUserEntry].lgrmi0_sid, resultBuf, &length); + result = resultBuf; + currentUserEntry++; } - return username; + return result; } // **************************************************************** @@ -1972,11 +2553,10 @@ char* NTLocalGroup::NextUserName() // **************************************************************** NTLocalGroupList::NTLocalGroupList() { - entriesRead = 0; - totalEntries = 0; bufptr = NULL; currentEntry = 0; resumeHandle = 0; + resultBuf = NULL; } // **************************************************************** @@ -1989,6 +2569,7 @@ NTLocalGroupList::~NTLocalGroupList() NetApiBufferFree(bufptr); bufptr = NULL; } + quickFree(&resultBuf); } // **************************************************************** @@ -2027,15 +2608,19 @@ bool NTLocalGroupList::hasMore() // **************************************************************** char* NTLocalGroupList::nextLocalGroupName() { - char* localGroupName = NULL; - LOCALGROUP_INFO_1* thisEntry; + char* result = NULL; + unsigned long length; if(currentEntry < entriesRead) { - thisEntry = &(bufptr[currentEntry]); - localGroupName = UTF16ToUTF8(thisEntry->lgrpi1_name); + quickFree(&resultBuf); + UTF16ToUTF8(bufptr[currentEntry].lgrpi1_name, NULL, &length); + resultBuf = (char*)malloc(length); + UTF16ToUTF8(bufptr[currentEntry].lgrpi1_name, resultBuf, &length); + result = resultBuf; + currentEntry++; } - return localGroupName; + return result; } diff --git a/ldap/servers/ntds/netman/netman.h b/ldap/servers/ntds/netman/netman.h index 0771b16ac..2bbd820d6 100644 --- a/ldap/servers/ntds/netman/netman.h +++ b/ldap/servers/ntds/netman/netman.h @@ -63,13 +63,10 @@ public: NTUser(); ~NTUser(); - void NewUser(char* username); + int NewUser(char* username); int RetriveUserByAccountName(char* username); int RetriveUserBySIDHexStr(char* sidHexStr); - int StoreUser(); - int AddUser(); - int DeleteUser(char* username); - int ChangeUsername(char* oldUsername, char* newUsername); + int DeleteUser(); char* GetAccountName(); char* GetSIDHexStr(); @@ -118,7 +115,7 @@ public: char* NextLocalGroupName(); private: - USER_INFO_3* userInfo; + unsigned short* currentAccountName; GROUP_USERS_INFO_0* groupsInfo; DWORD currentGroupEntry; @@ -129,6 +126,8 @@ private: DWORD currentLocalGroupEntry; DWORD localGroupEntriesRead; DWORD localGroupEntriesTotal; + + char* resultBuf; }; // **************************************************************** @@ -150,6 +149,8 @@ private: DWORD 
totalEntries; DWORD resumeHandle; DWORD currentEntry; + + char* resultBuf; }; // **************************************************************** @@ -161,12 +162,10 @@ public: NTGroup(); ~NTGroup(); - void NewGroup(char* groupName); - int DeleteGroup(char* groupName); + int NewGroup(char* groupName); int RetriveGroupByAccountName(char* groupName); int RetriveGroupBySIDHexStr(char* sidHexStr); - int AddGroup(); - int StoreGroup(); + int DeleteGroup(); char* GetAccountName(); char* GetSIDHexStr(); @@ -180,13 +179,14 @@ public: char* NextUserName(); private: - GROUP_INFO_2* groupInfo; + unsigned short* currentAccountName; LOCALGROUP_USERS_INFO_0* usersInfo; DWORD currentUserEntry; DWORD userEntriesRead; DWORD userEntriesTotal; + char* resultBuf; }; // **************************************************************** @@ -208,6 +208,8 @@ private: DWORD totalEntries; DWORD resumeHandle; DWORD currentEntry; + + char* resultBuf; }; // **************************************************************** @@ -219,12 +221,10 @@ public: NTLocalGroup(); ~NTLocalGroup(); - void NewLocalGroup(char* localGroupName); - int DeleteLocalGroup(char* localGroupName); + int NewLocalGroup(char* localGroupName); int RetriveLocalGroupByAccountName(char* localGroupName); int RetriveLocalGroupBySIDHexStr(char* sidHexStr); - int AddLocalGroup(); - int StoreLocalGroup(); + int DeleteLocalGroup(); char* GetAccountName(); char* GetSIDHexStr(); @@ -238,12 +238,14 @@ public: char* NextUserName(); private: - LOCALGROUP_INFO_1* localGroupInfo; + unsigned short* currentAccountName; LOCALGROUP_MEMBERS_INFO_0* usersInfo; DWORD currentUserEntry; DWORD userEntriesRead; DWORD userEntriesTotal; + + char* resultBuf; }; // **************************************************************** @@ -265,4 +267,6 @@ private: DWORD totalEntries; DWORD resumeHandle; DWORD currentEntry; + + char* resultBuf; }; diff --git a/ldap/servers/ntds/wrapper/wrapper.conf b/ldap/servers/ntds/wrapper/wrapper.conf index 38fdbecb9..34d7f017e 100644 --- a/ldap/servers/ntds/wrapper/wrapper.conf +++ b/ldap/servers/ntds/wrapper/wrapper.conf @@ -63,10 +63,10 @@ wrapper.java.library.path.1=../lib #wrapper.java.additional.1= # Initial Java Heap Size (in MB) -#wrapper.java.initmemory=3 +wrapper.java.initmemory=4 # Maximum Java Heap Size (in MB) -#wrapper.java.maxmemory=64 +wrapper.java.maxmemory=256 # Application parameters. Add parameters as needed starting from 1 wrapper.app.parameter.1=org.apache.ldap.server.ServerMain
0
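The netman refactor above drops the cached GROUP_INFO/LOCALGROUP_INFO structures in favour of a stored account name, and it sizes every string conversion with two calls: one with a NULL destination to learn the required length, then a second into a buffer of exactly that size. As a rough illustration of the same sizing idiom in portable C, with snprintf standing in for the UTF8ToUTF16/BinToHexStr helpers (whose exact contracts are only assumed from the diff):

#include <stdio.h>
#include <stdlib.h>

/* The refactor asks the converter for the needed length first (NULL
 * destination), allocates exactly that much, then converts for real.
 * snprintf() supports the same two-pass idiom in standard C. */
int main(void)
{
    const char *name = "Domain Admins";

    /* Pass 1: NULL destination, only ask how much space is needed. */
    int needed = snprintf(NULL, 0, "group:%s", name);
    if (needed < 0) {
        return 1;
    }

    /* Pass 2: allocate needed + 1 for the terminator and fill it. */
    char *buf = malloc((size_t)needed + 1);
    if (buf == NULL) {
        return 1;
    }
    snprintf(buf, (size_t)needed + 1, "group:%s", name);

    printf("%s\n", buf);
    free(buf);
    return 0;
}

The same ask-then-allocate shape appears in NewGroup, RetriveGroupByAccountName, GetSIDHexStr and the list iterators above.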
9015bff232df33f9f7230b4b020522f60bbc4da2
389ds/389-ds-base
Issue 4535 - lib389 - Fix log function in backends.py Description: There was a typo in the log function call used by a lint test, which was breaking the freeipa healthcheck Relates: https://github.com/389ds/389-ds-base/issues/4535 Reviewed by: mreynolds (one line commit rule)
commit 9015bff232df33f9f7230b4b020522f60bbc4da2 Author: Mark Reynolds <[email protected]> Date: Mon Jan 18 09:54:30 2021 -0500 Issue 4535 - lib389 - Fix log function in backends.py Description: Had a typo for the log function in a lint test that is breaking freeipa healthcheck Relates: https://github.com/389ds/389-ds-base/issues/4535 Reviewed by: mreynolds (one line commit rule) diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py index 10d097c76..e915c0b56 100644 --- a/src/lib389/lib389/backend.py +++ b/src/lib389/lib389/backend.py @@ -527,7 +527,7 @@ class Backend(DSLdapObject): yield report except: # Suffix is not replicated - self.log.debug(f"_lint_cl_trimming - backend ({suffix}) is not replicated") + self._log.debug(f"_lint_cl_trimming - backend ({suffix}) is not replicated") pass def create_sample_entries(self, version):
0
9f49d5494f278f07a0810053eac064f443c0c802
389ds/389-ds-base
Resolves: #230673 Summary: LDAPI: referral mode needs LDAPI socket Problem Description: If you start the server in referral mode, e.g.: ns-slapd refer -D /etc/dirsrv/slapd-test -r ldap://laputa.example.com the UNIX socket for LDAPI was not opened, since the LDAPI configuration parameters are not read from dse.ldif at that point. Fix Description: add code to process nsslapd-ldapifilepath and nsslapd-ldapilisten in slapd_bootstrap_config.
commit 9f49d5494f278f07a0810053eac064f443c0c802 Author: Noriko Hosoi <[email protected]> Date: Wed May 14 18:39:31 2008 +0000 Resolves: #230673 Summary: LDAPI: referral mode needs LDAPI socket Problem Description: If you start the server with the referral mode, e.g., like this: ns-slapd refer -D /etc/dirsrv/slapd-test -r ldap://laputa.example.com UNIX socket for LDAPI was not opened since LDAPI configuration parameters are not read from dse.ldif at that moment. Fix Description: adding the code to process nsslapd-ldapifilepath and nsslapd-ldapilisten in slapd_bootstrap_config. diff --git a/ldap/servers/slapd/config.c b/ldap/servers/slapd/config.c index 757776995..fe62e9b12 100644 --- a/ldap/servers/slapd/config.c +++ b/ldap/servers/slapd/config.c @@ -375,7 +375,33 @@ slapd_bootstrap_config(const char *configdir) CONFIG_SASLPATH_ATTRIBUTE, errorbuf); } } +#if defined(ENABLE_LDAPI) + /* set the ldapi file path; needed in main */ + workpath[0] = '\0'; + if (entry_has_attr_and_value(e, CONFIG_LDAPI_FILENAME_ATTRIBUTE, + workpath, sizeof(workpath))) + { + if (config_set_ldapi_filename(CONFIG_LDAPI_FILENAME_ATTRIBUTE, + workpath, errorbuf, CONFIG_APPLY) != LDAP_SUCCESS) + { + LDAPDebug(LDAP_DEBUG_ANY, "%s: %s: %s. \n", configfile, + CONFIG_LDAPI_FILENAME_ATTRIBUTE, errorbuf); + } + } + /* set the ldapi switch; needed in main */ + workpath[0] = '\0'; + if (entry_has_attr_and_value(e, CONFIG_LDAPI_SWITCH_ATTRIBUTE, + workpath, sizeof(workpath))) + { + if (config_set_ldapi_switch(CONFIG_LDAPI_SWITCH_ATTRIBUTE, + workpath, errorbuf, CONFIG_APPLY) != LDAP_SUCCESS) + { + LDAPDebug(LDAP_DEBUG_ANY, "%s: %s: %s. \n", configfile, + CONFIG_LDAPI_SWITCH_ATTRIBUTE, errorbuf); + } + } +#endif /* see if the entry is a child of the plugin base dn */ if (slapi_sdn_isparent(&plug_dn, slapi_entry_get_sdn_const(e)))
0
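The fix works because slapd_bootstrap_config() reads a handful of settings directly from dse.ldif before the full configuration machinery runs, and the patch simply adds the two LDAPI attributes to that early pass. A minimal, self-contained sketch of the same idea, scanning an LDIF-style file for a single attribute; this is an illustration only, not the server's entry_has_attr_and_value()/config_set_* code, and it ignores LDIF line folding and base64 values:

#include <stdio.h>
#include <string.h>

/* Look up one "attr: value" line (e.g. "nsslapd-ldapilisten") in an
 * LDIF-style config file and copy its value into out. */
static int bootstrap_lookup(const char *path, const char *attr,
                            char *out, size_t outlen)
{
    char line[1024];
    size_t alen = strlen(attr);
    FILE *fp = fopen(path, "r");

    if (fp == NULL) {
        return -1;
    }
    while (fgets(line, sizeof(line), fp) != NULL) {
        if (strncmp(line, attr, alen) == 0 && line[alen] == ':') {
            const char *val = line + alen + 1;
            while (*val == ' ') {
                val++;
            }
            snprintf(out, outlen, "%s", val);
            out[strcspn(out, "\r\n")] = '\0';   /* strip trailing newline */
            fclose(fp);
            return 0;
        }
    }
    fclose(fp);
    return -1;
}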
76acff12a86110d4165f94e2cba13ef5c7ebc38a
389ds/389-ds-base
Ticket #47739 - directory server is insecurely misinterpreting authzid on a SASL/GSSAPI bind Description: The SASL_CB_PROXY_POLICY callback is not needed since we don't support the case where authid and authzid do not match. This patch gets rid of the callback function ids_sasl_proxy_policy. https://fedorahosted.org/389/ticket/47739 Reviewed by [email protected] (Thank you, Nathan!!)
commit 76acff12a86110d4165f94e2cba13ef5c7ebc38a Author: Noriko Hosoi <[email protected]> Date: Mon Mar 10 16:12:08 2014 -0700 Ticket #47739 - directory server is insecurely misinterpreting authzid on a SASL/GSSAPI bind Description: SASL_CB_PROXY_POLICY callback is not needed since we don't support the case authid and authzid do not match. This patch gets rid of the callback function ids_sasl_proxy_policy. https://fedorahosted.org/389/ticket/47739 Reviewed by [email protected] (Thank you, Nathan!!) diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c index 22c0588ac..ba606eb96 100644 --- a/ldap/servers/slapd/saslbind.c +++ b/ldap/servers/slapd/saslbind.c @@ -229,34 +229,6 @@ static int ids_sasl_log( return SASL_OK; } -static int ids_sasl_proxy_policy( - sasl_conn_t *conn, - void *context, - const char *requested_user, int rlen, - const char *auth_identity, int alen, - const char *def_realm, int urlen, - struct propctx *propctx -) -{ - int retVal = SASL_OK; - /* do not permit sasl proxy authorization */ - /* if the auth_identity is null or empty string, allow the sasl request to go thru */ - if ( (auth_identity != NULL ) && ( strlen(auth_identity) > 0 ) ) { - Slapi_DN authId , reqUser; - slapi_sdn_init_dn_byref(&authId,auth_identity); - slapi_sdn_init_dn_byref(&reqUser,requested_user); - if (slapi_sdn_compare((const Slapi_DN *)&reqUser,(const Slapi_DN *) &authId) != 0) { - LDAPDebug(LDAP_DEBUG_TRACE, - "sasl proxy auth not permitted authid=%s user=%s\n", - auth_identity, requested_user, 0); - retVal = SASL_NOAUTHZ; - } - slapi_sdn_done(&authId); - slapi_sdn_done(&reqUser); - } - return retVal; -} - static void ids_sasl_user_search( char *basedn, int scope, @@ -568,11 +540,6 @@ static sasl_callback_t ids_sasl_callbacks[] = (IFP) ids_sasl_log, NULL }, - { - SASL_CB_PROXY_POLICY, - (IFP) ids_sasl_proxy_policy, - NULL - }, { SASL_CB_CANON_USER, (IFP) ids_sasl_canon_user,
0
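With ids_sasl_proxy_policy removed from ids_sasl_callbacks[], no SASL_CB_PROXY_POLICY entry is registered at all. Assuming Cyrus SASL, the library then applies its default proxy policy, which only authorizes a request when the authorization identity matches the authentication identity, which is the behaviour the ticket wants. A sketch of a callback table with no proxy-policy entry (illustrative only; the real table also registers the log and canon_user callbacks shown in the diff):

#include <sasl/sasl.h>

/* No SASL_CB_PROXY_POLICY entry: under the library's default policy the
 * authzid must equal the authid. */
static sasl_callback_t example_callbacks[] = {
    /* log and canon_user entries would normally precede the terminator */
    { SASL_CB_LIST_END, NULL, NULL }
};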
f2b43ded61f6bccad6de70be3927567e4cd49e66
389ds/389-ds-base
Ticket 49275 - fix compiler warns for gcc 7 Bug Description: GCC 7 enables many new, stricter warnings about code quality. Fix Description: Fix another set of shadowed variables, case fall-throughs, and unused functions. https://pagure.io/389-ds-base/issue/49275 Author: wibrown Review by: mreynolds (Thanks!)
commit f2b43ded61f6bccad6de70be3927567e4cd49e66 Author: William Brown <[email protected]> Date: Thu Aug 10 12:10:24 2017 +1000 Ticket 49275 - fix compiler warns for gcc 7 Bug Description: GCC 7 enables many more stricter warnings about code quality. Fix Description: Fix another set of shadow vars, case fall throughs and unused functions. https://pagure.io/389-ds-base/issue/49275 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/ldap/servers/plugins/chainingdb/cb_acl.c b/ldap/servers/plugins/chainingdb/cb_acl.c index 2714b4697..0378231f9 100644 --- a/ldap/servers/plugins/chainingdb/cb_acl.c +++ b/ldap/servers/plugins/chainingdb/cb_acl.c @@ -32,13 +32,8 @@ cb_set_acl_policy(Slapi_PBlock *pb) or if the associated backend is disabled */ noacl = !(cb->local_acl) || cb->associated_be_is_disabled; - /* These branches are identical. Can we remove the if condition? */ - if (noacl) { - slapi_pblock_set(pb, SLAPI_PLUGIN_DB_NO_ACL, &noacl); - } else { - /* Be very conservative about acl evaluation */ - slapi_pblock_set(pb, SLAPI_PLUGIN_DB_NO_ACL, &noacl); - } + /* Be very conservative about acl evaluation */ + slapi_pblock_set(pb, SLAPI_PLUGIN_DB_NO_ACL, &noacl); } int diff --git a/ldap/servers/plugins/syntaxes/string.c b/ldap/servers/plugins/syntaxes/string.c index 364276b7a..f50dc1301 100644 --- a/ldap/servers/plugins/syntaxes/string.c +++ b/ldap/servers/plugins/syntaxes/string.c @@ -26,7 +26,7 @@ static int string_filter_approx(struct berval *bvfilter, Slapi_Value **bvals, Slapi_Value **retVal); -static void substring_comp_keys(Slapi_Value ***ivals, int *nsubs, char *str, int lenstr, int prepost, int syntax, char *comp_buf, int *substrlens); +static void substring_comp_keys(Slapi_Value ***ivals, int *nsubs, char *str, int lenstring, int prepost, int syntax, char *comp_buf, int *substrlens); int string_filter_ava(struct berval *bvfilter, Slapi_Value **bvals, int syntax, int ftype, Slapi_Value **retVal) @@ -901,7 +901,7 @@ substring_comp_keys( Slapi_Value ***ivals, int *nsubs, char *str, - int lenstr, + int lenstring, int prepost, int syntax __attribute__((unused)), char *comp_buf, @@ -929,7 +929,7 @@ substring_comp_keys( } substrlen = substrlens[INDEX_SUBSTRMIDDLE]; - for (p = str; p < (str + lenstr - substrlen + 1); p++) { + for (p = str; p < (str + lenstring - substrlen + 1); p++) { for (i = 0; i < substrlen; i++) { comp_buf[i] = p[i]; } @@ -940,7 +940,7 @@ substring_comp_keys( if (prepost == '$') { substrlen = substrlens[INDEX_SUBSTREND]; - p = str + lenstr - substrlen + 1; + p = str + lenstring - substrlen + 1; for (i = 0; i < substrlen - 1; i++) { comp_buf[i] = p[i]; } diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 6d23cd58d..d43258d2e 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -4634,42 +4634,49 @@ db_strtoul(const char *str, int *err) for (p = (char *)str; p && *p && (*p == ' ' || *p == '\t'); p++) ; if ('-' == *p) { - if (err) + if (err) { *err = ERANGE; + } return val; } val = strtoul(str, &p, 10); if (errno != 0) { - if (err) + if (err) { *err = errno; + } return val; } switch (*p) { case 'g': case 'G': - multiplier *= 1024; + multiplier *= 1024 * 1024 * 1024; + break; case 'm': case 'M': - multiplier *= 1024; + multiplier *= 1024 * 1024; + break; case 'k': case 'K': multiplier *= 1024; p++; - if (*p == 'b' || *p == 'B') + if (*p == 'b' || *p == 'B') { p++; + } if (err) { /* extra chars? */ *err = (*p != '\0') ? 
EINVAL : 0; } break; case '\0': - if (err) + if (err) { *err = 0; + } break; default: - if (err) + if (err) { *err = EINVAL; + } return val; } diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c index 45b1b9003..5b84d87f3 100644 --- a/ldap/servers/slapd/back-ldbm/sort.c +++ b/ldap/servers/slapd/back-ldbm/sort.c @@ -163,7 +163,6 @@ sort_candidates(backend *be, int lookthrough_limit, struct timespec *expire_time /* Iterate over the sort types */ for (this_s = s; this_s; this_s = this_s->next) { if (NULL == this_s->matchrule) { - int return_value = 0; return_value = attr_get_value_cmp_fn(&this_s->sattr, &(this_s->compare_fn)); if (return_value != 0) { slapi_log_err(SLAPI_LOG_TRACE, "sort_candidates", diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c index 915b2301c..afca37214 100644 --- a/ldap/servers/slapd/dn.c +++ b/ldap/servers/slapd/dn.c @@ -733,6 +733,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) subtypestart = d; /* prepare for '+' in the nested DN, if any */ } subrdn_av_count = 0; + /* FALLTHRU */ case INVALUE: /* in value; cn=ABC */ /* ^ */ if (ISESCAPE(*s)) { @@ -958,6 +959,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) subtypestart = d; /* prepare for '+' in the quoted value, if any */ } subrdn_av_count = 0; + /* FALLTHRU */ case INQUOTEDVALUE: if (ISQUOTE(*s)) { if (ISESCAPE(*(d - 1))) { /* the quote is escaped */ @@ -2433,10 +2435,10 @@ slapi_sdn_get_parent(const Slapi_DN *sdn, Slapi_DN *sdn_parent) void slapi_sdn_get_backend_parent_ext(const Slapi_DN *sdn, Slapi_DN *sdn_parent, - const Slapi_Backend *backend, + const Slapi_Backend *be, int is_tombstone) { - if (slapi_sdn_isempty(sdn) || slapi_be_issuffix(backend, sdn)) { + if (slapi_sdn_isempty(sdn) || slapi_be_issuffix(be, sdn)) { slapi_sdn_done(sdn_parent); } else { slapi_sdn_get_parent_ext(sdn, sdn_parent, is_tombstone); @@ -2444,9 +2446,9 @@ slapi_sdn_get_backend_parent_ext(const Slapi_DN *sdn, } void -slapi_sdn_get_backend_parent(const Slapi_DN *sdn, Slapi_DN *sdn_parent, const Slapi_Backend *backend) +slapi_sdn_get_backend_parent(const Slapi_DN *sdn, Slapi_DN *sdn_parent, const Slapi_Backend *be) { - slapi_sdn_get_backend_parent_ext(sdn, sdn_parent, backend, 0); + slapi_sdn_get_backend_parent_ext(sdn, sdn_parent, be, 0); } void diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index 6592103e7..fa9d276a3 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -1131,11 +1131,11 @@ slapi_ldap_bind( } } if (0 == myerrno) { - struct addrinfo *result = NULL; - gaierr = getaddrinfo(hostname, NULL, NULL, &result); + struct addrinfo *a_result = NULL; + gaierr = getaddrinfo(hostname, NULL, NULL, &a_result); myerrno = errno; - if (result) { - freeaddrinfo(result); + if (a_result) { + freeaddrinfo(a_result); } } @@ -2301,14 +2301,17 @@ mozldap_ldap_explode(const char *dn, const int notypes, const int nametype) plen = LDAP_UTF8LEN(p); break; case '"': - if (state == INQUOTE) + if (state == INQUOTE) { state = OUTQUOTE; - else + } else { state = INQUOTE; + } break; case '+': - if (nametype != LDAP_RDN) + if (nametype != LDAP_RDN) { break; + } + /* FALLTHRU */ case ';': case ',': case '\0': @@ -2334,8 +2337,9 @@ mozldap_ldap_explode(const char *dn, const int notypes, const int nametype) goteq = 0; ++count; if (rdns == NULL) { - if ((rdns = (char **)slapi_ch_malloc(8 * sizeof(char *))) == NULL) + if ((rdns = (char **)slapi_ch_malloc(8 * sizeof(char *))) == NULL) { 
return (NULL); + } } else if (count >= 8) { if ((rdns = (char **)slapi_ch_realloc( (char *)rdns, (count + 1) * @@ -2345,8 +2349,7 @@ mozldap_ldap_explode(const char *dn, const int notypes, const int nametype) rdns[count] = NULL; endquote = 0; if (notypes) { - for (q = rdnstart; - q < p && *q != '='; ++q) { + for (q = rdnstart; q < p && *q != '='; ++q) { ; } if (q < p) { /* *q == '=' */ @@ -2364,14 +2367,11 @@ mozldap_ldap_explode(const char *dn, const int notypes, const int nametype) } len = p - rdnstart; - if ((rdns[count - 1] = (char *)slapi_ch_calloc( - 1, len + 1)) != NULL) { - memcpy(rdns[count - 1], rdnstart, - len); + if ((rdns[count - 1] = (char *)slapi_ch_calloc(1, len + 1)) != NULL) { + memcpy(rdns[count - 1], rdnstart, len); if (!endquote) { /* trim trailing spaces */ - while (len > 0 && - (rdns[count - 1][len - 1] == ' ')) { + while (len > 0 && (rdns[count - 1][len - 1] == ' ')) { --len; } } @@ -2383,12 +2383,14 @@ mozldap_ldap_explode(const char *dn, const int notypes, const int nametype) * it should be. If we don't, then we will * never get past an "end quote." */ - if (endquote == 1) + if (endquote == 1) { p++; + } rdnstart = *p ? p + 1 : p; - while (ldap_utf8isspace(rdnstart)) + while (ldap_utf8isspace(rdnstart)) { ++rdnstart; + } } break; case '=': diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c index a361886b9..2302ae96b 100644 --- a/ldap/servers/slapd/result.c +++ b/ldap/servers/slapd/result.c @@ -1208,12 +1208,12 @@ send_all_attrs(Slapi_Entry *e, char **attrs, Slapi_Operation *op, Slapi_PBlock * int item_count = 0; int iter = 0; Slapi_DN *namespace_dn; - Slapi_Backend *backend = 0; + Slapi_Backend *be = 0; vattr_context *ctx; /* get the namespace dn */ - slapi_pblock_get(pb, SLAPI_BACKEND, (void *)&backend); - namespace_dn = (Slapi_DN *)slapi_be_getsuffix(backend, 0); + slapi_pblock_get(pb, SLAPI_BACKEND, (void *)&be); + namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0); /* Get the attribute value from the vattr service */ /* ctx will be freed by attr_context_ungrok() */ @@ -1345,7 +1345,7 @@ send_specific_attrs(Slapi_Entry *e, char **attrs, Slapi_Operation *op, Slapi_PBl int item_count = 0; int iter = 0; Slapi_DN *namespace_dn; - Slapi_Backend *backend = 0; + Slapi_Backend *be = 0; /* * Here we call the computed attribute code to see whether @@ -1366,8 +1366,8 @@ send_specific_attrs(Slapi_Entry *e, char **attrs, Slapi_Operation *op, Slapi_PBl } /* get the namespace dn */ - slapi_pblock_get(pb, SLAPI_BACKEND, (void *)&backend); - namespace_dn = (Slapi_DN *)slapi_be_getsuffix(backend, 0); + slapi_pblock_get(pb, SLAPI_BACKEND, (void *)&be); + namespace_dn = (Slapi_DN *)slapi_be_getsuffix(be, 0); /* Get the attribute value from the vattr service */ /* This call handles subtype, as well. 
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 11d207d15..941d32c3c 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -2248,6 +2248,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) err, prerr, slapd_pr_strerror(prerr)); } #endif + break; /* Give the client a clear opportunity to send her certificate: */ case SLAPD_SSLCLIENTAUTH_REQUIRED: if ((err = SSL_OptionSet(pr_sock, SSL_REQUEST_CERTIFICATE, PR_TRUE)) < 0) { @@ -2256,6 +2257,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) "SSL_OptionSet(SSL_REQUEST_CERTIFICATE,PR_TRUE) %d " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", err, prerr, slapd_pr_strerror(prerr)); } + break; default: break; } diff --git a/ldap/servers/slapd/tools/ldclt/ldclt.h b/ldap/servers/slapd/tools/ldclt/ldclt.h index bea17cbb3..1cfae35d7 100644 --- a/ldap/servers/slapd/tools/ldclt/ldclt.h +++ b/ldap/servers/slapd/tools/ldclt/ldclt.h @@ -196,7 +196,7 @@ dd/mm/yy | Author | Comments #define NEGATIVE_MAX_ERROR_NB (LDAP_X_CONNECTING - 1) /* Mininum ldap err number */ #endif #define MAX_IGN_ERRORS 20 /* Max errors ignored */ -#define MAX_FILTER 512 /* Max filters length */ +#define MAX_FILTER 4096 /* Max filters length */ #define MAX_THREADS 1000 /* Max number of threads */ /*JLS 21-11-00*/ #define MAX_SLAVES 20 /* Max number of slaves */ diff --git a/ldap/servers/slapd/utf8.c b/ldap/servers/slapd/utf8.c index dac52191d..b0667c636 100644 --- a/ldap/servers/slapd/utf8.c +++ b/ldap/servers/slapd/utf8.c @@ -68,20 +68,30 @@ ldap_utf8next(char *s) switch (UTF8len[(*next >> 2) & 0x3F]) { case 0: /* erroneous: s points to the middle of a character. */ case 6: - if ((*++next & 0xC0) != 0x80) + if ((*++next & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 5: - if ((*++next & 0xC0) != 0x80) + if ((*++next & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 4: - if ((*++next & 0xC0) != 0x80) + if ((*++next & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 3: - if ((*++next & 0xC0) != 0x80) + if ((*++next & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 2: - if ((*++next & 0xC0) != 0x80) + if ((*++next & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 1: ++next; } @@ -161,24 +171,34 @@ ldap_utf8copy(char *dst, const char *src) case 0: /* erroneous: s points to the middle of a character. */ case 6: *dst++ = *s++; - if ((*s & 0xC0) != 0x80) + if ((*s & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 5: *dst++ = *s++; - if ((*s & 0xC0) != 0x80) + if ((*s & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 4: *dst++ = *s++; - if ((*s & 0xC0) != 0x80) + if ((*s & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 3: *dst++ = *s++; - if ((*s & 0xC0) != 0x80) + if ((*s & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 2: *dst++ = *s++; - if ((*s & 0xC0) != 0x80) + if ((*s & 0xC0) != 0x80) { break; + } + /* FALLTHRU */ case 1: *dst = *s++; } diff --git a/src/libsds/test/test_sds_bpt.c b/src/libsds/test/test_sds_bpt.c index 8c2b09817..b45010981 100644 --- a/src/libsds/test/test_sds_bpt.c +++ b/src/libsds/test/test_sds_bpt.c @@ -162,6 +162,7 @@ test_9_insert_fill_and_split(void **state) } } +#ifdef SDS_DEBUG static void test_10_tamper_with_inst(void **state __attribute__((unused))) { @@ -183,7 +184,9 @@ test_10_tamper_with_inst(void **state __attribute__((unused))) /* If this reports unknown, it means we may not have freed some nodes. 
*/ assert_int_equal(result, SDS_SUCCESS); } +#endif +#ifdef SDS_DEBUG static void test_11_tamper_with_node(void **state __attribute__((unused))) { @@ -198,14 +201,13 @@ test_11_tamper_with_node(void **state __attribute__((unused))) binst->root->keys[0] = (void *)1; -#ifdef SDS_DEBUG result = sds_bptree_verify(binst); assert_int_equal(result, SDS_CHECKSUM_FAILURE); -#endif result = sds_bptree_destroy(binst); assert_int_equal(result, SDS_SUCCESS); } +#endif static void test_12_insert_fill_split_and_grow(void **state)
0
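One notable change above is in db_strtoul(): the size-suffix switch used to rely on deliberate fall-through ('g' cascaded through 'm' and 'k' to build up the multiplier), which is exactly the pattern GCC 7's -Wimplicit-fallthrough flags; the rewrite gives each suffix its own explicit multiplier and a break. A simplified, self-contained version of that shape (it omits the trailing 'b'/'B' handling and the extra-character check done in the real function):

#include <errno.h>
#include <stdlib.h>

/* Parse "123", "123k", "123m" or "123g" into a byte count, with one
 * explicit multiplier per suffix instead of cascading fall-through. */
static unsigned long parse_size(const char *str, int *err)
{
    char *end = NULL;
    unsigned long multiplier = 1;
    unsigned long val;

    errno = 0;
    val = strtoul(str, &end, 10);
    if (errno != 0) {
        if (err) {
            *err = errno;
        }
        return val;
    }

    switch (*end) {
    case 'g':
    case 'G':
        multiplier = 1024UL * 1024UL * 1024UL;
        break;
    case 'm':
    case 'M':
        multiplier = 1024UL * 1024UL;
        break;
    case 'k':
    case 'K':
        multiplier = 1024UL;
        break;
    case '\0':
        break;
    default:
        if (err) {
            *err = EINVAL;
        }
        return val;
    }
    if (err) {
        *err = 0;
    }
    return val * multiplier;
}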
b0e05806232b781eed3ff102485045a358d7659b
389ds/389-ds-base
Ticket 49736 - Hardening of active connection list Bug Description: If there is a bug in the management of the connection refcnt, there can be several attempts to move the same connection out of the active list. This triggers a crash when dereferencing c->c_prev, because c_prev is never NULL for a connection on the active list. Fix Description: The fix tests whether the connection is already out of the active list; in that case it just returns. A potential issue that is not addressed by this fix: Thread A and Thread B are using 'c' but c->refcnt=1 (it should be 2). Thread A "closes" 'c'; 'c' is moved out of the active list (freed) because refcnt=0. A new connection comes in and selects the free connection 'c', moving it back to the active list. Thread C is using 'c' for the new connection, with c->refcnt=1. Thread B "closes" 'c'; 'c' is moved out of the active list. -> new operations coming in on 'c' will not be detected -> Thread C will likely crash when sending its result https://pagure.io/389-ds-base/issue/49736 Reviewed by: Mark Reynolds (thanks!) Platforms tested: F26 Flag Day: no Doc impact: no
commit b0e05806232b781eed3ff102485045a358d7659b Author: Thierry Bordaz <[email protected]> Date: Fri Jun 1 16:12:40 2018 +0200 Ticket 49736 - Hardening of active connection list Bug Description: In case of a bug in the management of the connection refcnt it can happen that there are several attempts to move a connection out of the active list. It triggers a crash because when derefencing c->c_prev. c_prev is never NULL on the active list Fix Description: The fix tests if the connection is already out of the active list. If such case, it just returns. A potential issue that is not addressed by this fix is: Thread A and Thread B are using 'c' but c->refcnt=1 (it should be 2) Thread A "closes" 'c', 'c' is move out of active list (free) because of refcnt=0 A new connection happens selecting the free connection 'c', moving it to the active list. Thread C is using 'c' from the new connection c->refcnt=1 Thread B "closes" 'c', 'c' is moved out of the active list. -> new operation coming on 'c' will not be detected -> Thread C will likely crash when sending result https://pagure.io/389-ds-base/issue/49736 Reviewed by: Mark Reynolds (thanks!) Platforms tested: F26 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c index b9baac117..d4f36d345 100644 --- a/ldap/servers/slapd/conntable.c +++ b/ldap/servers/slapd/conntable.c @@ -243,6 +243,27 @@ connection_table_move_connection_out_of_active_list(Connection_Table *ct, Connec int c_sd; /* for logging */ /* we always have previous element because list contains a dummy header */; PR_ASSERT(c->c_prev); + if (c->c_prev == NULL) { + /* c->c_prev is set when the connection is moved ON the active list + * So this connection is already OUT of the active list + * + * Not sure how to recover from here. + * Considering c->c_prev is NULL we can assume refcnt is now 0 + * and connection_cleanup was already called. + * If it is not the case, then consequences are: + * - Leak some memory (connext, unsent page result entries, various buffers) + * - hanging connection (fd not closed) + * A option would be to call connection_cleanup here. + * + * The logged message helps to know how frequently the problem exists + */ + slapi_log_err(SLAPI_LOG_CRIT, + "connection_table_move_connection_out_of_active_list", + "conn %d is already OUT of the active list (refcnt is %d)\n", + c->c_sd, c->c_refcnt); + + return 0; + } #ifdef FOR_DEBUGGING slapi_log_err(SLAPI_LOG_DEBUG, "connection_table_move_connection_out_of_active_list", "Moving connection out of active list\n");
0
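The connection table keeps active connections on a doubly linked list with a dummy header, so c_prev is only NULL for a connection that is not (or no longer) on that list; the fix adds a logged early return after the existing PR_ASSERT instead of dereferencing the NULL pointer. A stripped-down sketch of that defensive unlink (list housekeeping only; the real function also deals with refcounts, locking and the socket):

#include <stdio.h>

/* Doubly linked list with a dummy header: every element actually on the
 * list has a non-NULL prev pointer. */
struct node {
    struct node *prev;
    struct node *next;
    int id;
};

/* Defensive unlink: if prev is already NULL the element was removed
 * once before, so report it and bail out rather than crash. */
static int unlink_node(struct node *n)
{
    if (n->prev == NULL) {
        fprintf(stderr, "node %d is already off the active list\n", n->id);
        return 0;
    }
    n->prev->next = n->next;
    if (n->next != NULL) {
        n->next->prev = n->prev;
    }
    n->prev = NULL;
    n->next = NULL;
    return 1;
}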
2bee54eb9cba7fae1e2a93452d1adc1d9ca0a356
389ds/389-ds-base
Update rpm.mk for RUST tarballs
commit 2bee54eb9cba7fae1e2a93452d1adc1d9ca0a356 Author: Mark Reynolds <[email protected]> Date: Thu Jan 14 16:47:25 2021 -0500 Update rpm.mk for RUST tarballs diff --git a/rpm.mk b/rpm.mk index f08cf61d7..d06ed32e9 100644 --- a/rpm.mk +++ b/rpm.mk @@ -66,7 +66,7 @@ ifeq ($(COCKPIT_ON), 1) mkdir -p $(NODE_MODULES_TEST) touch -r src/cockpit/389-console/package.json $(NODE_MODULES_TEST) endif - tar cjf $(GIT_TAG).tar.bz2 --transform "s,^,$(GIT_TAG)/," $$(git ls-files) src/cockpit/389-console/cockpit_dist/ src/cockpit/389-console/node_modules + tar cjf $(GIT_TAG).tar.bz2 --transform "s,^,$(GIT_TAG)/," $$(git ls-files) vendor/ src/cockpit/389-console/cockpit_dist/ src/cockpit/389-console/node_modules ifeq ($(COCKPIT_ON), 1) cd src/cockpit/389-console; \ rm -rf node_modules; \ diff --git a/src/Cargo.lock b/src/Cargo.lock index 9db63482b..550545941 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2,11 +2,13 @@ # It is not intended for manual editing. [[package]] name = "ahash" -version = "0.4.7" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" +checksum = "a75b7e6a93ecd6dbd2c225154d0fa7f86205574ecaa6c87429fb5f66ee677c44" dependencies = [ - "const-random", + "getrandom 0.2.1", + "lazy_static", + "version_check", ] [[package]] @@ -58,9 +60,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "cbindgen" @@ -117,9 +119,9 @@ dependencies = [ [[package]] name = "concread" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14fe52c39ed4e846fb3e6ad4bfe46224ef24db64ff7c5f496d2501c88c270b14" +checksum = "1eef33d95d263ebf9423049e76de365a2788e42c2ea48fd5a9fdb98596ae09cb" dependencies = [ "ahash", "crossbeam", @@ -132,34 +134,18 @@ dependencies = [ ] [[package]] -name = "const-random" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f590d95d011aa80b063ffe3253422ed5aa462af4e9867d43ce8337562bac77c4" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.13" +name = "const_fn" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615f6e27d000a2bffbc7f2f6a8669179378fa27ee4d0a509e985dfc0a7defb40" -dependencies = [ - "getrandom 0.2.1", - "lazy_static", - "proc-macro-hack", - "tiny-keccak", -] +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "crossbeam" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" +checksum = "fd01a6eb3daaafa260f6fc94c3a6c36390abc2080e38e3e34ced87393fb77d80" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -169,68 +155,60 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +checksum = 
"dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ + "cfg-if 1.0.0", "crossbeam-utils", - "maybe-uninit", ] [[package]] name = "crossbeam-deque" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ + "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", - "maybe-uninit", ] [[package]] name = "crossbeam-epoch" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "autocfg", - "cfg-if 0.1.10", + "cfg-if 1.0.0", + "const_fn", "crossbeam-utils", "lazy_static", - "maybe-uninit", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.2.3" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +checksum = "0f6cb3c7f5b8e51bc3ebb73a2327ad4abdbd119dc13223f14f961d2f38486756" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "crossbeam-utils", - "maybe-uninit", ] [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "lazy_static", ] -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "entryuuid" version = "0.1.0" @@ -299,7 +277,7 @@ checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.1+wasi-snapshot-preview1", ] [[package]] @@ -377,24 +355,18 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" dependencies = [ "cfg-if 0.1.10", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" dependencies = [ "autocfg", ] @@ -522,7 +494,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "smallvec", "winapi", ] @@ -597,11 +569,10 @@ dependencies = [ [[package]] name = "rand" -version = "0.7.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = 
"18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ - "getrandom 0.1.16", "libc", "rand_chacha", "rand_core", @@ -610,9 +581,9 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", "rand_core", @@ -620,18 +591,18 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.1.16", + "getrandom 0.2.1", ] [[package]] name = "rand_hc" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ "rand_core", ] @@ -642,6 +613,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -665,18 +645,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" dependencies = [ "proc-macro2", "quote", @@ -712,9 +692,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "strsim" @@ -735,14 +715,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "rand", - "redox_syscall", + "redox_syscall 0.2.4", "remove_dir_all", "winapi", ] @@ -756,15 +736,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "toml" version = "0.5.8" @@ -788,11 +759,11 @@ checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "uuid" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "rand", + "getrandom 0.2.1", ] [[package]] @@ -807,6 +778,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -815,9 +792,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "winapi"
0
b32761a5797fa5efb8ab65ae2d2e0c539357e73c
389ds/389-ds-base
Resolves: bug 458507 Bug Description: Memory leak setting new password storage scheme Reviewed by: nkinder (Thanks!) Branch: HEAD Fix Description: In config_set_pw_storagescheme, new_scheme is allocated in both the non-apply and the apply case, but it is only freed in the apply case. The solution is to free it in the non-apply case. Platforms tested: RHEL5, Fedora 8 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none
commit b32761a5797fa5efb8ab65ae2d2e0c539357e73c Author: Rich Megginson <[email protected]> Date: Wed Aug 27 21:05:25 2008 +0000 Resolves: bug 458507 Bug Description: Memory leak setting new password storage scheme Reviewed by: nkinder (Thanks!) Branch: HEAD Fix Description: In config_set_pw_storagescheme, new_schema is allocated in both the non apply and the apply case, but it is only freed in the apply case. The solution is to free it in the non apply case. Platforms tested: RHEL5, Fedora 8 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 21ffa4c24..eef97ecd1 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -1510,6 +1510,8 @@ config_set_pw_storagescheme( const char *attrname, char *value, char *errorbuf, slapdFrontendConfig->pw_storagescheme = new_scheme; CFG_UNLOCK_WRITE(slapdFrontendConfig); + } else { + free_pw_scheme(new_scheme); } slapi_ch_free_string(&scheme_list);
0
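A minimal C sketch of the ownership pattern behind the fix above: a value is allocated on every call, but ownership is only transferred in the apply branch, so the non-apply branch has to free it itself. The names here (scheme_new, set_scheme, g_current) are invented for illustration and are not the 389-ds-base APIs.

    #include <stdlib.h>
    #include <string.h>

    struct scheme { char *name; };

    static struct scheme *g_current;     /* owned by the config once applied */

    static struct scheme *scheme_new(const char *name)
    {
        struct scheme *s = malloc(sizeof(*s));
        if (s) {
            s->name = strdup(name);
        }
        return s;
    }

    static void scheme_free(struct scheme *s)
    {
        if (s) {
            free(s->name);
            free(s);
        }
    }

    /* Validate the value, and only install it when apply is non-zero. */
    static int set_scheme(const char *value, int apply)
    {
        struct scheme *new_scheme = scheme_new(value);  /* allocated in BOTH cases */
        if (new_scheme == NULL) {
            return -1;
        }
        if (apply) {
            scheme_free(g_current);
            g_current = new_scheme;      /* ownership transferred */
        } else {
            scheme_free(new_scheme);     /* the fix: free it when not applied */
        }
        return 0;
    }

    int main(void)
    {
        set_scheme("SSHA512", 0);        /* validate only - no longer leaks */
        set_scheme("SSHA512", 1);        /* apply */
        scheme_free(g_current);
        return 0;
    }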
0d327240c853bb2c06dc9fdec39e1e649a363c18
389ds/389-ds-base
Bug 690882 - (cov#10636,10637) Useless comparison in attrcrypt The backend attribute encryption code makes a useless comparison when doing error checking. We are comparing an array with NULL, but an array will always be non-NULL, so we can just check if state_priv is NULL.
commit 0d327240c853bb2c06dc9fdec39e1e649a363c18 Author: Nathan Kinder <[email protected]> Date: Fri Mar 25 10:07:14 2011 -0700 Bug 690882 - (cov#10636,10637) Useless comparison in attrcrypt The backend attribute encryption code makes a useless comparison when doing error checking. We are comparing an array with NULL, but an array will always be non-NULL, so we can just check if state_priv is NULL. diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c index 9f22a5899..f700434a0 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c @@ -1116,7 +1116,7 @@ back_crypt_encrypt_value(void *handle, struct berval *in, struct berval **out) goto bail; } *out = NULL; - if (!state_priv || !state_priv->acs_array) { + if (!state_priv) { goto bail; } invalue = slapi_value_new_berval(in); @@ -1147,7 +1147,7 @@ back_crypt_decrypt_value(void *handle, struct berval *in, struct berval **out) goto bail; } *out = NULL; - if (!state_priv || !state_priv->acs_array) { + if (!state_priv) { goto bail; } invalue = slapi_value_new_berval(in);
0
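The Coverity finding above is a general C property: an array embedded directly in a struct decays to the address of its first element, which can never be NULL, so testing it adds nothing once the struct pointer itself has been checked. A minimal, hypothetical sketch (the struct and field names are invented, loosely modeled on the acs_array member in the diff):

    #include <stdio.h>

    struct state {
        int acs_array[4];                /* embedded array, not a pointer */
    };

    static int use_state(const struct state *state_priv)
    {
        /* Before the fix: if (!state_priv || !state_priv->acs_array) ...
         * state_priv->acs_array decays to &state_priv->acs_array[0], which is
         * non-NULL for any valid object, so the second test can never fire.
         * Checking the struct pointer alone is sufficient. */
        if (!state_priv) {
            return -1;
        }
        return state_priv->acs_array[0];
    }

    int main(void)
    {
        struct state s = { { 7, 0, 0, 0 } };
        printf("%d %d\n", use_state(&s), use_state(NULL));
        return 0;
    }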
788a48fa98bb33a6601d7a198aa63b46d12ad78d
389ds/389-ds-base
Ticket #47750 - Creating a glue fails if one above level is a conflict or missing Description: This commit accidentally removed the code that decrements the reference count for the backend instance. Commit: 160cb3f686e433c01532d28770b2977ec957e73e Ticket #47750 - Creating a glue fails if one above level is a conflict or missing; Thanks to [email protected] for finding this out.
commit 788a48fa98bb33a6601d7a198aa63b46d12ad78d Author: Noriko Hosoi <[email protected]> Date: Mon Jun 30 11:27:15 2014 -0700 Ticket #47750 - Creating a glue fails if one above level is a conflict or missing Description: This commit accidentally removed the code to decrementing the reference count for the backend instance. Commit: 160cb3f686e433c01532d28770b2977ec957e73e Ticket #47750 - Creating a glue fails if one above level is a conflict or missing; Thanks to [email protected] for finding this out. diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index 39fe70613..a365ce5ed 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -1385,7 +1385,9 @@ common_return: if (!delete_tombstone_entry) { plugin_call_plugins (pb, SLAPI_PLUGIN_BE_POST_DELETE_FN); } - + if (inst && inst->inst_ref_count) { + slapi_counter_decrement(inst->inst_ref_count); + } if (ruv_c_init) { modify_term(&ruv_c, be); }
0
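The restored lines above re-balance a reference count: every code path that bumped the backend instance's counter must drop it again on the way out, including the common return path. A minimal, hypothetical C sketch of that acquire/release pairing (names are invented; the real code uses slapi_counter_increment/decrement on inst->inst_ref_count):

    #include <stdio.h>

    struct instance {
        long ref_count;                  /* how many operations use this backend */
    };

    static void inst_acquire(struct instance *inst)
    {
        if (inst) {
            inst->ref_count++;
        }
    }

    static void inst_release(struct instance *inst)
    {
        if (inst && inst->ref_count > 0) {
            inst->ref_count--;           /* the decrement the patch restores */
        }
    }

    /* Every exit path must release what the operation acquired, otherwise the
     * instance looks permanently busy and can never be taken offline. */
    static void delete_operation(struct instance *inst, int fail_early)
    {
        inst_acquire(inst);
        if (fail_early) {
            goto common_return;          /* early exit still reaches the release */
        }
        /* ... perform the delete ... */
    common_return:
        inst_release(inst);
    }

    int main(void)
    {
        struct instance be = { 0 };
        delete_operation(&be, 0);
        delete_operation(&be, 1);
        printf("ref_count = %ld\n", be.ref_count);   /* 0: balanced */
        return 0;
    }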
4aafe7444d983c08b16a84b7c23c8d303de45dc6
389ds/389-ds-base
Ticket #47553 - Enhance ACIs to have more control over MODRDN operations Description: Macro SLAPI_ACL_ALL does not contain SLAPI_ACL_MODDN. Thus, even though all operations are allowed by "allow (all)", only modrdn fails with "Insufficient access (50)". https://fedorahosted.org/389/ticket/47553 Reviewed by [email protected] (Thank you, Thierry!!)
commit 4aafe7444d983c08b16a84b7c23c8d303de45dc6 Author: Noriko Hosoi <[email protected]> Date: Wed Oct 15 16:20:51 2014 -0700 Ticket #47553 - Enhance ACIs to have more control over MODRDN operations Description: Macro SLAPI_ACL_ALL does not contain SLAPI_ACL_MODDN. Thus, even though all operations are allowed by "allow (all)", just modrdn fails with "Insufficient access (50)". https://fedorahosted.org/389/ticket/47553 Reviewed by [email protected] (Thank you, Thierry!!) diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 268e46514..975ad04b1 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -237,12 +237,12 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...) #define SLAPI_ACL_ADD 0x20 #define SLAPI_ACL_SELF 0x40 #define SLAPI_ACL_PROXY 0x80 -#define SLAPI_ACL_ALL 0x7f /* Values 0x200 and 0x400 are booked (acl.h) by * ACLPB_SLAPI_ACL_WRITE_ADD * ACLPB_SLAPI_ACL_WRITE_DEL */ #define SLAPI_ACL_MODDN 0x0800 +#define SLAPI_ACL_ALL 0x087f /*
0
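The root cause above is a composite bitmask that did not include one of the flags it was supposed to cover. The hypothetical C sketch below reuses the values visible in the diff (SLAPI_ACL_MODDN 0x0800, the old ALL mask 0x7f, the new one 0x087f) but with illustrative names:

    #include <stdio.h>

    #define ACL_ADD      0x20u
    #define ACL_MODDN    0x0800u
    #define ACL_ALL_OLD  0x7fu                 /* does not cover ACL_MODDN */
    #define ACL_ALL_NEW  (0x7fu | ACL_MODDN)   /* == 0x087f, covers modrdn too */

    /* An operation is allowed when every requested bit is granted. */
    static int allowed(unsigned granted, unsigned requested)
    {
        return (granted & requested) == requested;
    }

    int main(void)
    {
        printf("add   under old ALL: %d\n", allowed(ACL_ALL_OLD, ACL_ADD));   /* 1 */
        printf("moddn under old ALL: %d\n", allowed(ACL_ALL_OLD, ACL_MODDN)); /* 0 */
        printf("moddn under new ALL: %d\n", allowed(ACL_ALL_NEW, ACL_MODDN)); /* 1 */
        return 0;
    }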
7272dbda7f43974eed003cbcfc0ddd57fe433687
389ds/389-ds-base
Ticket 47577 - crash when removing entries from cache Bug Description: when the dn of an entry in the cache was adjusted to the parent dn, the dn was undefined for some time, and another thread accessing the dn of the cached entry could crash Fix Description: hold the cache mutex when modifying the dn of an entry in the cache https://fedorahosted.org/389/ticket/47577 Reviewed by: rmeggins
commit 7272dbda7f43974eed003cbcfc0ddd57fe433687 Author: Ludwig Krispenz <[email protected]> Date: Tue Nov 5 16:58:44 2013 +0100 Ticket 47577 - crash when removing entries from cache Bug Description: when the dn of an entry in the cache was adjusted to the parent dn, for soenm time teh dn was not defined, anothe thread accessing the dn of teh chached entry could crash Fix Description: hold the cache mutex when modifyingthe dn of an entry in the cache https://fedorahosted.org/389/ticket/47577 Reviewed by: rmeggins diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index 10489e3c7..26e081c04 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -409,6 +409,8 @@ struct cache { #define CACHE_ADD(cache, p, a) cache_add((cache), (void *)(p), (void **)(a)) #define CACHE_RETURN(cache, p) cache_return((cache), (void **)(p)) #define CACHE_REMOVE(cache, p) cache_remove((cache), (void *)(p)) +#define CACHE_LOCK(cache) cache_lock((cache)) +#define CACHE_UNLOCK(cache) cache_unlock((cache)) /* various modules keep private data inside the attrinfo structure */ typedef struct dblayer_private dblayer_private; diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index 5fa06fbfc..a73ae6aba 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -1472,6 +1472,14 @@ int cache_add_tentative(struct cache *cache, struct backentry *e, { return entrycache_add_int(cache, e, ENTRY_STATE_CREATING, alt); } +void cache_lock(struct cache *cache) +{ + PR_Lock(cache->c_mutex); +} +void cache_unlock(struct cache *cache) +{ + PR_Unlock(cache->c_mutex); +} /* locks an entry so that it can be modified (you should have gotten the * entry via cache_find_*). diff --git a/ldap/servers/slapd/back-ldbm/id2entry.c b/ldap/servers/slapd/back-ldbm/id2entry.c index 281bb1b66..e904ae312 100644 --- a/ldap/servers/slapd/back-ldbm/id2entry.c +++ b/ldap/servers/slapd/back-ldbm/id2entry.c @@ -167,10 +167,12 @@ id2entry_add_ext(backend *be, struct backentry *e, back_txn *txn, if (myparentdn && PL_strcmp(parentdn, myparentdn)) { Slapi_DN *sdn = slapi_entry_get_sdn(e->ep_entry); char *newdn = NULL; + CACHE_LOCK(&inst->inst_cache); slapi_sdn_done(sdn); newdn = slapi_ch_smprintf("%s,%s", myrdn, parentdn); slapi_sdn_init_dn_passin(sdn, newdn); slapi_sdn_get_ndn(sdn); /* to set ndn */ + CACHE_UNLOCK(&inst->inst_cache); } slapi_ch_free_string(&myparentdn); } diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index 7a7b7ff3b..937974e59 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -76,6 +76,8 @@ void cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries, void cache_debug_hash(struct cache *cache, char **out); int cache_remove(struct cache *cache, void *e); void cache_return(struct cache *cache, void **bep); +void cache_lock(struct cache *cache); +void cache_unlock(struct cache *cache); struct backentry *cache_find_dn(struct cache *cache, const char *dn, unsigned long ndnlen); struct backentry *cache_find_id(struct cache *cache, ID id); struct backentry *cache_find_uuid(struct cache *cache, const char *uuid);
0
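The crash above is a classic read/write race: the writer tears the dn down and rebuilds it, and a reader that runs in between sees a missing dn, so the fix serializes both sides on the cache lock. The sketch below is a minimal, hypothetical version of the same idea using POSIX threads; the real code uses the NSPR lock behind the new CACHE_LOCK/CACHE_UNLOCK macros and Slapi_DN helpers instead of plain strings.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cached_entry {
        pthread_mutex_t lock;            /* stands in for the cache's c_mutex */
        char *dn;
    };

    /* Writer: rebuild the dn under the lock so the "dn is gone" window is
     * never visible to readers. */
    static void entry_reparent(struct cached_entry *e, const char *rdn,
                               const char *parentdn)
    {
        size_t len = strlen(rdn) + strlen(parentdn) + 2;
        char *newdn = malloc(len);
        if (newdn == NULL) {
            return;
        }
        snprintf(newdn, len, "%s,%s", rdn, parentdn);
        pthread_mutex_lock(&e->lock);
        free(e->dn);
        e->dn = NULL;                    /* without the lock a reader could observe this state */
        e->dn = newdn;
        pthread_mutex_unlock(&e->lock);
    }

    /* Reader: take the same lock before touching the dn. */
    static void entry_print_dn(struct cached_entry *e)
    {
        pthread_mutex_lock(&e->lock);
        printf("dn: %s\n", e->dn ? e->dn : "(none)");
        pthread_mutex_unlock(&e->lock);
    }

    int main(void)
    {
        struct cached_entry e = { PTHREAD_MUTEX_INITIALIZER,
                                  strdup("uid=u1,ou=old,dc=example,dc=com") };
        entry_reparent(&e, "uid=u1", "ou=new,dc=example,dc=com");
        entry_print_dn(&e);
        free(e.dn);
        return 0;
    }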
a640ac21971fef404c690594c83a7f54c334fe90
389ds/389-ds-base
Ticket #369 - restore of replica ldif file on second master after deleting two records shows only 1 deletion Bug Description: If you take a "db2ldif -r" backup on a consumer and later restore it with "ldif2db", any changes made on that consumer after the backup (db2ldif) will not be replayed back to the consumer after it has been restored (ldif2db). Fix Description: When we check if we can skip an update from the changelog, also check if the consumer-originated csn is "newer" than the consumer's current max csn. If it is, then it needs to be replayed back to the consumer. https://fedorahosted.org/389/ticket/369 Reviewed by: Nathan & Rich (Thanks!)
commit a640ac21971fef404c690594c83a7f54c334fe90 Author: Mark Reynolds <[email protected]> Date: Wed May 30 11:06:55 2012 -0400 Ticket #369 - restore of replica ldif file on second master after deleting two records shows only 1 deletion Bug Description: If you take a "db2ldif -r" on a consumer and later restore it "ldif2db" any changes made on that consumer after the backup(db2ldif), will not be replayed back to the consumer after it has been restored(ldif2db). Fix Description: When we check if we can skip updates from the change log, check if the consumer csn is "newer" than its current max csn. If it is, then it needs to be replayed back to itself. https://fedorahosted.org/389/ticket/369 Reviewed by: Nathan & Rich (Thanks!) diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c index 327cb6f0b..581683794 100644 --- a/ldap/servers/plugins/replication/cl5_clcache.c +++ b/ldap/servers/plugins/replication/cl5_clcache.c @@ -664,13 +664,24 @@ clcache_skip_change ( CLC_Buffer *buf ) rid = csn_get_replicaid ( buf->buf_current_csn ); /* - * Skip CSN that is originated from the consumer. + * Skip CSN that is originated from the consumer, + * unless the CSN is newer than the maxcsn. * If RID==65535, the CSN is originated from a * legacy consumer. In this case the supplier * and the consumer may have the same RID. */ - if (rid == buf->buf_consumer_rid && rid != MAX_REPLICA_ID) + if (rid == buf->buf_consumer_rid && rid != MAX_REPLICA_ID){ + CSN *cons_maxcsn = NULL; + + ruv_get_max_csn(buf->buf_consumer_ruv, &cons_maxcsn); + if ( csn_compare ( buf->buf_current_csn, cons_maxcsn) > 0 ) { + /* + * The consumer must have been "restored" and needs this newer update. + */ + skip = 0; + } break; + } /* Skip helper entry (ENTRY_COUNT, PURGE_RUV and so on) */ if ( cl5HelperEntry ( NULL, buf->buf_current_csn ) == PR_TRUE ) {
0
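The patch above adds one more condition to the "can this changelog entry be skipped?" decision: a change that originated at the consumer is normally skipped, unless its csn is newer than the consumer's max csn, which means the consumer was restored from an older backup and needs its own change replayed. A simplified, hypothetical C sketch of that decision, with csns reduced to plain integers (real CSNs also carry a replica id and are compared with csn_compare):

    #include <stdio.h>

    typedef long csn_t;

    struct update {
        int   origin_rid;                /* replica id where the change originated */
        csn_t csn;
    };

    /* Return 1 if the update can be skipped for this consumer. */
    static int skip_update(const struct update *u, int consumer_rid,
                           csn_t consumer_maxcsn)
    {
        if (u->origin_rid == consumer_rid) {
            /* The consumer normally already has its own changes - skip them.
             * But a csn newer than the consumer's max csn means the consumer
             * was restored from an older backup and still needs this change. */
            return u->csn <= consumer_maxcsn;
        }
        return 0;   /* changes from other replicas: decided by other checks */
    }

    int main(void)
    {
        struct update old_own = { 7, 100 };
        struct update new_own = { 7, 300 };
        printf("skip old own change: %d\n", skip_update(&old_own, 7, 200));  /* 1 */
        printf("skip new own change: %d\n", skip_update(&new_own, 7, 200));  /* 0 */
        return 0;
    }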
21b96f1a03b481c365c4a45adbfbd28b5f7709db
389ds/389-ds-base
Ticket 48335 - Add SASL support to lib389 Description: Added SASL (GSSAPI) support to lib389, and also added support for TLS. Via William Brown, relaxed the strict localhost lookup for GSSAPI. Fixed pep8 errors in replica.py. https://fedorahosted.org/389/ticket/48335 Reviewed by: nhosoi (Thanks!)
commit 21b96f1a03b481c365c4a45adbfbd28b5f7709db Author: Mark Reynolds <[email protected]> Date: Mon Nov 9 16:13:40 2015 -0500 Ticket 48335 - Add SASL support to lib389 Description: Added SASL(GSSAPI) support to lib389, also added support for doing TLS. Via William Brown, relaxed strict localhost lookup for GSSAPI Fixed pep8 errors in replica.py https://fedorahosted.org/389/ticket/48335 Reviewed by: nhosoi(Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 604758502..27db583a3 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -11,30 +11,25 @@ naming: filterstr, attrlist """ try: - from subprocess import Popen, PIPE, STDOUT + from subprocess import Popen, PIPE HASPOPEN = True except ImportError: import popen2 HASPOPEN = False -import io import sys import os import stat import pwd import grp import os.path -import base64 -import socket import ldif import re import ldap +import ldap.sasl import time -import operator import shutil -import datetime import logging -import decimal import glob import tarfile import subprocess @@ -43,16 +38,12 @@ import six.moves.urllib.request import six.moves.urllib.parse import six.moves.urllib.error import six -from ldapurl import LDAPUrl from ldap.ldapobject import SimpleLDAPObject -from ldap.cidict import cidict -from ldap import LDAPError # file in this package from lib389._constants import * from lib389.properties import * from lib389._entry import Entry -from lib389._replication import CSN, RUV from lib389._ldifconn import LDIFConn from lib389.tools import DirSrvTools from lib389.utils import ( @@ -494,7 +485,7 @@ class DirSrv(SimpleLDAPObject): (self.sslport or self.port))) - def openConnection(self): + def openConnection(self, saslmethod=None, certdir=None): # Open a new connection to our LDAP server server = DirSrv(verbose=False) args_instance[SER_HOST] = self.host @@ -502,7 +493,7 @@ class DirSrv(SimpleLDAPObject): args_instance[SER_SERVERID_PROP] = self.serverid args_standalone = args_instance.copy() server.allocate(args_standalone) - server.open() + server.open(saslmethod, certdir) return server @@ -900,7 +891,7 @@ class DirSrv(SimpleLDAPObject): self.state = DIRSRV_STATE_ALLOCATED - def open(self): + def open(self, saslmethod=None, certdir=None): ''' It opens a ldap bound connection to dirsrv so that online administrative tasks are possible. 
It binds with the binddn @@ -910,44 +901,65 @@ class DirSrv(SimpleLDAPObject): The state changes -> DIRSRV_STATE_ONLINE @param self - + @param saslmethod - None, or GSSAPI + @param certdir - Certificate directory for TLS @return None - @raise ValueError - if can not find the binddn to bind + @raise LDAPError ''' uri = self.toLDAPURL() - SimpleLDAPObject.__init__(self, uri) - # see if binddn is a dn or a uid that we need to lookup - if self.binddn and not is_a_dn(self.binddn): - self.simple_bind_s("", "") # anon - ent = self.getEntry(CFGSUFFIX, ldap.SCOPE_SUBTREE, - "(uid=%s)" % self.binddn, - ['uid']) - if ent: - self.binddn = ent.dn - else: - raise ValueError("Error: could not find %s under %s" % ( - self.binddn, CFGSUFFIX)) + if certdir: + """ + We have a certificate directory, so lets start up TLS negotiations + """ + try: + self.set_option(ldap.OPT_X_TLS_CACERTFILE, certdir) + self.start_tls_s() + except ldap.LDAPError as e: + log.fatal('TLS negotiation failed: %s' % str(e)) + raise e - needtls = False - while True: + if saslmethod and saslmethod.lower() == 'gssapi': + """ + Perform kerberos/gssapi authentication + """ try: - if needtls: - self.start_tls_s() - try: - self.simple_bind_s(self.binddn, self.bindpw) - except ldap.SERVER_DOWN as e: - # TODO add server info in exception - log.debug("Cannot connect to %r" % uri) - raise e - break - except ldap.CONFIDENTIALITY_REQUIRED: - needtls = True - self.__initPart2() + sasl_auth = ldap.sasl.gssapi("") + self.sasl_interactive_bind_s("", sasl_auth) + except ldap.LOCAL_ERROR as e: + # No Ticket - ultimately invalid credentials + log.debug("Error: No Ticket (%s)" % str(e)) + raise ldap.INVALID_CREDENTIALS + except ldap.LDAPError as e: + log.debug("SASL/GSSAPI Bind Failed: %s" % str(e)) + raise e + + elif saslmethod: + # Unknown or unsupported method + log.debug('Unsupported SASL method: %s' % saslmethod) + raise ldap.UNWILLING_TO_PERFORM + else: + """ + Do a simple bind + """ + try: + self.simple_bind_s(self.binddn, self.bindpw) + except ldap.SERVER_DOWN as e: + # TODO add server info in exception + log.debug("Cannot connect to %r" % uri) + raise e + except ldap.LDAPError as e: + log.debug("Error: Failed to authenticate: %s", str(e)) + raise e + + """ + Authenticated, now finish the initialization + """ + self.__initPart2() self.state = DIRSRV_STATE_ONLINE def close(self): diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index 13990bbb0..dab410859 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -29,12 +29,14 @@ class Replica(object): def _get_mt_entry(self, suffix): """Return the replica dn of the given suffix.""" - mtent = self.conn.mappingtree.list(suffix=suffix) + mtent = self.conn.mappingtree.list(suffix=suffix)[0] return ','.join(("cn=replica", mtent.dn)) @staticmethod def _valid_role(role): - if role != REPLICAROLE_MASTER and role != REPLICAROLE_HUB and role != REPLICAROLE_CONSUMER: + if role != REPLICAROLE_MASTER and \ + role != REPLICAROLE_HUB and \ + role != REPLICAROLE_CONSUMER: return False else: return True @@ -42,7 +44,9 @@ class Replica(object): @staticmethod def _valid_rid(role, rid=None): if role == REPLICAROLE_MASTER: - if not decimal.Decimal(rid) or (rid <= 0) or (rid >= CONSUMER_REPLICAID): + if not decimal.Decimal(rid) or \ + (rid <= 0) or \ + (rid >= CONSUMER_REPLICAID): return False else: if rid and (rid != CONSUMER_REPLICAID): @@ -52,13 +56,16 @@ class Replica(object): @staticmethod def _set_or_default(attribute, properties, default): ''' - If 'attribute' or 
'+attribute' or '-attribute' exist in 'properties' - it does nothing. Else it set (ldap.MOD_REPLACE) 'attribute' in 'properties' + If 'attribute' or '+attribute' or '-attribute' exist in + 'properties' it does nothing. Else it set (ldap.MOD_REPLACE) + 'attribute' in 'properties' to the 'default' value ''' add_attribute = "+%s" % attribute del_attribute = "-%s" % attribute - if attribute in properties or add_attribute in properties or del_attribute in properties: + if attribute in properties or \ + add_attribute in properties or \ + del_attribute in properties: return properties[attribute] = default @@ -66,8 +73,10 @@ class Replica(object): ''' Create an entry that will be used to bind as replica manager. - @param repl_manager_dn - DN of the bind entry. If not provided use the default one - @param repl_manager_pw - Password of the entry. If not provide use the default one + @param repl_manager_dn - DN of the bind entry. If not provided use + the default one + @param repl_manager_pw - Password of the entry. If not provide use + the default one @return None @@ -76,42 +85,48 @@ class Replica(object): # check the DN and PW try: - repl_manager_dn = repl_manager_dn or defaultProperties[REPLICATION_BIND_DN] - repl_manager_pw = repl_manager_pw or defaultProperties[REPLICATION_BIND_PW] + repl_manager_dn = repl_manager_dn or \ + defaultProperties[REPLICATION_BIND_DN] + repl_manager_pw = repl_manager_pw or \ + defaultProperties[REPLICATION_BIND_PW] if not repl_manager_dn or not repl_manager_pw: raise KeyError except KeyError: if not repl_manager_pw: - self.log.warning("replica_createReplMgr: bind DN password not specified") + self.log.warning("replica_createReplMgr: bind DN password " + + "not specified") if not repl_manager_dn: - self.log.warning("replica_createReplMgr: bind DN not specified") + self.log.warning("replica_createReplMgr: bind DN not " + + "specified") raise # if the replication manager entry already exists, ust return try: - entries = self.search_s(repl_manager_dn, ldap.SCOPE_BASE, "objectclass=*") + entries = self.search_s(repl_manager_dn, ldap.SCOPE_BASE, + "objectclass=*") if entries: - #it already exist, fine + # it already exist, fine return except ldap.NO_SUCH_OBJECT: pass # ok it does not exist, create it try: - attrs = { - 'nsIdleTimeout': '0', - 'passwordExpirationTime': '20381010000000Z' - } + attrs = {'nsIdleTimeout': '0', + 'passwordExpirationTime': '20381010000000Z'} self.conn.setupBindDN(repl_manager_dn, repl_manager_pw, attrs) except ldap.ALREADY_EXISTS: - self.log.warn("User already exists (weird we just checked: %s " % repl_manager_dn) + self.log.warn("User already exists (weird we just checked: %s " % + repl_manager_dn) def list(self, suffix=None, replica_dn=None): """ Return a list of replica entries. If 'replica_dn' is specified it returns the related entry. - If 'suffix' is specified it returns the replica that is configured for that 'suffix'. - If both 'replica_dn' and 'suffix' are specified it returns the 'replica_dn' entry. + If 'suffix' is specified it returns the replica that is configured + for that 'suffix'. + If both 'replica_dn' and 'suffix' are specified it returns the + 'replica_dn' entry. 
If none of them are specified, it returns all the replicas @param suffix - suffix of a replica @@ -128,8 +143,9 @@ class Replica(object): filtr = "(objectclass=%s)" % REPLICA_OBJECTCLASS_VALUE elif suffix: base = DN_MAPPING_TREE - filtr = "(&(objectclass=%s)(%s=%s))" % (REPLICA_OBJECTCLASS_VALUE, - REPLICA_PROPNAME_TO_ATTRNAME[REPLICA_SUFFIX], suffix) + filtr = ("(&(objectclass=%s)(%s=%s))" % + (REPLICA_OBJECTCLASS_VALUE, + REPLICA_PROPNAME_TO_ATTRNAME[REPLICA_SUFFIX], suffix)) else: base = DN_MAPPING_TREE filtr = "(objectclass=%s)" % REPLICA_OBJECTCLASS_VALUE @@ -138,11 +154,13 @@ class Replica(object): ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr) return ents - def setProperties(self, suffix=None, replica_dn=None, replica_entry=None, properties=None): + def setProperties(self, suffix=None, replica_dn=None, replica_entry=None, + properties=None): ''' - Set the properties of the replica. If an 'replica_entry' (Entry) is provided, it updates the entry, else - it updates the entry on the server. If the 'replica_dn' is provided it retrieves the entry using it, else - it retrieve the replica using the 'suffix'. + Set the properties of the replica. If an 'replica_entry' (Entry) is + provided, it updates the entry, else it updates the entry on the + server. If the 'replica_dn' is provided it retrieves the entry + using it, else it retrieve the replica using the 'suffix'. @param suffix : suffix stored in that replica (online update) @param replica_dn: DN of the replica (online update) @@ -164,7 +182,8 @@ class Replica(object): @raise ValueError: if unknown properties ValueError: if invalid replica_entry - ValueError: if replica_dn or suffix are not associated to a replica + ValueError: if replica_dn or suffix are not associated to + a replica InvalidArgumentError: If missing mandatory parameter ''' @@ -179,11 +198,13 @@ class Replica(object): if not inProperties(prop, REPLICA_PROPNAME_TO_ATTRNAME): raise ValueError("unknown property: %s" % prop) else: - self.log.debug("setProperties: %s:%s" % (prop, properties[prop])) + self.log.debug("setProperties: %s:%s" % + (prop, properties[prop])) # At least we need to have suffix/replica_dn/replica_entry if not suffix and not replica_dn and not replica_entry: - raise InvalidArgumentError("suffix and replica_dn and replica_entry are missing") + raise InvalidArgumentError("suffix and replica_dn and replica_" + + "entry are missing") # the caller provides a set of properties to set into a replica entry if replica_entry: @@ -195,7 +216,8 @@ class Replica(object): val = rawProperty(prop) # for Entry update it is a replace - replica_entry.update({REPLICA_PROPNAME_TO_ATTRNAME[val]: properties[prop]}) + replica_entry.update({REPLICA_PROPNAME_TO_ATTRNAME[val]: + properties[prop]}) return @@ -220,12 +242,14 @@ class Replica(object): else: op = ldap.MOD_REPLACE - mods.append((op, REPLICA_PROPNAME_TO_ATTRNAME[val], properties[prop])) + mods.append((op, REPLICA_PROPNAME_TO_ATTRNAME[val], + properties[prop])) # that is fine now to apply the MOD self.conn.modify_s(ents[0].dn, mods) - def getProperties(self, suffix=None, replica_dn=None, replica_entry=None, properties=None): + def getProperties(self, suffix=None, replica_dn=None, replica_entry=None, + properties=None): raise NotImplementedError def create(self, suffix=None, role=None, rid=None, args=None): @@ -233,18 +257,21 @@ class Replica(object): Create a replica entry on an existing suffix. 
@param suffix - dn of suffix - @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or REPLICAROLE_CONSUMER - @param rid - number that identify the supplier replica (role=REPLICAROLE_MASTER) in the topology. - For hub/consumer (role=REPLICAROLE_HUB or REPLICAROLE_CONSUMER), rid value is not used. - This parameter is mandatory for supplier. + @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or + REPLICAROLE_CONSUMER + @param rid - number that identify the supplier replica + (role=REPLICAROLE_MASTER) in the topology. For + hub/consumer (role=REPLICAROLE_HUB or + REPLICAROLE_CONSUMER), rid value is not used. This + parameter is mandatory for supplier. @param args - dictionnary of initial replica's properties Supported properties are: REPLICA_SUFFIX REPLICA_ID REPLICA_TYPE - REPLICA_LEGACY_CONS ['off'] - REPLICA_BINDDN [defaultProperties[REPLICATION_BIND_DN]] + REPLICA_LEGACY_CONS ['off'] + REPLICA_BINDDN [defaultProperties[REPLICATION_BIND_DN]] REPLICA_PURGE_INTERVAL REPLICA_PURGE_DELAY REPLICA_PRECISE_PURGING @@ -259,16 +286,19 @@ class Replica(object): """ # Check validity of role if not role: - self.log.fatal("Replica.create: replica role not specify (REPLICAROLE_*)") + self.log.fatal("Replica.create: replica role not specify" + + " (REPLICAROLE_*)") raise InvalidArgumentError("role missing") if not Replica._valid_role(role): - self.log.fatal("enableReplication: replica role invalid (%s) " % role) + self.log.fatal("enableReplication: replica role invalid (%s) " % + role) raise ValueError("invalid role: %s" % role) # check the validity of 'rid' if not Replica._valid_rid(role, rid=rid): - self.log.fatal("Replica.create: replica role is master but 'rid' is missing or invalid value") + self.log.fatal("Replica.create: replica role is master but 'rid'" + + " is missing or invalid value") raise InvalidArgumentError("rid missing or invalid value") # check the validity of the suffix @@ -286,13 +316,14 @@ class Replica(object): # Set the properties provided as mandatory parameter # The attribute name is not prefixed '+'/'-' => ldap.MOD_REPLACE - properties = {REPLICA_SUFFIX: nsuffix, - REPLICA_ID: str(rid), - REPLICA_TYPE: str(rtype)} + properties = {REPLICA_SUFFIX: nsuffix, + REPLICA_ID: str(rid), + REPLICA_TYPE: str(rtype)} # If the properties in args are valid # add them to the 'properties' dictionary - # The attribute name may be prefixed '+'/'-' => keep MOD type as provided in args + # The attribute name may be prefixed '+'/'-' => keep MOD type as + # provided in args if args: for prop in args: if not inProperties(prop, REPLICA_PROPNAME_TO_ATTRNAME): @@ -301,7 +332,8 @@ class Replica(object): # Now set default values of unset properties Replica._set_or_default(REPLICA_LEGACY_CONS, properties, 'off') - Replica._set_or_default(REPLICA_BINDDN, properties, [defaultProperties[REPLICATION_BIND_DN]]) + Replica._set_or_default(REPLICA_BINDDN, properties, + [defaultProperties[REPLICATION_BIND_DN]]) if role != REPLICAROLE_CONSUMER: properties[REPLICA_FLAGS] = "1" @@ -314,7 +346,8 @@ class Replica(object): entry = self.conn.getEntry(dn_replica, ldap.SCOPE_BASE) self.log.warn("Already setup replica for suffix %r" % nsuffix) self.conn.suffixes.setdefault(nsuffix, {}) - self.conn.replica.setProperties(replica_dn=dn_replica, properties=properties) + self.conn.replica.setProperties(replica_dn=dn_replica, + properties=properties) return dn_replica except ldap.NO_SUCH_OBJECT: entry = None @@ -323,8 +356,10 @@ class Replica(object): # Now create the replica entry # entry = Entry(dn_replica) - 
entry.setValues("objectclass", "top", REPLICA_OBJECTCLASS_VALUE, "extensibleobject") - self.conn.replica.setProperties(replica_entry=entry, properties=properties) + entry.setValues("objectclass", "top", REPLICA_OBJECTCLASS_VALUE, + "extensibleobject") + self.conn.replica.setProperties(replica_entry=entry, + properties=properties) self.conn.add_s(entry) # check if the entry exists TODO better to raise! @@ -356,20 +391,22 @@ class Replica(object): try: self.conn.delete_s(agmt.dn) except ldap.LDAPError as e: - self.log.fatal('Failed to delete replica agreement (%s), error: %s' % - (admt.dn, str(e))) + self.log.fatal('Failed to delete replica agreement (%s),' + + ' error: %s' % + (admt.dn, str(e))) raise except ldap.LDAPError as e: - self.log.fatal('Failed to search for replication agreements under (%s), error: %s' % - (dn_replica, str(e))) + self.log.fatal('Failed to search for replication agreements ' + + 'under (%s), error: %s' % (dn_replica, str(e))) raise def disableReplication(self, suffix=None): ''' Delete a replica related to the provided suffix. - If this replica role was REPLICAROLE_HUB or REPLICAROLE_MASTER, it also deletes the changelog - associated to that replica. - If it exists some replication agreement below that replica, they are deleted. + If this replica role was REPLICAROLE_HUB or REPLICAROLE_MASTER, it + also deletes the changelog associated to that replica. If it + exists some replication agreement below that replica, they are + deleted. @param suffix - dn of suffix @return None @@ -401,16 +438,19 @@ class Replica(object): try: self.conn.delete_s(dn_replica) except ldap.LDAPError as e: - self.log.fatal('Failed to delete replica configuration (%s), error: %s' % (dn_replica, str(e))) + self.log.fatal('Failed to delete replica configuration ' + + '(%s), error: %s' % (dn_replica, str(e))) raise - def enableReplication(self, suffix=None, role=None, replicaId=CONSUMER_REPLICAID, binddn=None): + def enableReplication(self, suffix=None, role=None, + replicaId=CONSUMER_REPLICAID, binddn=None): if not suffix: self.log.fatal("enableReplication: suffix not specified") raise ValueError("suffix missing") if not role: - self.log.fatal("enableReplication: replica role not specify (REPLICAROLE_*)") + self.log.fatal("enableReplication: replica role not specify " + + "(REPLICAROLE_*)") raise ValueError("role missing") # @@ -418,47 +458,53 @@ class Replica(object): # # First role and replicaID - if role != REPLICAROLE_MASTER and role != REPLICAROLE_HUB and role != REPLICAROLE_CONSUMER: - self.log.fatal("enableReplication: replica role invalid (%s) " % role) + if role != REPLICAROLE_MASTER and \ + role != REPLICAROLE_HUB and \ + role != REPLICAROLE_CONSUMER: + self.log.fatal("enableReplication: replica role invalid (%s) " % + role) raise ValueError("invalid role: %s" % role) - # master - replica_type = REPLICA_RDWR_TYPE - else: - # hub or consumer - replica_type = REPLICA_RDONLY_TYPE - if role == REPLICAROLE_MASTER: # check the replicaId [1..CONSUMER_REPLICAID[ - if not decimal.Decimal(replicaId) or (replicaId <= 0) or (replicaId >= CONSUMER_REPLICAID): - self.log.fatal("enableReplication: invalid replicaId (%s) for a RW replica" % replicaId) - raise ValueError("invalid replicaId %d (expected [1..CONSUMER_REPLICAID[" % replicaId) + if not decimal.Decimal(replicaId) or \ + (replicaId <= 0) or \ + (replicaId >= CONSUMER_REPLICAID): + self.log.fatal("enableReplication: invalid replicaId (%s) " + + "for a RW replica" % replicaId) + raise ValueError("invalid replicaId %d (expected [1.." 
+ + "CONSUMER_REPLICAID[" % replicaId) elif replicaId != CONSUMER_REPLICAID: # check the replicaId is CONSUMER_REPLICAID - self.log.fatal("enableReplication: invalid replicaId (%s) for a Read replica (expected %d)" % - (replicaId, CONSUMER_REPLICAID)) - raise ValueError("invalid replicaId: %d for HUB/CONSUMER replicaId is CONSUMER_REPLICAID" % replicaId) + self.log.fatal("enableReplication: invalid replicaId (%s) for a " + + "Read replica (expected %d)" % + (replicaId, CONSUMER_REPLICAID)) + raise ValueError("invalid replicaId: %d for HUB/CONSUMER " + + "replicaId is CONSUMER_REPLICAID" % replicaId) # Now check we have a suffix entries_backend = self.conn.backend.list(suffix=suffix) if not entries_backend: - self.log.fatal("enableReplication: enable to retrieve the backend for %s" % suffix) + self.log.fatal("enableReplication: enable to retrieve the " + + "backend for %s" % suffix) raise ValueError("no backend for suffix %s" % suffix) ent = entries_backend[0] if normalizeDN(suffix) != normalizeDN(ent.getValue('nsslapd-suffix')): - self.log.warning("enableReplication: suffix (%s) and backend suffix (%s) differs" % - (suffix, entries_backend[0].nsslapd - suffix)) + self.log.warning("enableReplication: suffix (%s) and backend " + + "suffix (%s) differs" % + (suffix, entries_backend[0].nsslapd - suffix)) pass # Now prepare the bindDN property if not binddn: binddn = defaultProperties.get(REPLICATION_BIND_DN, None) if not binddn: - # weird, internal error we do not retrieve the default replication bind DN - # this replica will not be updatable through replication until the binddn - # property will be set - self.log.warning("enableReplication: binddn not provided and default value unavailable") + # weird, internal error we do not retrieve the default + # replication bind DN this replica will not be updatable + # through replication until the binddn property will be set + self.log.warning("enableReplication: binddn not provided and" + + " default value unavailable") pass # Now do the effectif job @@ -467,28 +513,35 @@ class Replica(object): self.conn.changelog.create() # Second create the default replica manager entry if it does not exist - # it should not be called from here but for the moment I am unsure when to create it elsewhere + # it should not be called from here but for the moment I am unsure when + # to create it elsewhere self.conn.replica.create_repl_manager() # then enable replication properties = {REPLICA_BINDDN: [binddn]} - ret = self.conn.replica.create(suffix=suffix, role=role, rid=replicaId, args=properties) + ret = self.conn.replica.create(suffix=suffix, role=role, rid=replicaId, + args=properties) return ret def check_init(self, agmtdn): - """returns tuple - first element is done/not done, 2nd is no error/has error + """returns tuple - first element is done/not done, 2nd is no error/has + error @param agmtdn - the agreement dn """ done, hasError = False, 0 - attrlist = ['cn', 'nsds5BeginReplicaRefresh', 'nsds5replicaUpdateInProgress', - 'nsds5ReplicaLastInitStatus', 'nsds5ReplicaLastInitStart', + attrlist = ['cn', + 'nsds5BeginReplicaRefresh', + 'nsds5replicaUpdateInProgress', + 'nsds5ReplicaLastInitStatus', + 'nsds5ReplicaLastInitStart', 'nsds5ReplicaLastInitEnd'] try: entry = self.conn.getEntry( agmtdn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist) except NoSuchEntryError: - self.log.exception("Error reading status from agreement %r" % agmtdn) + self.log.exception("Error reading status from agreement %r" % + agmtdn) hasError = 1 else: refresh = entry.nsds5BeginReplicaRefresh 
@@ -505,7 +558,8 @@ class Replica(object): print("Update succeeded: status ", status) done = True elif inprogress.lower() == 'true': - print("Update in progress yet not in progress: status ", status) + print("Update in progress yet not in progress: status ", + status) else: print("Update failed: status", status) hasError = 1 @@ -548,7 +602,8 @@ class Replica(object): @param agmtdn - """ self.log.info("Setting agreement for continuous replication") - raise NotImplementedError("Check nsds5replicaupdateschedule before writing!") + raise NotImplementedError("Check nsds5replicaupdateschedule before " + + "writing!") def ruv(self, suffix, tryrepl=False): """return a replica update vector for the given suffix. @@ -557,17 +612,20 @@ class Replica(object): @raises NoSuchEntryError if missing """ - filt = "(&(nsUniqueID=%s)(objectclass=%s))" % (REPLICA_RUV_UUID, REPLICA_OC_TOMBSTONE) + filt = "(&(nsUniqueID=%s)(objectclass=%s))" % (REPLICA_RUV_UUID, + REPLICA_OC_TOMBSTONE) attrs = ['nsds50ruv', 'nsruvReplicaLastModified'] ents = self.conn.search_s(suffix, ldap.SCOPE_SUBTREE, filt, attrs) ent = None if ents and (len(ents) > 0): ent = ents[0] elif tryrepl: - self.log.warn("Could not get RUV from %r entry - trying cn=replica" % suffix) + self.log.warn("Could not get RUV from %r entry -" + + " trying cn=replica" % suffix) ensuffix = escapeDNValue(normalizeDN(suffix)) dn = ','.join(("cn=replica", "cn=%s" % ensuffix, DN_MAPPING_TREE)) - ents = self.conn.search_s(dn, ldap.SCOPE_BASE, "objectclass=*", attrs) + ents = self.conn.search_s(dn, ldap.SCOPE_BASE, "objectclass=*", + attrs) if ents and (len(ents) > 0): ent = ents[0] @@ -575,6 +633,3 @@ class Replica(object): return RUV(ent) raise NoSuchEntryError("RUV not found: suffix: %r" % suffix) - - - diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py index af72a61f3..7d6c9fdae 100644 --- a/src/lib389/lib389/tools.py +++ b/src/lib389/lib389/tools.py @@ -884,7 +884,8 @@ class DirSrvTools(object): line = hostfp.readline() if line.find(loopbackIpPattern) >= 0: words = line.split() - assert(words[1] == expectedHost) + # We just want to make sure it's in there somewhere. + assert(expectedHost in words) done = True except AssertionError: raise AssertionError("Error: /etc/hosts should contains 'localhost.localdomain' as first host for %s" %
0
252edd216b7a12175ae6f9f2aab538748db4d1a0
389ds/389-ds-base
Issue 93 - Fix test cases in ctl_dbtasks_test.py Description: There's a typo in ctl_dbtasks_test.py that makes 2 test cases fail. https://pagure.io/lib389/issue/93 Author: Ilias95 Reviewed by: spichugi, wibrown (Thanks!)
commit 252edd216b7a12175ae6f9f2aab538748db4d1a0 Author: Ilias Stamatis <[email protected]> Date: Mon Aug 14 19:35:33 2017 +0300 Issue 93 - Fix test cases in ctl_dbtasks_test.py Description: There's a typo in ctl_dbtasks_test.py making 2 test cases to fail. https://pagure.io/lib389/issue/93 Author: Ilias95 Review by: spichugi, wibrown (Thanks!) diff --git a/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py b/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py index f8abb5303..f2c31e51b 100644 --- a/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py +++ b/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py @@ -39,7 +39,7 @@ def test_ldif2db_db2ldif_no_repl(topology_be_latest): args = FakeArgs() args.backend = 'userRoot' args.ldif = os.path.join(standalone.get_ldif_dir(), "test.ldif") - args.encrypt = False + args.encrypted = False args.replication = False # Stop the instance dbtasks_db2ldif(standalone, topology_be_latest.logcap.log, args) @@ -57,7 +57,7 @@ def test_ldif2db_db2ldif_repl(topology_be_latest): args = FakeArgs() args.backend = 'userRoot' args.ldif = os.path.join(standalone.get_ldif_dir(), "test.ldif") - args.encrypt = False + args.encrypted = False args.replication = False args.archive = os.path.join(standalone.get_ldif_dir(), "test.ldif") # Stop the instance
0
3963b020232c93c6d67c5759a445bab1dda42fa0
389ds/389-ds-base
Issue: 50686 - Port fractional replication test cases from TET to python3 final Bug Description: Port fractional replication test cases from TET to python3 final Fixes: https://pagure.io/389-ds-base/issue/50686 Author: aborah Reviewed by: Viktor Ashirov
commit 3963b020232c93c6d67c5759a445bab1dda42fa0 Author: Anuj Borah <[email protected]> Date: Wed Feb 12 17:38:30 2020 +0530 Issue: 50686 - Port fractional replication test cases from TET to python3 final Bug Description: Port fractional replication test cases from TET to python3 final Fixes: https://pagure.io/389-ds-base/issue/50686 Author: aborah Reviewed by: Viktor Ashirov diff --git a/dirsrvtests/tests/suites/fractional/fractional_test.py b/dirsrvtests/tests/suites/fractional/fractional_test.py index 32dee16fe..0d118e91a 100644 --- a/dirsrvtests/tests/suites/fractional/fractional_test.py +++ b/dirsrvtests/tests/suites/fractional/fractional_test.py @@ -17,6 +17,9 @@ from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccounts, UserAccount from lib389.replica import ReplicationManager from lib389.agreement import Agreements +from lib389.plugins import MemberOfPlugin +from lib389.idm.group import Groups +from lib389.config import Config import ldap pytestmark = pytest.mark.tier1 @@ -262,6 +265,140 @@ def test_fewer_changes_in_single_operation(_create_entries): assert UserAccount(ints, user.dn).get_attr_val_utf8('sn') == 'Oak' [email protected](scope="function") +def _add_user_clean(request): + # Enabling memberOf plugin and then adding few groups with member attributes. + MemberOfPlugin(MASTER1).enable() + for instance in (MASTER1, MASTER2): + instance.restart() + user1 = UserAccounts(MASTER1, DEFAULT_SUFFIX).create_test_user() + for attribute, value in [("displayName", "Anuj Borah"), + ("givenName", "aborah"), + ("telephoneNumber", "+1 555 999 333"), + ("roomnumber", "123"), + ("manager", f'uid=dsmith,ou=People,{DEFAULT_SUFFIX}')]: + user1.set(attribute, value) + grp = Groups(MASTER1, DEFAULT_SUFFIX).create(properties={ + "cn": "bug739172_01group", + "member": f'uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}' + }) + + def final_call(): + """ + Removes User and Group after the test. + """ + user1.delete() + grp.delete() + request.addfinalizer(final_call) + + [email protected] +def test_newly_added_attribute_nsds5replicatedattributelisttotal(_create_entries, _add_user_clean): + """This test case is to test the newly added attribute nsds5replicatedattributelistTotal. + + :id: 2df5971c-38eb-11ea-9e8e-8c16451d917b + :setup: Master and Consumer + :steps: + 1. Enabling memberOf plugin and then adding few groups with member attributes. + 2. No memberOf plugin enabled on read only replicas + 3. The attributes mentioned in the nsds5replicatedattributelist + excluded from incremental updates. + :expected results: + 1. Success + 2. Success + 3. Success + """ + check_all_replicated() + user = f'uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}' + for instance in (MASTER1, MASTER2, CONSUMER1, CONSUMER2): + assert Groups(instance, DEFAULT_SUFFIX).list()[0].get_attr_val_utf8("member") == user + assert UserAccount(instance, user).get_attr_val_utf8("sn") == "test_user_1000" + # The attributes mentioned in the nsds5replicatedattributelist + # excluded from incremental updates. + for instance in (CONSUMER1, CONSUMER2): + for value in ("roomnumber", "manager", "telephoneNumber"): + assert not UserAccount(instance, user).get_attr_val_utf8(value) + + [email protected] +def test_attribute_nsds5replicatedattributelisttotal(_create_entries, _add_user_clean): + """This test case is to test the newly added attribute nsds5replicatedattributelistTotal. + + :id: 35de9ff0-38eb-11ea-b420-8c16451d917b + :setup: Master and Consumer + :steps: + 1. Add a new entry to MASTER1. + 2. 
Enabling memberOf plugin and then adding few groups with member attributes. + 3. No memberOf plugin enabled in other consumers,ie., the read only replicas + won't get incremental updates for the attributes mentioned in the list. + 4. Run total update and verify the same attributes added/modified in the read-only replicas. + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + """ + # Run total update and verify the same attributes added/modified in the read-only replicas. + user = f'uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}' + for agreement in Agreements(MASTER1).list(): + agreement.begin_reinit() + agreement.wait_reinit() + check_all_replicated() + for instance in (MASTER1, MASTER2): + assert Groups(MASTER1, DEFAULT_SUFFIX).list()[0].get_attr_val_utf8("member") == user + assert UserAccount(instance, user).get_attr_val_utf8("sn") == "test_user_1000" + for instance in (CONSUMER1, CONSUMER2): + for value in ("memberOf", "manager", "sn"): + assert UserAccount(instance, user).get_attr_val_utf8(value) + + [email protected] +def test_implicit_replication_of_password_policy(_create_entries): + """For bug 800173, we want to cause the implicit replication of password policy + attributes due to failed bind operations + we want to make sure that replication still works despite + the policy attributes being removed from the update leaving an empty + modify operation + + :id: 3f4affe8-38eb-11ea-8936-8c16451d917b + :setup: Master and Consumer + :steps: + 1. Add a new entry to MASTER1. + 2. Try binding user with correct password + 3. Try binding user with incorrect password (twice) + 4. Make sure user got locked + 5. Run total update and verify the same attributes added/modified in the read-only replicas. + :expected results: + 1. Success + 2. Success + 3. FAIL(ldap.INVALID_CREDENTIALS) + 4. Success + 5. Success + """ + for attribute, value in [("passwordlockout", "on"), + ("passwordmaxfailure", "1")]: + Config(MASTER1).set(attribute, value) + user = UserAccounts(MASTER1, DEFAULT_SUFFIX).create_test_user() + user.set("userpassword", "ItsmeAnuj") + check_all_replicated() + assert UserAccount(MASTER2, user.dn).get_attr_val_utf8("uid") == "test_user_1000" + # Try binding user with correct password + conn = UserAccount(MASTER2, user.dn).bind("ItsmeAnuj") + with pytest.raises(ldap.INVALID_CREDENTIALS): + UserAccount(MASTER1, user.dn).bind("badpass") + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + UserAccount(MASTER1, user.dn).bind("badpass") + # asserting user got locked + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + conn = UserAccount(MASTER1, user.dn).bind("ItsmeAnuj") + check_all_replicated() + # modify user and verify that replication is still working + user.replace("seealso", "cn=seealso") + check_all_replicated() + for instance in (MASTER1, MASTER2): + assert UserAccount(instance, user.dn).get_attr_val_utf8("seealso") == "cn=seealso" + + if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE)
0
46e28cb4229f590c225f2a52bc8169e6fcc2d65b
389ds/389-ds-base
Issue 50041 - Add CLI functionality for special plugins Description: Add the functionality for account-policy, attr-uniq, automember, dna, linked-attr, managed-entries, memberof, pass-through-auth, refer-init, retro-changelog, root-dn, usn commands. Make DSLdapObject create an entry with only DN and attributes (cases when RDN is not specified). Fix two small typos in pwpolicy CLI's arguments. Port test for DNA plugin. https://pagure.io/389-ds-base/issue/50041 Reviewed by: wibrown, mreynolds, mhonek (Thanks!)
commit 46e28cb4229f590c225f2a52bc8169e6fcc2d65b Author: Simon Pichugin <[email protected]> Date: Mon Feb 18 22:45:01 2019 +0100 Issue 50041 - Add CLI functionality for special plugins Description: Add the functionality for account-policy, attr-uniq, automember, dna, linked-attr, managed-entries, memberof, pass-through-auth, refer-init, retro-changelog, root-dn, usn commands. Make DSLdapObject create an entry with only DN and attributes (cases when RDN is not specified). Fix two small typos in pwpolicy CLI's arguments. Port test for DNA plugin. https://pagure.io/389-ds-base/issue/50041 Reviewed by: wibrown, mreynolds, mhonek (Thanks!) diff --git a/dirsrvtests/tests/suites/plugins/dna_test.py b/dirsrvtests/tests/suites/plugins/dna_test.py new file mode 100644 index 000000000..341804825 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/dna_test.py @@ -0,0 +1,84 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +"""Test DNA plugin functionality""" + +import logging +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import DNAPlugin, DNAPluginSharedConfigs, DNAPluginConfigs +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st +import ldap + +log = logging.getLogger(__name__) + + [email protected] +def test_dnatype_only_valid(topology_st): + """Test that DNA plugin only accepts valid attributes for "dnaType" + + :id: 0878ecff-5fdc-47d7-8c8f-edf4556f9746 + :setup: Standalone Instance + :steps: + 1. Create a use entry + 2. Create DNA shared config entry container + 3. Create DNA shared config entry + 4. Add DNA plugin config entry + 5. Enable DNA plugin + 6. Restart the instance + 7. Replace dnaType with invalid value + :expectedresults: + 1. Successful + 2. Successful + 3. Successful + 4. Successful + 5. Successful + 6. Successful + 7. 
Unwilling to perform exception should be raised + """ + + inst = topology_st.standalone + plugin = DNAPlugin(inst) + + log.info("Creating an entry...") + users = UserAccounts(inst, DEFAULT_SUFFIX) + users.create_test_user(uid=1) + + log.info("Creating \"ou=ranges\"...") + ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) + ou_ranges = ous.create(properties={'ou': 'ranges'}) + ou_people = ous.get("People") + + log.info("Creating DNA shared config entry...") + shared_configs = DNAPluginSharedConfigs(inst, ou_ranges.dn) + shared_configs.create(properties={'dnaHostName': str(inst.host), + 'dnaPortNum': str(inst.port), + 'dnaRemainingValues': '9501'}) + + log.info("Add dna plugin config entry...") + configs = DNAPluginConfigs(inst, plugin.dn) + config = configs.create(properties={'cn': 'dna config', + 'dnaType': 'description', + 'dnaMaxValue': '10000', + 'dnaMagicRegen': '0', + 'dnaFilter': '(objectclass=top)', + 'dnaScope': ou_people.dn, + 'dnaNextValue': '500', + 'dnaSharedCfgDN': ou_ranges.dn}) + + log.info("Enable the DNA plugin...") + plugin.enable() + + log.info("Restarting the server...") + inst.restart() + + log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config.replace('dnaType', 'foo') diff --git a/dirsrvtests/tests/tickets/ticket47937_test.py b/dirsrvtests/tests/tickets/ticket47937_test.py deleted file mode 100644 index 0a4c18db8..000000000 --- a/dirsrvtests/tests/tickets/ticket47937_test.py +++ /dev/null @@ -1,122 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2016 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import logging -import time - -import ldap -import pytest -from lib389 import Entry -from lib389._constants import * -from lib389.topologies import topology_st - -log = logging.getLogger(__name__) - - -def test_ticket47937(topology_st): - """ - Test that DNA plugin only accepts valid attributes for "dnaType" - """ - - log.info("Creating \"ou=people\"...") - try: - topology_st.standalone.add_s(Entry(('ou=people,' + SUFFIX, { - 'objectclass': 'top organizationalunit'.split(), - 'ou': 'people' - }))) - - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.error('Failed to add ou=people org unit: error ' + e.args[0]['desc']) - assert False - - log.info("Creating \"ou=ranges\"...") - try: - topology_st.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, { - 'objectclass': 'top organizationalunit'.split(), - 'ou': 'ranges' - }))) - - except ldap.LDAPError as e: - log.error('Failed to add ou=ranges org unit: error ' + e.args[0]['desc']) - assert False - - log.info("Creating \"cn=entry\"...") - try: - topology_st.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, { - 'objectclass': 'top groupofuniquenames'.split(), - 'cn': 'entry' - }))) - - except ldap.LDAPError as e: - log.error('Failed to add test entry: error ' + e.args[0]['desc']) - assert False - - log.info("Creating DNA shared config entry...") - try: - topology_st.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, { - 'objectclass': 'top dnaSharedConfig'.split(), - 'dnaHostname': 'localhost.localdomain', - 'dnaPortNum': '389', - 'dnaSecurePortNum': '636', - 'dnaRemainingValues': '9501' - }))) - - except ldap.LDAPError as e: - log.error('Failed to add shared config entry: error ' + e.args[0]['desc']) - assert False - - log.info("Add dna plugin config entry...") - try: - 
topology_st.standalone.add_s( - Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { - 'objectclass': 'top dnaPluginConfig'.split(), - 'dnaType': 'description', - 'dnaMaxValue': '10000', - 'dnaMagicRegen': '0', - 'dnaFilter': '(objectclass=top)', - 'dnaScope': 'ou=people,%s' % SUFFIX, - 'dnaNextValue': '500', - 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX - }))) - - except ldap.LDAPError as e: - log.error('Failed to add DNA config entry: error ' + e.args[0]['desc']) - assert False - - log.info("Enable the DNA plugin...") - try: - topology_st.standalone.plugins.enable(name=PLUGIN_DNA) - except e: - log.error("Failed to enable DNA Plugin: error " + e.args[0]['desc']) - assert False - - log.info("Restarting the server...") - topology_st.standalone.stop(timeout=120) - time.sleep(1) - topology_st.standalone.start(timeout=120) - time.sleep(3) - - log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...") - - try: - topology_st.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'dnaType', b'foo')]) - except ldap.LDAPError as e: - log.info('Operation failed as expected (error: %s)' % e.args[0]['desc']) - else: - log.error('Operation incorectly succeeded! Test Failed!') - assert False - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) diff --git a/src/cockpit/389-console/src/lib/plugins/accountPolicy.jsx b/src/cockpit/389-console/src/lib/plugins/accountPolicy.jsx index 90ff50149..fae8652e1 100644 --- a/src/cockpit/389-console/src/lib/plugins/accountPolicy.jsx +++ b/src/cockpit/389-console/src/lib/plugins/accountPolicy.jsx @@ -13,7 +13,7 @@ class AccountPolicy extends React.Component { serverId={this.props.serverId} cn="Account Policy Plugin" pluginName="Account Policy" - cmdName="accountpolicy" + cmdName="account-policy" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx b/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx index 3d708de7d..0521a8990 100644 --- a/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx +++ b/src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx @@ -13,7 +13,7 @@ class AttributeUniqueness extends React.Component { serverId={this.props.serverId} cn="attribute uniqueness" pluginName="Attribute Uniqueness" - cmdName="attruniq" + cmdName="attr-uniq" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx b/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx index 5982fcc79..5216b1548 100644 --- a/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx +++ b/src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx @@ -13,7 +13,7 @@ class LinkedAttributes extends React.Component { serverId={this.props.serverId} cn="Linked Attributes" pluginName="Linked Attributes" - cmdName="linkedattr" + cmdName="linked-attr" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx b/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx index 
4bd565743..11771b735 100644 --- a/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx +++ b/src/cockpit/389-console/src/lib/plugins/managedEntries.jsx @@ -13,7 +13,7 @@ class ManagedEntries extends React.Component { serverId={this.props.serverId} cn="Managed Entries" pluginName="Managed Entries" - cmdName="managedentries" + cmdName="managed-entries" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx index 51ecd5998..d8380543f 100644 --- a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx +++ b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx @@ -369,7 +369,7 @@ class MemberOf extends React.Component { } editConfig() { - this.cmdOperation("edit"); + this.cmdOperation("set"); } handleCheckboxChange(e) { @@ -473,7 +473,7 @@ class MemberOf extends React.Component { "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", "plugin", "memberof", - "edit", + "set", "--scope", memberOfEntryScope || "delete", "--exclude", diff --git a/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx b/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx index dfa08c7c3..5b6f76c06 100644 --- a/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx +++ b/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx @@ -13,7 +13,7 @@ class PassthroughAuthentication extends React.Component { serverId={this.props.serverId} cn="Pass Through Authentication" pluginName="Pass Through Authentication" - cmdName="passthroughauth" + cmdName="pass-through-auth" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/referentialIntegrity.jsx b/src/cockpit/389-console/src/lib/plugins/referentialIntegrity.jsx index 20e97ff96..96e8464fa 100644 --- a/src/cockpit/389-console/src/lib/plugins/referentialIntegrity.jsx +++ b/src/cockpit/389-console/src/lib/plugins/referentialIntegrity.jsx @@ -13,7 +13,7 @@ class ReferentialIntegrity extends React.Component { serverId={this.props.serverId} cn="referential integrity postoperation" pluginName="Referential Integrity" - cmdName="referint" + cmdName="referential-integrity" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx b/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx index 51d7bb4f4..4e3490b1a 100644 --- a/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx +++ b/src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx @@ -13,7 +13,7 @@ class RetroChangelog extends React.Component { serverId={this.props.serverId} cn="Retro Changelog Plugin" pluginName="Retro Changelog" - cmdName="retrochangelog" + cmdName="retro-changelog" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/lib/plugins/rootDNAccessControl.jsx b/src/cockpit/389-console/src/lib/plugins/rootDNAccessControl.jsx index 27c1d37ac..3e4d82021 100644 --- a/src/cockpit/389-console/src/lib/plugins/rootDNAccessControl.jsx +++ b/src/cockpit/389-console/src/lib/plugins/rootDNAccessControl.jsx @@ 
-13,7 +13,7 @@ class RootDNAccessControl extends React.Component { serverId={this.props.serverId} cn="RootDN Access Control" pluginName="RootDN Access Control" - cmdName="rootdn" + cmdName="root-dn" savePluginHandler={this.props.savePluginHandler} pluginListHandler={this.props.pluginListHandler} addNotification={this.props.addNotification} diff --git a/src/cockpit/389-console/src/plugins.jsx b/src/cockpit/389-console/src/plugins.jsx index d2b693243..5481d1ac1 100644 --- a/src/cockpit/389-console/src/plugins.jsx +++ b/src/cockpit/389-console/src/plugins.jsx @@ -196,7 +196,7 @@ export class Plugins extends React.Component { "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", "plugin", - "edit", + "set", data.name, "--type", data.type || "delete", diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index f79b3443b..f35b19e1f 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -705,7 +705,11 @@ class DSLdapObject(DSLogging): if self._must_attributes is not None: for attr in self._must_attributes: if properties.get(attr, None) is None: - raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None' % attr) + # Put RDN to properties + if attr == self._rdn_attribute and rdn is not None: + properties[self._rdn_attribute] = ldap.dn.str2dn(rdn)[0][0][1] + else: + raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None' % attr) # Make sure the naming attribute is present if properties.get(self._rdn_attribute, None) is None and rdn is None: diff --git a/src/lib389/lib389/cli_conf/__init__.py b/src/lib389/lib389/cli_conf/__init__.py index 836e05d33..0ceb0664b 100644 --- a/src/lib389/lib389/cli_conf/__init__.py +++ b/src/lib389/lib389/cli_conf/__init__.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
@@ -23,19 +23,30 @@ def _args_to_attrs(args, arg_to_attr): return attrs -def generic_object_add(dsldap_object, log, args, arg_to_attr, props={}): - """Create an entry using DSLdapObject interface +def generic_object_add(dsldap_objects_class, inst, log, args, arg_to_attr, dn=None, basedn=None, props={}): + """Create an entry using DSLdapObjects interface - dsldap_object should be a single instance of DSLdapObject with a set dn + dsldap_objects should be a class inherited from the DSLdapObjects class """ log = log.getChild('generic_object_add') # Gather the attributes attrs = _args_to_attrs(args, arg_to_attr) - # Update the parameters (which should have at least 'cn') with arg attributes props.update({attr: value for (attr, value) in attrs.items() if value != ""}) - new_object = dsldap_object.create(properties=props) + + # Get RDN attribute and Base DN from the DN if Base DN is not specified + if dn is not None and basedn is None: + dn_parts = ldap.dn.explode_dn(dn) + + rdn = dn_parts[0] + basedn = ",".join(dn_parts[1:]) + else: + raise ValueError('If Base DN is not specified - DN parameter should be') + + new_object = dsldap_objects_class(inst, dn=dn) + new_object.create(rdn=rdn, basedn=basedn, properties=props) log.info("Successfully created the %s", new_object.dn) + return new_object def generic_object_edit(dsldap_object, log, args, arg_to_attr): diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py index 5cc7c8c8c..9509b84dd 100644 --- a/src/lib389/lib389/cli_conf/plugin.py +++ b/src/lib389/lib389/cli_conf/plugin.py @@ -17,7 +17,6 @@ from lib389.cli_conf import generic_object_edit from lib389.cli_conf.plugins import memberof as cli_memberof from lib389.cli_conf.plugins import usn as cli_usn from lib389.cli_conf.plugins import rootdn_ac as cli_rootdn_ac -from lib389.cli_conf.plugins import whoami as cli_whoami from lib389.cli_conf.plugins import referint as cli_referint from lib389.cli_conf.plugins import accountpolicy as cli_accountpolicy from lib389.cli_conf.plugins import attruniq as cli_attruniq @@ -42,7 +41,8 @@ arg_to_attr = { 'vendor': 'nsslapd-pluginVendor', 'description': 'nsslapd-pluginDescription', 'depends_on_type': 'nsslapd-plugin-depends-on-type', - 'depends_on_named': 'nsslapd-plugin-depends-on-named' + 'depends_on_named': 'nsslapd-plugin-depends-on-named', + 'precedence': 'nsslapd-pluginPrecedence' } @@ -111,16 +111,15 @@ def create_parser(subparsers): cli_managedentries.create_parser(subcommands) cli_passthroughauth.create_parser(subcommands) cli_retrochangelog.create_parser(subcommands) - cli_whoami.create_parser(subcommands) list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins") list_parser.set_defaults(func=plugin_list) - get_parser = subcommands.add_parser('get', help='Get the plugin data') + get_parser = subcommands.add_parser('show', help='Show the plugin data') get_parser.set_defaults(func=plugin_get) get_parser.add_argument('selector', nargs='?', help='The plugin to search for') - edit_parser = subcommands.add_parser('edit', help='Edit the plugin') + edit_parser = subcommands.add_parser('set', help='Edit the plugin') edit_parser.set_defaults(func=plugin_edit) edit_parser.add_argument('selector', nargs='?', help='The plugin to edit') edit_parser.add_argument('--type', help='The type of plugin.') @@ -138,3 +137,4 @@ def create_parser(subparsers): edit_parser.add_argument('--depends-on-named', help='The plug-in name matching one of the following values will be ' 'started by the 
server prior to this plug-in') + edit_parser.add_argument('--precedence', help='The priority it has in the execution order of plug-ins') diff --git a/src/lib389/lib389/cli_conf/plugins/accountpolicy.py b/src/lib389/lib389/cli_conf/plugins/accountpolicy.py index d33e054f9..e2fa1e186 100644 --- a/src/lib389/lib389/cli_conf/plugins/accountpolicy.py +++ b/src/lib389/lib389/cli_conf/plugins/accountpolicy.py @@ -1,16 +1,118 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- -from lib389.plugins import AccountPolicyPlugin -from lib389.cli_conf import add_generic_plugin_parsers +import ldap +from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfigs, AccountPolicyConfig +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add + +arg_to_attr = { + 'config_entry': 'nsslapd-pluginConfigArea' +} + +arg_to_attr_config = { + 'alt_state_attr': 'altstateattrname', + 'always_record_login': 'alwaysRecordLogin', + 'always_record_login_attr': 'alwaysRecordLoginAttr', + 'limit_attr': 'limitattrname', + 'spec_attr': 'specattrname', + 'state_attr': 'stateattrname' +} + +def accountpolicy_edit(inst, basedn, log, args): + log = log.getChild('accountpolicy_edit') + plugin = AccountPolicyPlugin(inst) + generic_object_edit(plugin, log, args, arg_to_attr) + + +def accountpolicy_add_config(inst, basedn, log, args): + log = log.getChild('accountpolicy_add_config') + targetdn = args.DN + config = generic_object_add(AccountPolicyConfig, inst, log, args, arg_to_attr_config, dn=targetdn) + plugin = AccountPolicyPlugin(inst) + plugin.replace('nsslapd-pluginConfigArea', config.dn) + log.info('Account Policy attribute nsslapd-pluginConfigArea (config_entry) ' + 'was set in the main plugin config') + + +def accountpolicy_edit_config(inst, basedn, log, args): + log = log.getChild('accountpolicy_edit_config') + targetdn = args.DN + config = AccountPolicyConfig(inst, targetdn) + generic_object_edit(config, log, args, arg_to_attr_config) + + +def accountpolicy_show_config(inst, basedn, log, args): + log = log.getChild('accountpolicy_show_config') + targetdn = args.DN + config = AccountPolicyConfig(inst, targetdn) + + if not config.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exist" % targetdn) + if args and args.json: + o_str = config.get_all_attrs_json() + print(o_str) + else: + print(config.display()) + + +def accountpolicy_del_config(inst, basedn, log, args): + log = log.getChild('accountpolicy_del_config') + targetdn = args.DN + config = AccountPolicyConfig(inst, targetdn) + config.delete() + log.info("Successfully deleted the %s", targetdn) + + +def _add_parser_args(parser): + parser.add_argument('--always-record-login', choices=['yes', 'no'], + help='Sets that every entry records its last login time (alwaysRecordLogin)') + parser.add_argument('--alt-state-attr', + help='Provides a backup attribute for the server to reference ' + 'to evaluate the expiration time (altStateAttrName)') + parser.add_argument('--always-record-login-attr', + help='Specifies the attribute to store the time of the last successful ' + 'login in this attribute in the users directory entry (alwaysRecordLoginAttr)') + parser.add_argument('--limit-attr', + help='Specifies the attribute within the policy to use ' + 'for the account inactivation limit (limitAttrName)') + parser.add_argument('--spec-attr', 
help='Specifies the attribute to identify which entries ' + 'are account policy configuration entries (specAttrName)') + parser.add_argument('--state-attr', + help='Specifies the primary time attribute used to evaluate an account policy (stateAttrName)') def create_parser(subparsers): - accountpolicy_parser = subparsers.add_parser('accountpolicy', help='Manage and configure Account Policy plugin') - subcommands = accountpolicy_parser.add_subparsers(help='action') + accountpolicy = subparsers.add_parser('account-policy', help='Manage and configure Account Policy plugin') + subcommands = accountpolicy.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, AccountPolicyPlugin) + + edit = subcommands.add_parser('set', help='Edit the plugin') + edit.set_defaults(func=accountpolicy_edit) + edit.add_argument('--config-entry', help='The value to set as nsslapd-pluginConfigArea') + + config = subcommands.add_parser('config-entry', help='Manage the config entry') + config_subcommands = config.add_subparsers(help='action') + + add_config = config_subcommands.add_parser('add', help='Add the config entry') + add_config.set_defaults(func=accountpolicy_add_config) + add_config.add_argument('DN', help='The config entry full DN') + _add_parser_args(add_config) + + edit_config = config_subcommands.add_parser('set', help='Edit the config entry') + edit_config.set_defaults(func=accountpolicy_edit_config) + edit_config.add_argument('DN', help='The config entry full DN') + _add_parser_args(edit_config) + + show_config_parser = config_subcommands.add_parser('show', help='Display the config entry') + show_config_parser.set_defaults(func=accountpolicy_show_config) + show_config_parser.add_argument('DN', help='The config entry full DN') + + del_config_parser = config_subcommands.add_parser('delete', help='Delete the config entry') + del_config_parser.set_defaults(func=accountpolicy_del_config) + del_config_parser.add_argument('DN', help='The config entry full DN') diff --git a/src/lib389/lib389/cli_conf/plugins/attruniq.py b/src/lib389/lib389/cli_conf/plugins/attruniq.py index 4c04b05bc..17dac1516 100644 --- a/src/lib389/lib389/cli_conf/plugins/attruniq.py +++ b/src/lib389/lib389/cli_conf/plugins/attruniq.py @@ -1,16 +1,122 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- -from lib389.plugins import AttributeUniquenessPlugin -from lib389.cli_conf import add_generic_plugin_parsers +import json +import ldap +from lib389.plugins import AttributeUniquenessPlugin, AttributeUniquenessPlugins +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add +from lib389._constants import DN_PLUGIN + +arg_to_attr = { + 'attr_name': 'uniqueness-attribute-name', + 'subtree': 'uniqueness-subtrees', + 'across_all_subtrees': 'uniqueness-across-all-subtrees', + 'top_entry_oc': 'uniqueness-top-entry-oc', + 'subtree_entries_oc': 'uniqueness-subtree-entries-oc' +} + + +def attruniq_list(inst, basedn, log, args): + log = log.getChild('attruniq_list') + plugins = AttributeUniquenessPlugins(inst) + result = [] + result_json = [] + for plugin in plugins.list(): + if args.json: + result_json.append(plugin.get_all_attrs_json()) + else: + result.append(plugin.rdn) + if args.json: + print(json.dumps({"type": "list", "items": result_json})) + else: + if len(result) > 0: + for i in result: + print(i) + else: + print("No Attribute Uniqueness plugin instances") + + +def attruniq_add(inst, basedn, log, args): + log = log.getChild('attruniq_add') + props = {'cn': args.NAME} + generic_object_add(AttributeUniquenessPlugin, inst, log, args, arg_to_attr, basedn=DN_PLUGIN, props=props) + + +def attruniq_edit(inst, basedn, log, args): + log = log.getChild('attruniq_edit') + plugins = AttributeUniquenessPlugins(inst) + plugin = plugins.get(args.NAME) + generic_object_edit(plugin, log, args, arg_to_attr) + + +def attruniq_show(inst, basedn, log, args): + log = log.getChild('attruniq_show') + plugins = AttributeUniquenessPlugins(inst) + plugin = plugins.get(args.NAME) + + if not plugin.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exist" % args.NAME) + if args and args.json: + o_str = plugin.get_all_attrs_json() + print(o_str) + else: + print(plugin.display()) + + +def attruniq_del(inst, basedn, log, args): + log = log.getChild('attruniq_del') + plugins = AttributeUniquenessPlugins(inst) + plugin = plugins.get(args.NAME) + plugin.delete() + log.info("Successfully deleted the %s", plugin.dn) + + +def _add_parser_args(parser): + parser.add_argument('NAME', help='Sets the name of the plug-in configuration record. (cn) You can use any string, ' + 'but "attribute_name Attribute Uniqueness" is recommended.') + parser.add_argument('--attr-name', nargs='+', + help='Sets the name of the attribute whose values must be unique. ' + 'This attribute is multi-valued. (uniqueness-attribute-name)') + parser.add_argument('--subtree', nargs='+', + help='Sets the DN under which the plug-in checks for uniqueness of ' + 'the attributes value. This attribute is multi-valued (uniqueness-subtrees)') + parser.add_argument('--across-all-subtrees', choices=['on', 'off'], + help='If enabled (on), the plug-in checks that the attribute is unique across all subtrees ' + 'set. 
If you set the attribute to off, uniqueness is only enforced within the subtree ' + 'of the updated entry (uniqueness-across-all-subtrees)') + parser.add_argument('--top-entry-oc', + help='Verifies that the value of the attribute set in uniqueness-attribute-name ' + 'is unique in this subtree (uniqueness-top-entry-oc)') + parser.add_argument('--subtree-entries-oc', + help='Verifies if an attribute is unique, if the entry contains the object class ' + 'set in this parameter (uniqueness-subtree-entries-oc)') def create_parser(subparsers): - attruniq_parser = subparsers.add_parser('attruniq', help='Manage and configure Attribute Uniqueness plugin') - subcommands = attruniq_parser.add_subparsers(help='action') + attruniq = subparsers.add_parser('attr-uniq', help='Manage and configure Attribute Uniqueness plugin') + subcommands = attruniq.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, AttributeUniquenessPlugin) + + list = subcommands.add_parser('list', help='List available plugin configs') + list.set_defaults(func=attruniq_list) + + add = subcommands.add_parser('add', help='Add the config entry') + add.set_defaults(func=attruniq_add) + _add_parser_args(add) + + edit = subcommands.add_parser('set', help='Edit the config entry') + edit.set_defaults(func=attruniq_edit) + _add_parser_args(edit) + + show = subcommands.add_parser('show', help='Display the config entry') + show.add_argument('NAME', help='The name of the plug-in configuration record') + show.set_defaults(func=attruniq_show) + + delete = subcommands.add_parser('delete', help='Delete the config entry') + delete.add_argument('NAME', help='Sets the name of the plug-in configuration record') + delete.set_defaults(func=attruniq_del) diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py index a4e757e17..d9fe1dd74 100644 --- a/src/lib389/lib389/cli_conf/plugins/automember.py +++ b/src/lib389/lib389/cli_conf/plugins/automember.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). @@ -8,165 +8,226 @@ import ldap import json -from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions -from lib389.cli_conf import add_generic_plugin_parsers +from lib389.plugins import (AutoMembershipPlugin, AutoMembershipDefinition, AutoMembershipDefinitions, + AutoMembershipRegexRule, AutoMembershipRegexRules) +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add -def list_definition(inst, basedn, log, args): - """List automember definition if instance name - is given else show all automember definitions. 
+arg_to_attr_definition = { + 'default-group': 'autoMemberDefaultGroup', + 'filter': 'autoMemberFilter', + 'grouping-attr': 'autoMemberGroupingAttr', + 'scope': 'autoMemberScope' +} - :param name: An instance - :type name: lib389.DirSrv - """ - - automembers = AutoMembershipDefinitions(inst) - - if args.name is not None: - if args.json: - print(automembers.get_all_attrs_json(args.name)) - else: - automember = automembers.get(args.name) - log.info(automember.display()) - else: - all_definitions = automembers.list() - if args.json: - result = {'type': 'list', 'items': []} - if len(all_definitions) > 0: - for definition in all_definitions: - if args.json: - result['items'].append(definition) - else: - log.info(definition.display()) - else: - log.info("No automember definitions were found") - - if args.json: - print(json.dumps(result)) - - -def create_definition(inst, basedn, log, args): - """ - Create automember definition. - - :param name: An instance - :type name: lib389.DirSrv - :param groupattr: autoMemberGroupingAttr value - :type groupattr: str - :param defaultgroup: autoMemberDefaultGroup value - :type defaultgroup: str - :param scope: autoMemberScope value - :type scope: str - :param filter: autoMemberFilter value - :type filter: str - - """ - automember_prop = { - 'cn': args.name, - 'autoMemberScope': args.scope, - 'autoMemberFilter': args.filter, - 'autoMemberDefaultGroup': args.defaultgroup, - 'autoMemberGroupingAttr': args.groupattr, - } - - plugin = AutoMembershipPlugin(inst) - plugin.enable() - - automembers = AutoMembershipDefinitions(inst) - - try: - automember = automembers.create(properties=automember_prop) - log.info("Automember definition created successfully!") - except Exception as e: - log.info("Failed to create Automember definition: {}".format(str(e))) - raise e - - -def edit_definition(inst, basedn, log, args): - """ - Edit automember definition - - :param name: An instance - :type name: lib389.DirSrv - :param groupattr: autoMemberGroupingAttr value - :type groupattr: str - :param defaultgroup: autoMemberDefaultGroup value - :type defaultgroup: str - :param scope: autoMemberScope value - :type scope: str - :param filter: autoMemberFilter value - :type filter: str - - """ - automembers = AutoMembershipDefinitions(inst) - automember = automembers.get(args.name) +arg_to_attr_regex = { + 'exclusive': 'autoMemberExclusiveRegex', + 'inclusive': 'autoMemberInclusiveRegex', + 'target-group': 'autoMemberTargetGroup' +} - if args.scope is not None: - automember.replace("automemberscope", args.scope) - if args.filter is not None: - automember.replace("automemberfilter", args.filter) - if args.defaultgroup is not None: - automember.replace("automemberdefaultgroup", args.defaultgroup) - if args.groupattr is not None: - automember.replace("automembergroupingattr", args.groupattr) - log.info("Definition updated successfully.") - -def remove_definition(inst, basedn, log, args): - """ - Remove automember definition for the given - instance. 
- - :param name: An instance - :type name: lib389.DirSrv - - """ +def definition_list(inst, basedn, log, args): automembers = AutoMembershipDefinitions(inst) - automember = automembers.get(args.name) - - automember.delete() - log.info("Definition deleted successfully.") + all_definitions = automembers.list() + if args.json: + result = {'type': 'list', 'items': []} + if len(all_definitions) > 0: + for definition in all_definitions: + if args.json: + result['items'].append(definition) + else: + log.info(definition.rdn) + else: + log.info("No automember definitions were found") + + if args.json: + print(json.dumps(result)) + + +def definition_add(inst, basedn, log, args): + log = log.getChild('definition_add') + plugin = AutoMembershipPlugin(inst) + props = {'cn': args.DEF_NAME} + generic_object_add(AutoMembershipDefinition, inst, log, args, arg_to_attr_definition, basedn=plugin.dn, props=props) + + +def definition_edit(inst, basedn, log, args): + log = log.getChild('definition_edit') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + generic_object_edit(definition, log, args, arg_to_attr_definition) + + +def definition_show(inst, basedn, log, args): + log = log.getChild('definition_show') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + + if not definition.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % args.name) + if args and args.json: + o_str = definition.get_all_attrs_json() + print(o_str) + else: + print(definition.display()) + + +def definition_del(inst, basedn, log, args): + log = log.getChild('definition_del') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + definition.delete() + log.info("Successfully deleted the %s definition", args.name) + + +def regex_list(inst, basedn, log, args): + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + regexes = AutoMembershipRegexRules(inst, definition.dn) + all_regexes = regexes.list() + if args.json: + result = {'type': 'list', 'items': []} + if len(all_regexes) > 0: + for regex in all_regexes: + if args.json: + result['items'].append(regex) + else: + log.info(regex.rdn) + else: + log.info("No automember regexes were found") + + if args.json: + print(json.dumps(result)) + + +def regex_add(inst, basedn, log, args): + log = log.getChild('regex_add') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + props = {'cn': args.REGEX_NAME} + generic_object_add(AutoMembershipRegexRule, inst, log, args, arg_to_attr_regex, basedn=definition.dn, props=props) + + +def regex_edit(inst, basedn, log, args): + log = log.getChild('regex_edit') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + regexes = AutoMembershipRegexRules(inst, definition.dn) + regex = regexes.get(args.REGEX_NAME) + generic_object_edit(regex, log, args, arg_to_attr_regex) + + +def regex_show(inst, basedn, log, args): + log = log.getChild('regex_show') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + regexes = AutoMembershipRegexRules(inst, definition.dn) + regex = regexes.get(args.REGEX_NAME) + + if not regex.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % args.name) + if args and args.json: + o_str = regex.get_all_attrs_json() + print(o_str) + else: + print(regex.display()) + + +def regex_del(inst, basedn, log, args): + log = 
log.getChild('regex_del') + definitions = AutoMembershipDefinitions(inst) + definition = definitions.get(args.DEF_NAME) + regexes = AutoMembershipRegexRules(inst, definition.dn) + regex = regexes.get(args.REGEX_NAME) + regex.delete() + log.info("Successfully deleted the %s regex", regex.dn) + + +def fixup(inst, basedn, log, args): + plugin = AutoMembershipPlugin(inst) + log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.') + if not plugin.status(): + log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn) + fixup_task = plugin.fixup(args.DN, args.filter) + fixup_task.wait() + exitcode = fixup_task.get_exit_code() + if exitcode != 0: + log.error('Rebuild membership task for %s has failed. Please, check logs') + else: + log.info('Successfully added task entry') + + +def _add_parser_args_definition(parser): + parser.add_argument('--grouping-attr', + help='Specifies the name of the member attribute in the group entry and ' + 'the attribute in the object entry that supplies the member attribute value, ' + 'in the format group_member_attr:entry_attr (autoMemberGroupingAttr)') + parser.add_argument('--default-group', required=True, + help='Sets default or fallback group to add the entry to as a member ' + 'member attribute in group entry (autoMemberDefaultGroup)') + parser.add_argument('--scope', required=True, + help='Sets the subtree DN to search for entries (autoMemberScope)') + parser.add_argument('--filter', + help='Sets a standard LDAP search filter to use to search for ' + 'matching entries (autoMemberFilter)') + + +def _add_parser_args_regex(parser): + parser.add_argument("--exclusive", + help='Sets a single regular expression to use to identify ' + 'entries to exclude (autoMemberExclusiveRegex)') + parser.add_argument('--inclusive', required=True, + help='Sets a single regular expression to use to identify ' + 'entries to include (autoMemberInclusiveRegex)') + parser.add_argument('--target-group', required=True, + help='Sets which group to add the entry to as a member, if it meets ' + 'the regular expression conditions (autoMemberTargetGroup)') def create_parser(subparsers): - automember_parser = subparsers.add_parser('automember', help="Manage and configure automember plugin") - - subcommands = automember_parser.add_subparsers(help='action') - + automember = subparsers.add_parser('automember', help="Manage and configure Automembership plugin") + subcommands = automember.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, AutoMembershipPlugin) - create_parser = subcommands.add_parser('create', help='Create automember definition.') - create_parser.set_defaults(func=create_definition) - - create_parser.add_argument("name", help='Set cn for group entry.') - - create_parser.add_argument("--groupattr", help='Set member attribute in group entry.', default='member:dn') - - create_parser.add_argument('--defaultgroup', required=True, help='Set default group to add member to.') - - create_parser.add_argument('--scope', required=True, help='Set automember scope.') - - create_parser.add_argument('--filter', help='Set automember filter.', default= '(objectClass=*)') - - show_parser = subcommands.add_parser('list', help='List automember definition.') - show_parser.set_defaults(func=list_definition) - - show_parser.add_argument("--name", help='Set cn for group entry. 
If not specified show all automember definitions.') - - edit_parser = subcommands.add_parser('edit', help='Edit automember definition.') - edit_parser.set_defaults(func=edit_definition) - - edit_parser.add_argument("name", help='Set cn for group entry.') - - edit_parser.add_argument("--groupattr", help='Set member attribute in group entry.') - - edit_parser.add_argument('--defaultgroup', help='Set default group to add member to.') - - edit_parser.add_argument('--scope', help='Set automember scope.') - - edit_parser.add_argument('--filter', help='Set automember filter.') - - remove_parser = subcommands.add_parser('remove', help='Remove automember definition.') - remove_parser.set_defaults(func=remove_definition) + list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.') + subcommands_list = list.add_subparsers(help='action') + list_definitions = subcommands_list.add_parser('definitions', help='List Automembership definitions.') + list_definitions.set_defaults(func=definition_list) + list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.') + list_regexes.add_argument('DEF_NAME', help='The definition entry CN.') + list_regexes.set_defaults(func=regex_list) + + definition = subcommands.add_parser('definition', help='Manage Automembership definition.') + definition.add_argument('DEF_NAME', help='The definition entry CN.') + subcommands_definition = definition.add_subparsers(help='action') + + add_def = subcommands_definition.add_parser('add', help='Create Automembership definition.') + add_def.set_defaults(func=definition_add) + _add_parser_args_definition(add_def) + edit_def = subcommands_definition.add_parser('set', help='Edit Automembership definition.') + edit_def.set_defaults(func=definition_edit) + _add_parser_args_definition(edit_def) + delete_def = subcommands_definition.add_parser('delete', help='Remove Automembership definition.') + delete_def.set_defaults(func=definition_del) + + regex = subcommands_definition.add_parser('regex', help='Manage Automembership regex rules.') + regex.add_argument('REGEX_NAME', help='The regex entry CN.') + subcommands_regex = regex.add_subparsers(help='action') + + add_regex = subcommands_regex.add_parser('add', help='Create Automembership regex.') + add_regex.set_defaults(func=regex_add) + _add_parser_args_regex(add_regex) + edit_regex = subcommands_regex.add_parser('set', help='Edit Automembership regex.') + edit_regex.set_defaults(func=regex_edit) + _add_parser_args_regex(edit_regex) + delete_regex = subcommands_regex.add_parser('delete', help='Remove Automembership regex.') + delete_regex.set_defaults(func=regex_del) + + fixup_parser = subcommands.add_parser('fixup', help='Run a rebuild membership task.') + fixup_parser.set_defaults(func=fixup) + fixup_parser.add_argument('DN', help="Base DN that contains entries to fix up") + fixup_parser.add_argument('-f', '--filter', required=True, help='LDAP filter for entries to fix up.') + fixup_parser.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower, + help='LDAP search scope for entries to fix up') - remove_parser.add_argument("name", help='Set cn for group entry.') diff --git a/src/lib389/lib389/cli_conf/plugins/dna.py b/src/lib389/lib389/cli_conf/plugins/dna.py index 50dd37fc3..08f66a446 100644 --- a/src/lib389/lib389/cli_conf/plugins/dna.py +++ b/src/lib389/lib389/cli_conf/plugins/dna.py @@ -1,16 +1,246 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. 
# All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- -from lib389.plugins import DNAPlugin -from lib389.cli_conf import add_generic_plugin_parsers +import json +import ldap +from lib389.plugins import DNAPlugin, DNAPluginConfig, DNAPluginConfigs, DNAPluginSharedConfig, DNAPluginSharedConfigs +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add, _args_to_attrs + +arg_to_attr = { + 'type': 'dnaType', + 'prefix': 'dnaPrefix', + 'next_value': 'dnaNextValue', + 'max_value': 'dnaMaxValue', + 'interval': 'dnaInterval', + 'magic_regen': 'dnaMagicRegen', + 'filter': 'dnaFilter', + 'scope': 'dnaScope', + 'remote_bind_dn': 'dnaRemoteBindDN', + 'remote_bind_cred': 'dnaRemoteBindCred', + 'shared_config_entry': 'dnaSharedCfgDN', + 'threshold': 'dnaThreshold', + 'next_range': 'dnaNextRange', + 'range_request_timeout': 'dnaRangeRequestTimeout' +} + +arg_to_attr_config = { + 'hostname': 'dnaHostname', + 'port': 'dnaPortNum', + 'secure_port': 'dnaSecurePortNum', + 'remaining_values': 'dnaRemainingValues', + 'remote_bind_method': 'dnaRemoteBindMethod', + 'remote_conn_protocol': 'dnaRemoteConnProtocol' +} + + +def dna_list(inst, basedn, log, args): + log = log.getChild('dna_list') + configs = DNAPluginConfigs(inst) + config_list = configs.list() + if args.json: + result = {'type': 'list', 'items': []} + if len(config_list) > 0: + for config in config_list: + if args.json: + result['items'].append(config) + else: + log.info(config.rdn) + else: + log.info("No DNA configurations were found") + + if args.json: + print(json.dumps(result)) + + +def dna_add(inst, basedn, log, args): + log = log.getChild('dna_add') + plugin = DNAPlugin(inst) + props = {'cn': args.NAME} + generic_object_add(DNAPluginConfig, inst, log, args, arg_to_attr, basedn=plugin.dn, props=props) + + +def dna_edit(inst, basedn, log, args): + log = log.getChild('dna_edit') + configs = DNAPluginConfigs(inst) + config = configs.get(args.NAME) + generic_object_edit(config, log, args, arg_to_attr) + + +def dna_show(inst, basedn, log, args): + log = log.getChild('dna_show') + configs = DNAPluginConfigs(inst) + config = configs.get(args.NAME) + + if not config.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % args.NAME) + if args and args.json: + o_str = config.get_all_attrs_json() + print(o_str) + else: + print(config.display()) + + +def dna_del(inst, basedn, log, args): + log = log.getChild('dna_del') + configs = DNAPluginConfigs(inst) + config = configs.get(args.NAME) + config.delete() + log.info("Successfully deleted the %s", config.dn) + + +def dna_config_list(inst, basedn, log, args): + log = log.getChild('dna_list') + configs = DNAPluginSharedConfigs(inst, args.BASEDN) + config_list = configs.list() + if args.json: + result = {'type': 'list', 'items': []} + if len(config_list) > 0: + for config in config_list: + if args.json: + result['items'].append(config.get_all_attrs_json()) + else: + log.info(config.dn) + else: + log.info("No DNA shared configurations were found") + + if args.json: + print(json.dumps(result)) + + +def dna_config_add(inst, basedn, log, args): + log = log.getChild('dna_config_add') + targetdn = args.BASEDN + + shared_configs = DNAPluginSharedConfigs(inst, targetdn) + attrs = _args_to_attrs(args, arg_to_attr_config) + props = {attr: value for (attr, value) in attrs.items() if value != ""} + + shared_config = shared_configs.create(properties=props) + log.info("Successfully created the %s" % 
shared_config.dn) + + configs = DNAPluginConfigs(inst) + config = configs.get(args.NAME) + config.replace('dnaSharedCfgDN', targetdn) + log.info('DNA attribute dnaSharedCfgDN (shared-config-entry) ' + 'was set in the %s plugin config' % config.rdn) + + +def dna_config_edit(inst, basedn, log, args): + log = log.getChild('dna_config_edit') + targetdn = args.DN + shared_config = DNAPluginSharedConfig(inst, targetdn) + generic_object_edit(shared_config, log, args, arg_to_attr_config) + + +def dna_config_show(inst, basedn, log, args): + log = log.getChild('dna_config_show') + targetdn = args.DN + shared_config = DNAPluginSharedConfig(inst, targetdn) + + if not shared_config.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exist" % targetdn) + if args and args.json: + o_str = shared_config.get_all_attrs_json() + print(o_str) + else: + print(shared_config.display()) + + +def dna_config_del(inst, basedn, log, args): + log = log.getChild('dna_config_del') + targetdn = args.DN + shared_config = DNAPluginSharedConfig(inst, targetdn) + shared_config.delete() + log.info("Successfully deleted the %s", targetdn) + + +def _add_parser_args(parser): + parser.add_argument('--type', help='Sets which attributes have unique numbers being generated for them (dnaType)') + parser.add_argument('--prefix', help='Defines a prefix that can be prepended to the generated ' + 'number values for the attribute (dnaPrefix)') + parser.add_argument('--next-value', help='Gives the next available number which can be assigned (dnaNextValue)') + parser.add_argument('--max-value', help='Sets the maximum value that can be assigned for the range (dnaMaxValue)') + parser.add_argument('--interval', help='Sets an interval to use to increment through numbers in a range (dnaInterval)') + parser.add_argument('--magic-regen', help='Sets a user-defined value that instructs the plug-in ' + 'to assign a new value for the entry (dnaMagicRegen)') + parser.add_argument('--filter', help='Sets an LDAP filter to use to search for and identify the entries ' + 'to which to apply the distributed numeric assignment range (dnaFilter)') + parser.add_argument('--scope', help='Sets the base DN to search for entries to which ' + 'to apply the distributed numeric assignment (dnaScope)') + parser.add_argument('--remote-bind-dn', help='Specifies the Replication Manager DN (dnaRemoteBindDN)') + parser.add_argument('--remote-bind-cred', help='Specifies the Replication Manager\'s password (dnaRemoteBindCred)') + parser.add_argument('--shared-config-entry', help='Defines a shared identity that the servers can use ' + 'to transfer ranges to one another (dnaSharedCfgDN)') + parser.add_argument('--threshold', help='Sets a threshold of remaining available numbers in the range. 
When the ' + 'server hits the threshold, it sends a request for a new range (dnaThreshold)') + parser.add_argument('--next-range', + help='Defines the next range to use when the current range is exhausted (dnaNextRange)') + parser.add_argument('--range-request-timeout', + help='sets a timeout period, in seconds, for range requests so that the server ' + 'does not stall waiting on a new range from one server and ' + 'can request a range from a new server (dnaRangeRequestTimeout)') + + +def _add_parser_args_config(parser): + parser.add_argument('--hostname', + help='Identifies the host name of a server in a shared range, as part of the DNA ' + 'range configuration for that specific host in multi-master replication (dnaHostname)') + parser.add_argument('--port', help='Gives the standard port number to use to connect to ' + 'the host identified in dnaHostname (dnaPortNum)') + parser.add_argument('--secure-port', help='Gives the secure (TLS) port number to use to connect ' + 'to the host identified in dnaHostname (dnaSecurePortNum)') + parser.add_argument('--remote-bind-method', help='Specifies the remote bind method (dnaRemoteBindMethod)') + parser.add_argument('--remote-conn-protocol', help='Specifies the remote connection protocol (dnaRemoteConnProtocol)') + parser.add_argument('--remaining-values', help='Contains the number of values that are remaining and ' + 'available to a server to assign to entries (dnaRemainingValues)') def create_parser(subparsers): - dna_parser = subparsers.add_parser('dna', help='Manage and configure DNA plugin') - subcommands = dna_parser.add_subparsers(help='action') + dna = subparsers.add_parser('dna', help='Manage and configure DNA plugin') + subcommands = dna.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, DNAPlugin) + + list = subcommands.add_parser('list', help='List available plugin configs') + subcommands_list = list.add_subparsers(help='action') + list_configs = subcommands_list.add_parser('configs', help='List main DNA plugin config entries') + list_configs.set_defaults(func=dna_list) + list_shared_configs = subcommands_list.add_parser('shared-configs', help='List DNA plugin shared config entries') + list_shared_configs.add_argument('BASEDN', help='The search DN') + list_shared_configs.set_defaults(func=dna_config_list) + + config = subcommands.add_parser('config', help='Manage plugin configs') + config.add_argument('NAME', help='The DNA configuration name') + config_subcommands = config.add_subparsers(help='action') + add = config_subcommands.add_parser('add', help='Add the config entry') + add.set_defaults(func=dna_add) + _add_parser_args(add) + edit = config_subcommands.add_parser('set', help='Edit the config entry') + edit.set_defaults(func=dna_edit) + _add_parser_args(edit) + show = config_subcommands.add_parser('show', help='Display the config entry') + show.set_defaults(func=dna_show) + delete = config_subcommands.add_parser('delete', help='Delete the config entry') + delete.set_defaults(func=dna_del) + shared_config = config_subcommands.add_parser('shared-config-entry', help='Manage the shared config entry') + shared_config_subcommands = shared_config.add_subparsers(help='action') + + add_config = shared_config_subcommands.add_parser('add', help='Add the shared config entry') + add_config.add_argument('BASEDN', help='The shared config entry BASE DN. 
The new DN will be constructed with ' + 'dnaHostname and dnaPortNum') + add_config.set_defaults(func=dna_config_add) + _add_parser_args_config(add_config) + edit_config = shared_config_subcommands.add_parser('edit', help='Edit the shared config entry') + edit_config.add_argument('DN', help='The shared config entry DN') + edit_config.set_defaults(func=dna_config_edit) + _add_parser_args_config(edit_config) + show_config_parser = shared_config_subcommands.add_parser('show', help='Display the shared config entry') + show_config_parser.add_argument('DN', help='The shared config entry DN') + show_config_parser.set_defaults(func=dna_config_show) + del_config_parser = shared_config_subcommands.add_parser('delete', help='Delete the shared config entry') + del_config_parser.add_argument('DN', help='The shared config entry DN') + del_config_parser.set_defaults(func=dna_config_del) diff --git a/src/lib389/lib389/cli_conf/plugins/linkedattr.py b/src/lib389/lib389/cli_conf/plugins/linkedattr.py index 3f9a6acb9..1a1c6ecef 100644 --- a/src/lib389/lib389/cli_conf/plugins/linkedattr.py +++ b/src/lib389/lib389/cli_conf/plugins/linkedattr.py @@ -1,4 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. # Copyright (C) 2019 William Brown <[email protected]> # All rights reserved. # @@ -6,8 +7,72 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- -from lib389.plugins import LinkedAttributesPlugin -from lib389.cli_conf import add_generic_plugin_parsers +import json +import ldap +from lib389.plugins import LinkedAttributesPlugin, LinkedAttributesConfig, LinkedAttributesConfigs +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add + +arg_to_attr = { + 'link_type': 'linkType', + 'managed_type': 'managedType', + 'link_scope': 'linkScope', +} + + +def linkedattr_list(inst, basedn, log, args): + log = log.getChild('linkedattr_list') + configs = LinkedAttributesConfigs(inst) + result = [] + result_json = [] + for config in configs.list(): + if args.json: + result_json.append(config.get_all_attrs_json()) + else: + result.append(config.rdn) + if args.json: + print(json.dumps({"type": "list", "items": result_json})) + else: + if len(result) > 0: + for i in result: + print(i) + else: + print("No Linked Attributes plugin instances") + + +def linkedattr_add(inst, basedn, log, args): + log = log.getChild('linkedattr_add') + plugin = LinkedAttributesPlugin(inst) + props = {'cn': args.NAME} + generic_object_add(LinkedAttributesConfig, inst, log, args, arg_to_attr, basedn=plugin.dn, props=props) + + +def linkedattr_edit(inst, basedn, log, args): + log = log.getChild('linkedattr_edit') + configs = LinkedAttributesConfigs(inst) + config = configs.get(args.NAME) + generic_object_edit(config, log, args, arg_to_attr) + + +def linkedattr_show(inst, basedn, log, args): + log = log.getChild('linkedattr_show') + configs = LinkedAttributesConfigs(inst) + config = configs.get(args.NAME) + + if not config.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % args.name) + if args and args.json: + o_str = config.get_all_attrs_json() + print(o_str) + else: + print(config.display()) + + +def linkedattr_del(inst, basedn, log, args): + log = log.getChild('linkedattr_del') + configs = LinkedAttributesConfigs(inst) + config = configs.get(args.NAME) + config.delete() + log.info("Successfully deleted the %s", config.dn) def fixup(inst, basedn, log, args): @@ -24,8 +89,17 @@ def fixup(inst, basedn, log, args): log.info('Successfully added fixup task') +def 
_add_parser_args(parser): + parser.add_argument('--link-type', + help='Sets the attribute that is managed manually by administrators (linkType)') + parser.add_argument('--managed-type', + help='Sets the attribute that is created dynamically by the plugin (managedType)') + parser.add_argument('--link-scope', + help='Sets the scope that restricts the plugin to a specific part of the directory tree (linkScope)') + + def create_parser(subparsers): - linkedattr_parser = subparsers.add_parser('linkedattr', help='Manage and configure Linked Attributes plugin') + linkedattr_parser = subparsers.add_parser('linked-attr', help='Manage and configure Linked Attributes plugin') subcommands = linkedattr_parser.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, LinkedAttributesPlugin) @@ -33,3 +107,20 @@ def create_parser(subparsers): fixup_parser.add_argument('basedn', help="basedn that contains entries to fix up") fixup_parser.add_argument('-f', '--filter', help='Filter for entries to fix up linked attributes.') fixup_parser.set_defaults(func=fixup) + + list = subcommands.add_parser('list', help='List available plugin configs') + list.set_defaults(func=linkedattr_list) + + config = subcommands.add_parser('config', help='Manage plugin configs') + config.add_argument('NAME', help='The Linked Attributes configuration name') + config_subcommands = config.add_subparsers(help='action') + add = config_subcommands.add_parser('add', help='Add the config entry') + add.set_defaults(func=linkedattr_add) + _add_parser_args(add) + edit = config_subcommands.add_parser('set', help='Edit the config entry') + edit.set_defaults(func=linkedattr_edit) + _add_parser_args(edit) + show = config_subcommands.add_parser('show', help='Display the config entry') + show.set_defaults(func=linkedattr_show) + delete = config_subcommands.add_parser('delete', help='Delete the config entry') + delete.set_defaults(func=linkedattr_del) diff --git a/src/lib389/lib389/cli_conf/plugins/managedentries.py b/src/lib389/lib389/cli_conf/plugins/managedentries.py index 18dca1b06..cb5235b1c 100644 --- a/src/lib389/lib389/cli_conf/plugins/managedentries.py +++ b/src/lib389/lib389/cli_conf/plugins/managedentries.py @@ -1,16 +1,231 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- -from lib389.plugins import ManagedEntriesPlugin -from lib389.cli_conf import add_generic_plugin_parsers +import ldap +import json +from lib389.plugins import ManagedEntriesPlugin, MEPConfig, MEPConfigs, MEPTemplate, MEPTemplates +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add + +arg_to_attr = { + 'config_area': 'nsslapd-pluginConfigArea' +} + +arg_to_attr_config = { + 'scope': 'originScope', + 'filter': 'originFilter', + 'managed_base': 'managedBase', + 'managed_template': 'managedTemplate' +} + +arg_to_attr_template = { + 'rdn_attr': 'mepRDNAttr', + 'static_attr': 'mepStaticAttr', + 'mapped_attr': 'mepMappedAttr' +} + + +def mep_edit(inst, basedn, log, args): + log = log.getChild('mep_edit') + plugin = ManagedEntriesPlugin(inst) + generic_object_edit(plugin, log, args, arg_to_attr) + + +def mep_config_list(inst, basedn, log, args): + log = log.getChild('mep_config_list') + plugin = ManagedEntriesPlugin(inst) + config_area = plugin.get_attr_val_utf8_l('nsslapd-pluginConfigArea') + configs = MEPConfigs(inst, config_area) + result = [] + result_json = [] + for config in configs.list(): + if args.json: + result_json.append(config.get_all_attrs_json()) + else: + result.append(config.rdn) + if args.json: + print(json.dumps({"type": "list", "items": result_json})) + else: + if len(result) > 0: + for i in result: + print(i) + else: + print("No Linked Attributes plugin instances") + + +def mep_config_add(inst, basedn, log, args): + log = log.getChild('mep_config_add') + plugin = ManagedEntriesPlugin(inst) + config_area = plugin.get_attr_val_utf8_l('nsslapd-pluginConfigArea') + if config_area is None: + config_area = plugin.dn + props = {'cn': args.NAME} + generic_object_add(MEPConfig, inst, log, args, arg_to_attr_config, basedn=config_area, props=props) + + +def mep_config_edit(inst, basedn, log, args): + log = log.getChild('mep_config_edit') + plugin = ManagedEntriesPlugin(inst) + config_area = plugin.get_attr_val_utf8_l('nsslapd-pluginConfigArea') + configs = MEPConfigs(inst, config_area) + config = configs.get(args.NAME) + generic_object_edit(config, log, args, arg_to_attr_config) + + +def mep_config_show(inst, basedn, log, args): + log = log.getChild('mep_config_show') + plugin = ManagedEntriesPlugin(inst) + config_area = plugin.get_attr_val_utf8_l('nsslapd-pluginConfigArea') + configs = MEPConfigs(inst, config_area) + config = configs.get(args.NAME) + + if not config.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % args.name) + if args and args.json: + o_str = config.get_all_attrs_json() + print(o_str) + else: + print(config.display()) + + +def mep_config_del(inst, basedn, log, args): + log = log.getChild('mep_config_del') + plugin = ManagedEntriesPlugin(inst) + config_area = plugin.get_attr_val_utf8_l('nsslapd-pluginConfigArea') + configs = MEPConfigs(inst, config_area) + config = configs.get(args.NAME) + config.delete() + log.info("Successfully deleted the %s", config.dn) + + +def mep_template_list(inst, basedn, log, args): + log = log.getChild('mep_template_list') + templates = MEPTemplates(inst, args.BASEDN) + result = [] + result_json = [] + for template in templates.list(): + if args.json: + result_json.append(template.get_all_attrs_json()) + else: + result.append(template.rdn) + if args.json: + print(json.dumps({"type": "list", "items": result_json})) + else: + if len(result) > 0: + for i in result: + print(i) + else: + print("No Linked Attributes plugin instances") + + +def 
mep_template_add(inst, basedn, log, args): + log = log.getChild('mep_template_add') + targetdn = args.DN + generic_object_add(MEPTemplate, inst, log, args, arg_to_attr_config, dn=targetdn) + log.info('Don\'t forget to assign the template to Managed Entry Plugin config ' + 'attribute - managedTemplate') + + +def mep_template_edit(inst, basedn, log, args): + log = log.getChild('mep_template_edit') + targetdn = args.DN + templates = MEPTemplates(inst) + template = templates.get(targetdn) + generic_object_edit(template, log, args, arg_to_attr_config) + + +def mep_template_show(inst, basedn, log, args): + log = log.getChild('mep_template_show') + targetdn = args.DN + templates = MEPTemplates(inst) + template = templates.get(targetdn) + + if not template.exists(): + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % targetdn) + if args and args.json: + o_str = template.get_all_attrs_json() + print(o_str) + else: + print(template.display()) + + +def mep_template_del(inst, basedn, log, args): + log = log.getChild('mep_template_del') + targetdn = args.DN + templates = MEPTemplates(inst) + template = templates.get(targetdn) + template.delete() + log.info("Successfully deleted the %s", targetdn) + + +def _add_parser_args_config(parser): + parser.add_argument('--scope', help='Sets the scope of the search to use to see ' + 'which entries the plug-in monitors (originScope)') + parser.add_argument('--filter', help='Sets the search filter to use to search for and identify the entries ' + 'within the subtree which require a managed entry (originFilter)') + parser.add_argument('--managed-base', help='Sets the subtree under which to create ' + 'the managed entries (managedBase)') + parser.add_argument('--managed-template', help='Identifies the template entry to use to create ' + 'the managed entry (managedTemplate)') + + +def _add_parser_args_template(parser): + parser.add_argument('--rdn-attr', help='Sets which attribute to use as the naming attribute ' + 'in the automatically-generated entry (mepRDNAttr)') + parser.add_argument('--static-attr', help='Sets an attribute with a defined value that must be added ' + 'to the automatically-generated entry (mepStaticAttr)') + parser.add_argument('--mapped-attr', nargs='+', + help='Sets an attribute in the Managed Entries template entry which must exist ' + 'in the generated entry (mepMappedAttr)') def create_parser(subparsers): - managedentries_parser = subparsers.add_parser('managedentries', help='Manage and configure Managed Entries plugin') - subcommands = managedentries_parser.add_subparsers(help='action') + mep = subparsers.add_parser('managed-entries', help='Manage and configure Managed Entries Plugin') + subcommands = mep.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, ManagedEntriesPlugin) + + edit = subcommands.add_parser('set', help='Edit the plugin') + edit.set_defaults(func=mep_edit) + edit.add_argument('--config-area', help='The value to set as nsslapd-pluginConfigArea') + + list = subcommands.add_parser('list', help='List Managed Entries Plugin configs and templates') + subcommands_list = list.add_subparsers(help='action') + list_configs = subcommands_list.add_parser('configs', help='List Managed Entries Plugin configs (list config-area ' + 'if specified in the main plugin entry)') + list_configs.set_defaults(func=mep_config_list) + list_templates = subcommands_list.add_parser('templates', + help='List Managed Entries Plugin templates in the directory') + list_templates.add_argument('BASEDN', help='The base DN where to search 
the templates.') + list_templates.set_defaults(func=mep_template_list) + + config = subcommands.add_parser('config', help='Handle Managed Entries Plugin configs') + config.add_argument('NAME', help='The config entry CN.') + config_subcommands = config.add_subparsers(help='action') + add = config_subcommands.add_parser('add', help='Add the config entry') + add.set_defaults(func=mep_config_add) + _add_parser_args_config(add) + edit = config_subcommands.add_parser('set', help='Edit the config entry') + edit.set_defaults(func=mep_config_edit) + _add_parser_args_config(edit) + show = config_subcommands.add_parser('show', help='Display the config entry') + show.set_defaults(func=mep_config_show) + delete = config_subcommands.add_parser('delete', help='Delete the config entry') + delete.set_defaults(func=mep_config_del) + + template = subcommands.add_parser('template', help='Handle Managed Entries Plugin templates') + template.add_argument('DN', help='The template entry DN.') + template_subcommands = template.add_subparsers(help='action') + add = template_subcommands.add_parser('add', help='Add the template entry') + add.set_defaults(func=mep_template_add) + _add_parser_args_template(add) + edit = template_subcommands.add_parser('set', help='Edit the template entry') + edit.set_defaults(func=mep_template_edit) + _add_parser_args_template(edit) + show = template_subcommands.add_parser('show', help='Display the template entry') + show.set_defaults(func=mep_template_show) + delete = template_subcommands.add_parser('delete', help='Delete the template entry') + delete.set_defaults(func=mep_template_del) diff --git a/src/lib389/lib389/cli_conf/plugins/memberof.py b/src/lib389/lib389/cli_conf/plugins/memberof.py index 90fb77412..fe54d0271 100644 --- a/src/lib389/lib389/cli_conf/plugins/memberof.py +++ b/src/lib389/lib389/cli_conf/plugins/memberof.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # Copyright (C) 2019 William Brown <[email protected]> # All rights reserved. 
# @@ -8,7 +8,7 @@ # --- END COPYRIGHT BLOCK --- import ldap -from lib389.plugins import MemberOfPlugin, Plugins, MemberOfSharedConfig +from lib389.plugins import MemberOfPlugin, Plugins, MemberOfSharedConfig, MemberOfSharedConfigs from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add arg_to_attr = { @@ -26,18 +26,15 @@ arg_to_attr = { def memberof_edit(inst, basedn, log, args): log = log.getChild('memberof_edit') - plugins = Plugins(inst) - plugin = plugins.get("MemberOf Plugin") + plugin = MemberOfPlugin(inst) generic_object_edit(plugin, log, args, arg_to_attr) def memberof_add_config(inst, basedn, log, args): log = log.getChild('memberof_add_config') targetdn = args.DN - config = MemberOfSharedConfig(inst, targetdn) - generic_object_add(config, log, args, arg_to_attr) - plugins = Plugins(inst) - plugin = plugins.get("MemberOf Plugin") + config = generic_object_add(MemberOfSharedConfig, inst, log, args, arg_to_attr, dn=targetdn) + plugin = MemberOfPlugin(inst) plugin.replace('nsslapd-pluginConfigArea', config.dn) log.info('MemberOf attribute nsslapd-pluginConfigArea (config-entry) ' 'was set in the main plugin config') @@ -87,50 +84,60 @@ def fixup(inst, basedn, log, args): def _add_parser_args(parser): - parser.add_argument('--attr', nargs='+', help='The value to set as memberOfAttr') - parser.add_argument('--groupattr', nargs='+', help='The value to set as memberOfGroupAttr') + parser.add_argument('--attr', nargs='+', + help='Specifies the attribute in the user entry for the Directory Server ' + 'to manage to reflect group membership (memberOfAttr)') + parser.add_argument('--groupattr', nargs='+', + help='Specifies the attribute in the group entry to use to identify ' + 'the DNs of group members (memberOfGroupAttr)') parser.add_argument('--allbackends', choices=['on', 'off'], type=str.lower, - help='The value to set as memberOfAllBackends') + help='Specifies whether to search the local suffix for user entries on ' + 'all available suffixes (memberOfAllBackends)') parser.add_argument('--skipnested', choices=['on', 'off'], type=str.lower, - help='The value to set as memberOfSkipNested') - parser.add_argument('--scope', help='The value to set as memberOfEntryScope') - parser.add_argument('--exclude', help='The value to set as memberOfEntryScopeExcludeSubtree') - parser.add_argument('--autoaddoc', type=str.lower, help='The value to set as memberOfAutoAddOC') + help='Specifies wherher to skip nested groups or not (memberOfSkipNested)') + parser.add_argument('--scope', help='Specifies backends or multiple-nested suffixes ' + 'for the MemberOf plug-in to work on (memberOfEntryScope)') + parser.add_argument('--exclude', help='Specifies backends or multiple-nested suffixes ' + 'for the MemberOf plug-in to exclude (memberOfEntryScopeExcludeSubtree)') + parser.add_argument('--autoaddoc', type=str.lower, + help='If an entry does not have an object class that allows the memberOf attribute ' + 'then the memberOf plugin will automatically add the object class listed ' + 'in the memberOfAutoAddOC parameter') def create_parser(subparsers): - memberof_parser = subparsers.add_parser('memberof', help='Manage and configure MemberOf plugin') + memberof = subparsers.add_parser('memberof', help='Manage and configure MemberOf plugin') - subcommands = memberof_parser.add_subparsers(help='action') + subcommands = memberof.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, MemberOfPlugin) - edit_parser = subcommands.add_parser('edit', help='Edit the 
plugin') - edit_parser.set_defaults(func=memberof_edit) - _add_parser_args(edit_parser) - edit_parser.add_argument('--config-entry', help='The value to set as nsslapd-pluginConfigArea') - - config_parser = subcommands.add_parser('config-entry', help='Manage the config entry') - config_subcommands = config_parser.add_subparsers(help='action') - add_config_parser = config_subcommands.add_parser('add', help='Add the config entry') - add_config_parser.set_defaults(func=memberof_add_config) - add_config_parser.add_argument('DN', help='The config entry full DN') - _add_parser_args(add_config_parser) - edit_config_parser = config_subcommands.add_parser('edit', help='Edit the config entry') - edit_config_parser.set_defaults(func=memberof_edit_config) - edit_config_parser.add_argument('DN', help='The config entry full DN') - _add_parser_args(edit_config_parser) - show_config_parser = config_subcommands.add_parser('show', help='Display the config entry') - show_config_parser.set_defaults(func=memberof_show_config) - show_config_parser.add_argument('DN', help='The config entry full DN') - del_config_parser = config_subcommands.add_parser('delete', help='Delete the config entry') - del_config_parser.set_defaults(func=memberof_del_config) - del_config_parser.add_argument('DN', help='The config entry full DN') - - fixup_parser = subcommands.add_parser('fixup', help='Run the fix-up task for memberOf plugin') - fixup_parser.set_defaults(func=fixup) - fixup_parser.add_argument('DN', help="base DN that contains entries to fix up") - fixup_parser.add_argument('-f', '--filter', - help='Filter for entries to fix up.\n If omitted, all entries with objectclass ' - 'inetuser/inetadmin/nsmemberof under the specified base will have ' - 'their memberOf attribute regenerated.') + edit = subcommands.add_parser('set', help='Edit the plugin') + edit.set_defaults(func=memberof_edit) + _add_parser_args(edit) + edit.add_argument('--config-entry', help='The value to set as nsslapd-pluginConfigArea') + + config = subcommands.add_parser('config-entry', help='Manage the config entry') + config_subcommands = config.add_subparsers(help='action') + add_config = config_subcommands.add_parser('add', help='Add the config entry') + add_config.set_defaults(func=memberof_add_config) + add_config.add_argument('DN', help='The config entry full DN') + _add_parser_args(add_config) + edit_config = config_subcommands.add_parser('set', help='Edit the config entry') + edit_config.set_defaults(func=memberof_edit_config) + edit_config.add_argument('DN', help='The config entry full DN') + _add_parser_args(edit_config) + show_config = config_subcommands.add_parser('show', help='Display the config entry') + show_config.set_defaults(func=memberof_show_config) + show_config.add_argument('DN', help='The config entry full DN') + del_config_ = config_subcommands.add_parser('delete', help='Delete the config entry') + del_config_.set_defaults(func=memberof_del_config) + del_config_.add_argument('DN', help='The config entry full DN') + + fixup = subcommands.add_parser('fixup', help='Run the fix-up task for memberOf plugin') + fixup.set_defaults(func=fixup) + fixup.add_argument('DN', help="Base DN that contains entries to fix up") + fixup.add_argument('-f', '--filter', + help='Filter for entries to fix up.\n If omitted, all entries with objectclass ' + 'inetuser/inetadmin/nsmemberof under the specified base will have ' + 'their memberOf attribute regenerated.') diff --git a/src/lib389/lib389/cli_conf/plugins/passthroughauth.py 
b/src/lib389/lib389/cli_conf/plugins/passthroughauth.py index ef6729e24..616119a77 100644 --- a/src/lib389/lib389/cli_conf/plugins/passthroughauth.py +++ b/src/lib389/lib389/cli_conf/plugins/passthroughauth.py @@ -1,16 +1,88 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- +import json +import ldap from lib389.plugins import PassThroughAuthenticationPlugin from lib389.cli_conf import add_generic_plugin_parsers +def pta_list(inst, basedn, log, args): + log = log.getChild('pta_list') + plugin = PassThroughAuthenticationPlugin(inst) + result = [] + urls = plugin.get_urls() + if args.json: + print(json.dumps({"type": "list", "items": urls})) + else: + if len(urls) > 0: + for i in result: + print(i) + else: + print("No Pass Through Auth attributes were found") + + +def pta_add(inst, basedn, log, args): + log = log.getChild('pta_add') + plugin = PassThroughAuthenticationPlugin(inst) + urls = list(map(lambda url: url.lower(), plugin.get_urls())) + if args.URL.lower() in urls: + raise ldap.ALREADY_EXISTS("Entry %s already exists" % args.URL) + plugin.add("nsslapd-pluginarg%s" % len(urls), args.URL) + + +def pta_edit(inst, basedn, log, args): + log = log.getChild('pta_edit') + plugin = PassThroughAuthenticationPlugin(inst) + urls = list(map(lambda url: url.lower(), plugin.get_urls())) + old_url_l = args.OLD_URL.lower() + if old_url_l not in urls: + log.info("Entry %s doesn't exists. Adding a new value." % args.OLD_URL) + url_num = len(urls) + else: + url_num = urls.index(old_url_l) + plugin.remove("nsslapd-pluginarg%s" % url_num, old_url_l) + plugin.add("nsslapd-pluginarg%s" % url_num, args.NEW_URL) + + +def pta_del(inst, basedn, log, args): + log = log.getChild('pta_del') + plugin = PassThroughAuthenticationPlugin(inst) + urls = list(map(lambda url: url.lower(), plugin.get_urls())) + old_url_l = args.URL.lower() + if old_url_l not in urls: + raise ldap.NO_SUCH_OBJECT("Entry %s doesn't exists" % args.URL) + + plugin.remove_all("nsslapd-pluginarg%s" % urls.index(old_url_l)) + log.info("Successfully deleted %s", args.URL) + + def create_parser(subparsers): - passthroughauth_parser = subparsers.add_parser('passthroughauth', help='Manage and configure Pass-Through Authentication plugin') + passthroughauth_parser = subparsers.add_parser('pass-through-auth', help='Manage and configure Pass-Through Authentication plugin') subcommands = passthroughauth_parser.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, PassThroughAuthenticationPlugin) + + list = subcommands.add_parser('list', help='List available plugin configs') + list.set_defaults(func=pta_list) + + add = subcommands.add_parser('add', help='Add the config entry') + add.add_argument('URL', help='The full LDAP URL in format ' + '"ldap|ldaps://authDS/subtree maxconns,maxops,timeout,ldver,connlifetime,startTLS". ' + 'If one optional parameter is specified the rest should be specified too') + add.set_defaults(func=pta_add) + + edit = subcommands.add_parser('modify', help='Edit the config entry') + edit.add_argument('OLD_URL', help='The full LDAP URL you get from the "list" command') + edit.add_argument('NEW_URL', help='The full LDAP URL in format ' + '"ldap|ldaps://authDS/subtree maxconns,maxops,timeout,ldver,connlifetime,startTLS". 
' + 'If one optional parameter is specified the rest should be specified too') + edit.set_defaults(func=pta_edit) + + delete = subcommands.add_parser('delete', help='Delete the config entry') + delete.add_argument('URL', help='The full LDAP URL you get from the "list" command') + delete.set_defaults(func=pta_del) diff --git a/src/lib389/lib389/cli_conf/plugins/referint.py b/src/lib389/lib389/cli_conf/plugins/referint.py index bf4d07c58..9482a14b4 100644 --- a/src/lib389/lib389/cli_conf/plugins/referint.py +++ b/src/lib389/lib389/cli_conf/plugins/referint.py @@ -1,197 +1,57 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- -import ldap - from lib389.plugins import ReferentialIntegrityPlugin -from lib389.cli_conf import add_generic_plugin_parsers - - -def manage_update_delay(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - if args.value is None: - val = plugin.get_update_delay_formatted() - log.info(val) - else: - plugin.set_update_delay(args.value) - log.info('referint-update-delay set to "{}"'.format(args.value)) - -def display_membership_attr(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - log.info(plugin.get_membership_attr_formatted()) - -def add_membership_attr(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.add_membership_attr(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - log.info('Value "{}" already exists.'.format(args.value)) - else: - log.info('successfully added membership attribute "{}"'.format(args.value)) - -def remove_membership_attr(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.remove_membership_attr(args.value) - except ldap.OPERATIONS_ERROR: - log.error("Error: Failed to delete. At least one value for membership attribute should exist.") - except ldap.NO_SUCH_ATTRIBUTE: - log.error('Error: Failed to delete. No value "{0}" found.'.format(args.value)) - else: - log.info('successfully removed membership attribute "{}"'.format(args.value)) - -def display_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - val = plugin.get_entryscope_formatted() - if not val: - log.info("nsslapd-pluginEntryScope is not set") - else: - log.info(val) - -def add_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.add_entryscope(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - log.info('Value "{}" already exists.'.format(args.value)) - else: - log.info('successfully added nsslapd-pluginEntryScope value "{}"'.format(args.value)) - -def remove_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.remove_entryscope(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - log.error('Error: Failed to delete. 
No value "{0}" found.'.format(args.value)) - else: - log.info('successfully removed nsslapd-pluginEntryScope value "{}"'.format(args.value)) - -def remove_all_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - plugin.remove_all_entryscope() - log.info('successfully removed all nsslapd-pluginEntryScope values') +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit -def display_excludescope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - val = plugin.get_excludescope_formatted() - if not val: - log.info("nsslapd-pluginExcludeEntryScope is not set") - else: - log.info(val) +arg_to_attr = { + 'update_delay': 'referint-update-delay', + 'membership_attr': 'referint-membership-attr', + 'entry_scope': 'nsslapd-pluginEntryScope', + 'exclude_entry_scope': 'nsslapd-pluginExcludeEntryScope', + 'container_scope': 'nsslapd-pluginContainerScope', +} -def add_excludescope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.add_excludescope(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - log.info('Value "{}" already exists.'.format(args.value)) - else: - log.info('successfully added nsslapd-pluginExcludeEntryScope value "{}"'.format(args.value)) -def remove_excludescope(inst, basedn, log, args): +def referint_edit(inst, basedn, log, args): + log = log.getChild('referint_edit') plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.remove_excludescope(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - log.error('Error: Failed to delete. No value "{0}" found.'.format(args.value)) - else: - log.info('successfully removed nsslapd-pluginExcludeEntryScope value "{}"'.format(args.value)) + generic_object_edit(plugin, log, args, arg_to_attr) -def remove_all_excludescope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - plugin.remove_all_excludescope() - log.info('successfully removed all nsslapd-pluginExcludeEntryScope values') -def display_container_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - val = plugin.get_container_scope_formatted() - if not val: - log.info("nsslapd-pluginContainerScope is not set") - else: - log.info(val) - -def add_container_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.add_container_scope(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - log.info('Value "{}" already exists.'.format(args.value)) - else: - log.info('successfully added nsslapd-pluginContainerScope value "{}"'.format(args.value)) - -def remove_container_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - try: - plugin.remove_container_scope(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - log.error('Error: Failed to delete. No value "{0}" found.'.format(args.value)) - else: - log.info('successfully removed nsslapd-pluginContainerScope value "{}"'.format(args.value)) - -def remove_all_container_scope(inst, basedn, log, args): - plugin = ReferentialIntegrityPlugin(inst) - plugin.remove_all_container_scope() - log.info('successfully removed all nsslapd-pluginContainerScope values') +def _add_parser_args(parser): + parser.add_argument('--update-delay', + help='Sets the update interval. 
Special values: 0 - The check is performed immediately, ' + '-1 - No check is performed (referint-update-delay)') + parser.add_argument('--membership-attr', nargs='+', + help='Specifies attributes to check for and update (referint-membership-attr)') + parser.add_argument('--entry-scope', + help='Defines the subtree in which the plug-in looks for the delete ' + 'or rename operations of a user entry (nsslapd-pluginEntryScope)') + parser.add_argument('--exclude-entry-scope', + help='Defines the subtree in which the plug-in ignores any operations ' + 'for deleting or renaming a user (nsslapd-pluginExcludeEntryScope)') + parser.add_argument('--container_scope', + help='Specifies which branch the plug-in searches for the groups to which the user belongs. ' + 'It only updates groups that are under the specified container branch, ' + 'and leaves all other groups not updated (nsslapd-pluginContainerScope)') def create_parser(subparsers): - referint_parser = subparsers.add_parser('referint', help='Manage and configure Referential Integrity plugin') + referint = subparsers.add_parser('referential-integrity', + help='Manage and configure Referential Integrity Postoperation plugin') - subcommands = referint_parser.add_subparsers(help='action') + subcommands = referint.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, ReferentialIntegrityPlugin) - delay_parser = subcommands.add_parser('delay', help='get or set update delay') - delay_parser.set_defaults(func=manage_update_delay) - delay_parser.add_argument('value', nargs='?', help='The value to set as update delay') - - attr_parser = subcommands.add_parser('attrs', help='get or manage membership attributes') - attr_parser.set_defaults(func=display_membership_attr) - attr_subcommands = attr_parser.add_subparsers(help='action') - add_attr_parser = attr_subcommands.add_parser('add', help='add membership attribute') - add_attr_parser.set_defaults(func=add_membership_attr) - add_attr_parser.add_argument('value', help='membership attribute to add') - del_attr_parser = attr_subcommands.add_parser('del', help='remove membership attribute') - del_attr_parser.set_defaults(func=remove_membership_attr) - del_attr_parser.add_argument('value', help='membership attribute to remove') - - scope_parser = subcommands.add_parser('scope', help='get or manage referint scope') - scope_parser.set_defaults(func=display_scope) - scope_subcommands = scope_parser.add_subparsers(help='action') - add_scope_parser = scope_subcommands.add_parser('add', help='add entry scope value') - add_scope_parser.set_defaults(func=add_scope) - add_scope_parser.add_argument('value', help='The value to add in referint entry scope') - del_scope_parser = scope_subcommands.add_parser('del', help='remove entry scope value') - del_scope_parser.set_defaults(func=remove_scope) - del_scope_parser.add_argument('value', help='The value to remove from entry scope') - delall_scope_parser = scope_subcommands.add_parser('delall', help='remove all entry scope values') - delall_scope_parser.set_defaults(func=remove_all_scope) + edit = subcommands.add_parser('set', help='Edit the plugin') + edit.set_defaults(func=referint_edit) + _add_parser_args(edit) - exclude_parser = subcommands.add_parser('exclude', help='get or manage referint exclude scope') - exclude_parser.set_defaults(func=display_excludescope) - exclude_subcommands = exclude_parser.add_subparsers(help='action') - add_exclude_parser = exclude_subcommands.add_parser('add', help='add exclude scope value') - 
add_exclude_parser.set_defaults(func=add_excludescope) - add_exclude_parser.add_argument('value', help='The value to add in exclude scope') - del_exclude_parser = exclude_subcommands.add_parser('del', help='remove exclude scope value') - del_exclude_parser.set_defaults(func=remove_excludescope) - del_exclude_parser.add_argument('value', help='The value to remove from exclude scope') - delall_exclude_parser = exclude_subcommands.add_parser('delall', help='remove all exclude scope values') - delall_exclude_parser.set_defaults(func=remove_all_excludescope) - container_parser = subcommands.add_parser('container', help='get or manage referint container scope') - container_parser.set_defaults(func=display_container_scope) - container_subcommands = container_parser.add_subparsers(help='action') - add_container_parser = container_subcommands.add_parser('add', help='add container scope value') - add_container_parser.set_defaults(func=add_container_scope) - add_container_parser.add_argument('value', help='The value to add in container scope') - del_container_parser = container_subcommands.add_parser('del', help='remove container scope value') - del_container_parser.set_defaults(func=remove_container_scope) - del_container_parser.add_argument('value', help='The value to remove from container scope') - delall_container_parser = container_subcommands.add_parser('delall', help='remove all container scope values') - delall_container_parser.set_defaults(func=remove_all_container_scope) diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py index 133d8117f..912c12761 100644 --- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py +++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
@@ -7,10 +7,44 @@ # --- END COPYRIGHT BLOCK --- from lib389.plugins import RetroChangelogPlugin -from lib389.cli_conf import add_generic_plugin_parsers +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit + +arg_to_attr = { + 'is-replicated': 'isReplicated', + 'attribute': 'nsslapd-attribute', + 'directory': 'nsslapd-changelogdir', + 'max-age': 'nsslapd-changelogmaxage', +} + + +def retrochangelog_edit(inst, basedn, log, args): + log = log.getChild('retrochangelog_edit') + plugin = RetroChangelogPlugin(inst) + generic_object_edit(plugin, log, args, arg_to_attr) + + +def _add_parser_args(parser): + parser.add_argument('--is-replicated', choices=['true', 'false'], + help='Sets a flag to indicate on a change in the changelog whether the change is newly made ' + 'on that server or whether it was replicated over from another server (isReplicated)') + parser.add_argument('--attribute', + help='Specifies another Directory Server attribute which must be included in ' + 'the retro changelog entries (nsslapd-attribute)') + parser.add_argument('--directory', + help='Specifies the name of the directory in which the changelog database ' + 'is created the first time the plug-in is run') + parser.add_argument('--max-age', + help='This attribute specifies the maximum age of any entry ' + 'in the changelog (nsslapd-changelogmaxage)') def create_parser(subparsers): - retrochangelog_parser = subparsers.add_parser('retrochangelog', help='Manage and configure Retro Changelog plugin') - subcommands = retrochangelog_parser.add_subparsers(help='action') + retrochangelog = subparsers.add_parser('retro-changelog', help='Manage and configure Retro Changelog plugin') + subcommands = retrochangelog.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, RetroChangelogPlugin) + + edit = subcommands.add_parser('set', help='Edit the plugin') + edit.set_defaults(func=retrochangelog_edit) + _add_parser_args(edit) + + diff --git a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py index 7e1c017e6..63838a98f 100644 --- a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py +++ b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py @@ -1,229 +1,68 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- -import ldap - from lib389.plugins import RootDNAccessControlPlugin -from lib389.cli_conf import add_generic_plugin_parsers - - -def display_time(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - val = plugin.get_open_time_formatted() - if not val: - log.info("rootdn-open-time is not set") - else: - log.info(val) - val = plugin.get_close_time_formatted() - if not val: - log.info("rootdn-close-time is not set") - else: - log.info(val) - -def set_open_time(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - plugin.set_open_time(args.value) - log.info('rootdn-open-time set to "{}"'.format(args.value)) - -def set_close_time(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - plugin.set_close_time(args.value) - log.info('rootdn-close-time set to "{}"'.format(args.value)) - -def clear_time(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - plugin.remove_open_time() - plugin.remove_close_time() - log.info('time-based policy was cleared') - -def display_ips(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - allowed_ips = plugin.get_allow_ip_formatted() - denied_ips = plugin.get_deny_ip_formatted() - if not allowed_ips and not denied_ips: - log.info("No ip-based access control policy has been configured") - else: - log.info(allowed_ips) - log.info(denied_ips) - -def allow_ip(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - - # remove ip from denied ips - try: - plugin.remove_deny_ip(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - pass - - try: - plugin.add_allow_ip(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - pass - log.info('{} added to rootdn-allow-ip'.format(args.value)) - -def deny_ip(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - - # remove ip from allowed ips - try: - plugin.remove_allow_ip(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - pass - - try: - plugin.add_deny_ip(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - pass - log.info('{} added to rootdn-deny-ip'.format(args.value)) - -def clear_all_ips(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - plugin.remove_all_allow_ip() - plugin.remove_all_deny_ip() - log.info('ip-based policy was cleared') - -def display_hosts(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - allowed_hosts = plugin.get_allow_host_formatted() - denied_hosts = plugin.get_deny_host_formatted() - if not allowed_hosts and not denied_hosts: - log.info("No host-based access control policy has been configured") - else: - log.info(allowed_hosts) - log.info(denied_hosts) - -def allow_host(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - - # remove host from denied hosts - try: - plugin.remove_deny_host(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - pass - - try: - plugin.add_allow_host(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - pass - log.info('{} added to rootdn-allow-host'.format(args.value)) - -def deny_host(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - - # remove host from allowed hosts - try: - plugin.remove_allow_host(args.value) - except ldap.NO_SUCH_ATTRIBUTE: - pass - - try: - plugin.add_deny_host(args.value) - except ldap.TYPE_OR_VALUE_EXISTS: - pass - log.info('{} added to rootdn-deny-host'.format(args.value)) - -def clear_all_hosts(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - plugin.remove_all_allow_host() - plugin.remove_all_deny_host() - log.info('host-based 
policy was cleared') - -def display_days(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - days = plugin.get_days_allowed_formatted() - if not days: - log.info("No day-based access control policy has been configured") - else: - log.info(days) - -def allow_day(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - args.value = args.value[0:3] - plugin.add_allow_day(args.value) - log.info('{} added to rootdn-days-allowed'.format(args.value)) - -def deny_day(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - args.value = args.value[0:3] - plugin.remove_allow_day(args.value) - log.info('{} removed from rootdn-days-allowed'.format(args.value)) - -def clear_all_days(inst, basedn, log, args): - plugin = RootDNAccessControlPlugin(inst) - plugin.remove_days_allowed() - log.info('day-based policy was cleared') +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit + +arg_to_attr = { + 'allow_host': 'rootdn-allow-host', + 'deny_host': 'rootdn-deny-host', + 'allow_ip': 'rootdn-allow-ip', + 'deny_ip': 'rootdn-deny-ip', + 'open_time': 'rootdn-open-time', + 'close_time': 'rootdn-close-time', + 'days_allowed': 'rootdn-days-allowed' +} + + +def rootdn_edit(inst, basedn, log, args): + log = log.getChild('rootdn_edit') + plugin = RootDNAccessControlPlugin(inst) + generic_object_edit(plugin, log, args, arg_to_attr) + + +def _add_parser_args(parser): + parser.add_argument('--allow-host', + help='Sets what hosts, by fully-qualified domain name, the root user is allowed to use ' + 'to access the Directory Server. Any hosts not listed are implicitly denied ' + '(rootdn-allow-host)') + parser.add_argument('--deny-host', + help='Sets what hosts, by fully-qualified domain name, the root user is not allowed to use ' + 'to access the Directory Server Any hosts not listed are implicitly allowed ' + '(rootdn-deny-host). If an host address is listed in both the rootdn-allow-host and ' + 'rootdn-deny-host attributes, it is denied access.') + parser.add_argument('--allow-ip', + help='Sets what IP addresses, either IPv4 or IPv6, for machines the root user is allowed ' + 'to use to access the Directory Server Any IP addresses not listed are implicitly ' + 'denied (rootdn-allow-ip)') + parser.add_argument('--deny-ip', + help='Sets what IP addresses, either IPv4 or IPv6, for machines the root user is not allowed ' + 'to use to access the Directory Server. Any IP addresses not listed are implicitly ' + 'allowed (rootdn-deny-ip) If an IP address is listed in both the rootdn-allow-ip and ' + 'rootdn-deny-ip attributes, it is denied access.') + parser.add_argument('--open-time', + help='Sets part of a time period or range when the root user is allowed to access ' + 'the Directory Server. This sets when the time-based access begins (rootdn-open-time)') + parser.add_argument('--close-time', + help='Sets part of a time period or range when the root user is allowed to access ' + 'the Directory Server. This sets when the time-based access ends (rootdn-close-time)') + parser.add_argument('--days-allowed', + help='Gives a comma-separated list of what days the root user is allowed to use to access ' + 'the Directory Server. 
Any days listed are implicitly denied (rootdn-days-allowed)') def create_parser(subparsers): - rootdnac_parser = subparsers.add_parser('rootdn', help='Manage and configure RootDN Access Control plugin') + rootdnac_parser = subparsers.add_parser('root-dn', help='Manage and configure RootDN Access Control plugin') subcommands = rootdnac_parser.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, RootDNAccessControlPlugin) - time_parser = subcommands.add_parser('time', help='get or set rootdn open and close times') - time_parser.set_defaults(func=display_time) - - time_subcommands = time_parser.add_subparsers(help='action') - - open_time_parser = time_subcommands.add_parser('open', help='set open time value') - open_time_parser.set_defaults(func=set_open_time) - open_time_parser.add_argument('value', help='Value to set as open time') - - close_time_parser = time_subcommands.add_parser('close', help='set close time value') - close_time_parser.set_defaults(func=set_close_time) - close_time_parser.add_argument('value', help='Value to set as close time') - - time_clear_parser = time_subcommands.add_parser('clear', help='reset time-based access policy') - time_clear_parser.set_defaults(func=clear_time) - - ip_parser = subcommands.add_parser('ip', help='get or set ip access policy') - ip_parser.set_defaults(func=display_ips) - - ip_subcommands = ip_parser.add_subparsers(help='action') - - ip_allow_parser = ip_subcommands.add_parser('allow', help='allow IP addr or IP addr range') - ip_allow_parser.set_defaults(func=allow_ip) - ip_allow_parser.add_argument('value', help='IP addr or IP addr range') - - ip_deny_parser = ip_subcommands.add_parser('deny', help='deny IP addr or IP addr range') - ip_deny_parser.set_defaults(func=deny_ip) - ip_deny_parser.add_argument('value', help='IP addr or IP addr range') - - ip_clear_parser = ip_subcommands.add_parser('clear', help='reset IP-based access policy') - ip_clear_parser.set_defaults(func=clear_all_ips) - - host_parser = subcommands.add_parser('host', help='get or set host access policy') - host_parser.set_defaults(func=display_hosts) - - host_subcommands = host_parser.add_subparsers(help='action') - - host_allow_parser = host_subcommands.add_parser('allow', help='allow host address') - host_allow_parser.set_defaults(func=allow_host) - host_allow_parser.add_argument('value', help='host address') - - host_deny_parser = host_subcommands.add_parser('deny', help='deny host address') - host_deny_parser.set_defaults(func=deny_host) - host_deny_parser.add_argument('value', help='host address') - - host_clear_parser = host_subcommands.add_parser('clear', help='reset host-based access policy') - host_clear_parser.set_defaults(func=clear_all_hosts) - - day_parser = subcommands.add_parser('day', help='get or set days access policy') - day_parser.set_defaults(func=display_days) - - day_subcommands = day_parser.add_subparsers(help='action') - - day_allow_parser = day_subcommands.add_parser('allow', help='allow day of the week') - day_allow_parser.set_defaults(func=allow_day) - day_allow_parser.add_argument('value', type=str.capitalize, help='day of the week') + edit = subcommands.add_parser('set', help='Edit the plugin') + edit.set_defaults(func=rootdn_edit) + _add_parser_args(edit) - day_deny_parser = day_subcommands.add_parser('deny', help='deny day of the week') - day_deny_parser.set_defaults(func=deny_day) - day_deny_parser.add_argument('value', type=str.capitalize, help='day of the week') - day_clear_parser = day_subcommands.add_parser('clear', 
help='reset day-based access policy') - day_clear_parser.set_defaults(func=clear_all_days) diff --git a/src/lib389/lib389/cli_conf/plugins/usn.py b/src/lib389/lib389/cli_conf/plugins/usn.py index 59349fe16..634ca7fe0 100644 --- a/src/lib389/lib389/cli_conf/plugins/usn.py +++ b/src/lib389/lib389/cli_conf/plugins/usn.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). @@ -17,40 +17,55 @@ def display_usn_mode(inst, basedn, log, args): else: log.info("USN global mode is disabled") + def enable_global_mode(inst, basedn, log, args): plugin = USNPlugin(inst) plugin.enable_global_mode() log.info("USN global mode enabled") + def disable_global_mode(inst, basedn, log, args): plugin = USNPlugin(inst) plugin.disable_global_mode() log.info("USN global mode disabled") + def tombstone_cleanup(inst, basedn, log, args): plugin = USNPlugin(inst) - log.info('Attempting to add task entry... This will fail if replication is enabled or if USN plug-in is disabled.') + log.info('Attempting to add task entry...') + if not plugin.status(): + log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn) task = plugin.cleanup(args.suffix, args.backend, args.maxusn) - log.info('Successfully added task entry ' + task.dn) + task.wait() + exitcode = task.get_exit_code() + if exitcode != 0: + log.error('USM tombstone cleanup task has failed. Please, check logs') + else: + log.info('Successfully added task entry') + def create_parser(subparsers): usn_parser = subparsers.add_parser('usn', help='Manage and configure USN plugin') - subcommands = usn_parser.add_subparsers(help='action') - add_generic_plugin_parsers(subcommands, USNPlugin) - global_mode_parser = subcommands.add_parser('global', help='get or manage global usn mode') + global_mode_parser = subcommands.add_parser('global', help='Get or manage global usn mode (nsslapd-entryusn-global)') global_mode_parser.set_defaults(func=display_usn_mode) global_mode_subcommands = global_mode_parser.add_subparsers(help='action') - on_global_mode_parser = global_mode_subcommands.add_parser('on', help='enable usn global mode') + on_global_mode_parser = global_mode_subcommands.add_parser('on', help='Enable usn global mode') on_global_mode_parser.set_defaults(func=enable_global_mode) - off_global_mode_parser = global_mode_subcommands.add_parser('off', help='disable usn global mode') + off_global_mode_parser = global_mode_subcommands.add_parser('off', help='Disable usn global mode') off_global_mode_parser.set_defaults(func=disable_global_mode) - cleanup_parser = subcommands.add_parser('cleanup', help='run the USN tombstone cleanup task') + cleanup_parser = subcommands.add_parser('cleanup', help='Run the USN tombstone cleanup task') cleanup_parser.set_defaults(func=tombstone_cleanup) cleanup_group = cleanup_parser.add_mutually_exclusive_group(required=True) - cleanup_group.add_argument('-s', '--suffix', help="suffix where USN tombstone entries are cleaned up") - cleanup_group.add_argument('-n', '--backend', help="backend instance in which USN tombstone entries are cleaned up (alternative to suffix)") - cleanup_parser.add_argument('-m', '--maxusn', type=int, help="USN tombstone entries are deleted up to the entry with maxusn") + cleanup_group.add_argument('-s', '--suffix', + help='Gives the suffix or subtree in the Directory Server to run the cleanup operation ' + 'against. 
If the suffix is not specified, then the back end must be given (suffix)') + cleanup_group.add_argument('-n', '--backend', + help='Gives the Directory Server instance back end, or database, to run the cleanup ' + 'operation against. If the back end is not specified, then the suffix must be ' + 'specified.Backend instance in which USN tombstone entries (backend)') + cleanup_parser.add_argument('-m', '--maxusn', type=int, help='Gives the highest USN value to delete when ' + 'removing tombstone entries (max_usn_to_delete)') diff --git a/src/lib389/lib389/cli_conf/plugins/whoami.py b/src/lib389/lib389/cli_conf/plugins/whoami.py deleted file mode 100644 index 2c3e62a81..000000000 --- a/src/lib389/lib389/cli_conf/plugins/whoami.py +++ /dev/null @@ -1,16 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- - -from lib389.plugins import WhoamiPlugin -from lib389.cli_conf import add_generic_plugin_parsers - - -def create_parser(subparsers): - whoami_parser = subparsers.add_parser('whoami', help='Manage and configure whoami plugin') - subcommands = whoami_parser.add_subparsers(help='action') - add_generic_plugin_parsers(subcommands, WhoamiPlugin) diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py index 1413f3049..b972aeb63 100644 --- a/src/lib389/lib389/cli_conf/pwpolicy.py +++ b/src/lib389/lib389/cli_conf/pwpolicy.py @@ -199,8 +199,8 @@ def create_parser(subparsers): set_parser.add_argument('--pwdlockout', help="Set to \"on\" to enable account lockout") set_parser.add_argument('--pwdunlock', help="Set to \"on\" to allow an account to become unlocked after the lockout duration") set_parser.add_argument('--pwdlockoutduration', help="The number of seconds an account stays locked out") - set_parser.add_argument('--pwdmaxfailures', help="The maximum number of allowed failed password attempts beforet the acocunt gets locked") - set_parser.add_argument('--pwdresetfailcount', help="The number of secondsto wait before reducingthe failed login count on an account") + set_parser.add_argument('--pwdmaxfailures', help="The maximum number of allowed failed password attempts before the account gets locked") + set_parser.add_argument('--pwdresetfailcount', help="The number of seconds to wait before reducing the failed login count on an account") # Syntax settings set_parser.add_argument('--pwdchecksyntax', help="Set to \"on\" to Enable password syntax checking") set_parser.add_argument('--pwdminlen', help="The minimum number of characters required in a password") diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py index c8b024be3..18a24826c 100644 --- a/src/lib389/lib389/plugins.py +++ b/src/lib389/lib389/plugins.py @@ -6,6 +6,7 @@ # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- +import collections import ldap import copy import os.path @@ -13,6 +14,7 @@ import os.path from lib389 import tasks from lib389._mapped_object import DSLdapObjects, DSLdapObject from lib389.lint import DSRILE0001 +from lib389.utils import ensure_str, ensure_list_bytes from lib389._constants import DN_PLUGIN from lib389.properties import ( PLUGINS_OBJECTCLASS_VALUE, PLUGIN_PROPNAME_TO_ATTRNAME, @@ -158,6 +160,72 @@ class AttributeUniquenessPlugin(Plugin): self.set('uniqueness-across-all-subtrees', 'off') +class AttributeUniquenessPlugins(DSLdapObjects): + """A DSLdapObjects entity which represents Attribute Uniqueness plugin instances + + :param instance: An instance + :type instance: lib389.DirSrv + :param basedn: Base DN for all account entries below + :type basedn: str + """ + + def __init__(self, instance, basedn="cn=plugins,cn=config"): + super(Plugins, self).__init__(instance=instance) + self._objectclasses = ['top', 'nsslapdplugin'] + self._filterattrs = ['cn', 'nsslapd-pluginPath'] + self._childobject = AttributeUniquenessPlugin + self._basedn = basedn + # This is used to allow entry to instance to work + self._list_attrlist = ['dn', 'nsslapd-pluginPath'] + self._search_filter = "(nsslapd-pluginId=NSUniqueAttr)" + + def list(self): + """Get a list of all plugin instances where nsslapd-pluginId: NSUniqueAttr + + :returns: A list of children entries + """ + + try: + results = self._instance.search_ext_s( + base=self._basedn, + scope=ldap.SCOPE_ONELEVEL, + filterstr=self._search_filter, + attrlist=self._list_attrlist, + serverctrls=self._server_controls, clientctrls=self._client_controls + ) + insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results] + except ldap.NO_SUCH_OBJECT: + # There are no objects to select from, se we return an empty array + insts = [] + return insts + + def _get_dn(self, dn): + # This will yield and & filter for objectClass with as many terms as needed. + self._log.debug('_gen_dn filter = %s' % self._search_filter) + self._log.debug('_gen_dn dn = %s' % dn) + return self._instance.search_ext_s( + base=dn, + scope=ldap.SCOPE_BASE, + filterstr=self._search_filter, + attrlist=self._list_attrlist, + serverctrls=self._server_controls, clientctrls=self._client_controls + ) + + def _get_selector(self, selector): + # Filter based on the objectclasses and the basedn + # Based on the selector, we should filter on that too. + # This will yield and & filter for objectClass with as many terms as needed. 
+ filterstr = "&(cn=%s)%s" % (selector, self._search_filter) + self._log.debug('_gen_selector filter = %s' % filterstr) + return self._instance.search_ext_s( + base=self._basedn, + scope=self._scope, + filterstr=filterstr, + attrlist=self._list_attrlist, + serverctrls=self._server_controls, clientctrls=self._client_controls + ) + + class LdapSSOTokenPlugin(Plugin): """An instance of ldapssotoken plugin entry @@ -222,11 +290,14 @@ class MEPConfigs(DSLdapObjects): :type basedn: str """ - def __init__(self, instance, basedn="cn=managed entries,cn=plugins,cn=config"): + def __init__(self, instance, basedn=None): super(MEPConfigs, self).__init__(instance) self._objectclasses = ['top', 'extensibleObject'] self._filterattrs = ['cn'] self._childobject = MEPConfig + # So we can set the configArea easily + if basedn is None: + basedn = "cn=managed entries,cn=plugins,cn=config" self._basedn = basedn @@ -243,7 +314,7 @@ class MEPTemplate(DSLdapObject): super(MEPTemplate, self).__init__(instance, dn) self._rdn_attribute = 'cn' self._must_attributes = ['cn'] - self._create_objectclasses = ['top', 'extensibleObject', 'mepTemplateEntry'] + self._create_objectclasses = ['top', 'mepTemplateEntry'] self._protected = False @@ -258,7 +329,7 @@ class MEPTemplates(DSLdapObjects): def __init__(self, instance, basedn): super(MEPTemplates, self).__init__(instance) - self._objectclasses = ['top', 'extensibleObject'] + self._objectclasses = ['top', 'mepTemplateEntry'] self._filterattrs = ['cn'] self._childobject = MEPTemplate self._basedn = basedn @@ -774,6 +845,23 @@ class MemberOfSharedConfig(DSLdapObject): self._exit_code = None +class MemberOfSharedConfigs(DSLdapObjects): + """A DSLdapObjects entity which represents MemberOf config entry + + :param instance: An instance + :type instance: lib389.DirSrv + :param basedn: Base DN for all account entries below + :type basedn: str + """ + + def __init__(self, instance, basedn=None): + super(MemberOfSharedConfigs, self).__init__(instance) + self._objectclasses = ['top', 'extensibleObject'] + self._filterattrs = ['cn'] + self._childobject = MemberOfSharedConfig + self._basedn = basedn + + class RetroChangelogPlugin(Plugin): """An instance of Retro Changelog plugin entry @@ -1163,6 +1251,19 @@ class PassThroughAuthenticationPlugin(Plugin): def __init__(self, instance, dn="cn=Pass Through Authentication,cn=plugins,cn=config"): super(PassThroughAuthenticationPlugin, self).__init__(instance, dn) + def get_urls(self): + """Get all URLs from nsslapd-pluginargNUM attributes + + :returns: a list + """ + + attr_dict = collections.OrderedDict(sorted(self.get_all_attrs().items())) + result = [] + for attr, value in attr_dict.items(): + if attr.startswith("nsslapd-pluginarg"): + result.append(ensure_str(value[0])) + return result + class USNPlugin(Plugin): """A single instance of USN (Update Sequence Number) plugin entry @@ -1629,6 +1730,107 @@ class DNAPluginConfigs(DSLdapObjects): self._basedn = basedn +class DNAPluginSharedConfig(DSLdapObject): + """A single instance of DNA Plugin config entry + + :param instance: An instance + :type instance: lib389.DirSrv + :param dn: Entry DN + :type dn: str + """ + + def __init__(self, instance, dn=None): + super(DNAPluginSharedConfig, self).__init__(instance, dn) + self._rdn_attribute = 'dnaHostname' + self._must_attributes = ['dnaHostname', 'dnaPortNum'] + self._create_objectclasses = ['top', 'dnaSharedConfig'] + self._protected = False + + def create(self, properties=None, basedn=None, ensure=False): + """The shared config DNA plugin entry 
has two RDN values + The function takes care about that special case + """ + + for attr in self._must_attributes: + if properties.get(attr, None) is None: + raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None' % attr) + + assert basedn is not None, "Base DN should be specified" + + # Make a DN with the two items RDN and base DN + decomposed_dn = [[('dnaHostname', properties['dnaHostname'], 1), + ('dnaPortNum', properties['dnaPortNum'], 1)]] + ldap.dn.str2dn(basedn) + dn = ldap.dn.dn2str(decomposed_dn) + + exists = False + if ensure: + # If we are running in stateful ensure mode, we need to check if the object exists, and + # we can see the state that it is in. + try: + self._instance.search_ext_s(dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, + serverctrls=self._server_controls, clientctrls=self._client_controls, + escapehatch='i am sure') + exists = True + except ldap.NO_SUCH_OBJECT: + pass + + if exists and ensure: + # update properties + self._log.debug('Exists %s' % dn) + self._dn = dn + # Now use replace_many to setup our values + mods = [] + for k, v in list(properties.items()): + mods.append((ldap.MOD_REPLACE, k, v)) + self._instance.modify_ext_s(self._dn, mods, serverctrls=self._server_controls, + clientctrls=self._client_controls, escapehatch='i am sure') + else: + self._log.debug('Creating %s' % dn) + mods = [('objectclass', ensure_list_bytes(self._create_objectclasses))] + # Bring our mods to one type and do ensure bytes on the list + for attr, value in properties.items(): + if not isinstance(value, list): + value = [value] + mods.append((attr, ensure_list_bytes(value))) + # We rely on exceptions here to indicate failure to the parent. + self._log.debug('Creating entry %s : %s' % (dn, mods)) + self._instance.add_ext_s(dn, mods, serverctrls=self._server_controls, clientctrls=self._client_controls, + escapehatch='i am sure') + # If it worked, we need to fix our instance dn + self._dn = dn + + return self + + +class DNAPluginSharedConfigs(DSLdapObjects): + """A DSLdapObjects entity which represents DNA Plugin config entry + + :param instance: An instance + :type instance: lib389.DirSrv + :param basedn: Base DN for all account entries below + :type basedn: str + """ + + def __init__(self, instance, basedn=None): + super(DNAPluginSharedConfigs, self).__init__(instance) + self._objectclasses = ['top', 'dnaSharedConfig'] + self._filterattrs = ['dnaHostname', 'dnaPortNum'] + self._childobject = DNAPluginSharedConfig + self._basedn = basedn + + def create(self, properties=None): + """Create an object under base DN of our entry + + :param properties: Attributes for the new entry + :type properties: dict + + :returns: DSLdapObject of the created entry + """ + + co = self._entry_to_instance(dn=None, entry=None) + return co.create(properties, self._basedn) + + class Plugins(DSLdapObjects): """A DSLdapObjects entity which represents plugin entry
0
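Note on the CLI refactoring in the diff above: every converted plugin module follows the same pattern, where an arg_to_attr dictionary maps argparse option names to the plugin's LDAP attribute names and generic_object_edit / generic_object_add apply only the options the user actually passed. The snippet below is a minimal, standalone sketch of that mapping step only; args_to_mods is an illustrative helper (not part of lib389), and the real generic_object_edit writes the resulting modifications to a live DSLdapObject rather than printing them.

from argparse import Namespace

# Attribute map in the style of the refactored plugin CLIs
# (values taken from the referential-integrity module in the diff above)
arg_to_attr = {
    'update_delay': 'referint-update-delay',
    'membership_attr': 'referint-membership-attr',
    'entry_scope': 'nsslapd-pluginEntryScope',
}

def args_to_mods(args, arg_to_attr):
    """Translate parsed CLI options into (attribute, values) pairs,
    skipping options the user did not supply (None)."""
    mods = []
    for arg_name, attr_name in arg_to_attr.items():
        value = getattr(args, arg_name, None)
        if value is None:
            continue
        # Multi-valued options (nargs='+') already arrive as lists
        values = value if isinstance(value, list) else [value]
        mods.append((attr_name, values))
    return mods

if __name__ == '__main__':
    # Simulate what argparse would hand to referint_edit()
    args = Namespace(update_delay='0',
                     membership_attr=['member', 'uniqueMember'],
                     entry_scope=None)
    for attr, values in args_to_mods(args, arg_to_attr):
        print(attr, values)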
7135e21ae00e475cac0a11e972bccf55d90989a2
389ds/389-ds-base
Issue 1081 - CI - Add more tests for overwriting x-origin issue (#5815) Description: Add a test suite that checks an attribute type and its X-ORIGIN values in replicated environments (the supplier-consumer 'm1c1' and supplier-hub-consumer 'm1h1c1' topologies). Also ensure that a custom X-ORIGIN is correctly overwritten as 'user defined' during replication. Related: https://github.com/389ds/389-ds-base/issues/1081 Reviewed by: @progier389 (Thanks!)
commit 7135e21ae00e475cac0a11e972bccf55d90989a2 Author: Simon Pichugin <[email protected]> Date: Thu Sep 7 10:48:17 2023 -0700 Issue 1081 - CI - Add more tests for overwriting x-origin issue (#5815) Description: Add a test suite that tests an attributetype and its x-origin values in a replicated environment s1c1 and s1h1c1. Also, ensure the custom x-origin is correctly overwritten in the replication event as 'user defined'. Related: https://github.com/389ds/389-ds-base/issues/1081 Reviewed by: @progier389 (Thanks!) diff --git a/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py new file mode 100644 index 000000000..9e4ce498c --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py @@ -0,0 +1,235 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import json +import logging +import re +import time +import ldap +import pytest +from lib389._constants import SUFFIX, ReplicaRole, DEFAULT_SUFFIX +from lib389.topologies import create_topology +from lib389.replica import Agreements, ReplicationManager +from lib389.schema import Schema +from lib389.idm.user import UserAccounts +from lib389.cli_base import LogCapture +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +def pattern_errorlog(file, log_pattern): + """Check for a pattern in the error log file.""" + + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def trigger_update(topology, user_rdn, num): + """It triggers an update on the supplier. This will start a replication + session and a schema push + """ + + users_s = UserAccounts(topology.ms["supplier1"], DEFAULT_SUFFIX) + user = users_s.get(user_rdn) + user.replace('telephonenumber', str(num)) + + # wait until the update is replicated (until up to x seconds) + users_c = UserAccounts(topology.cs["consumer1"], DEFAULT_SUFFIX) + for _ in range(30): + try: + user = users_c.get(user_rdn) + val = user.get_attr_val_int('telephonenumber') + if val == num: + return + # the expected value is not yet replicated. 
try again + time.sleep(1) + log.debug(f"trigger_update: receive {val} (expected {num})") + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + + +def trigger_schema_push(topology, user_rdn, num): + """Triggers a schema push from the supplier to the consumer or hub.""" + + supplier = topology['topology'].ms["supplier1"] + if topology['type'] == "m1h1c1": + consumer = topology['topology'].hs["hub1"] + else: + consumer = topology['topology'].cs["consumer1"] + + agreements = supplier.agreement.list(suffix=SUFFIX, + consumer_host=consumer.host, + consumer_port=consumer.port) + assert (len(agreements) == 1) + ra = agreements[0] + trigger_update(topology['topology'], user_rdn, num) + supplier.agreement.pause(ra.dn) + supplier.agreement.resume(ra.dn) + trigger_update(topology['topology'], user_rdn, num) + + +def add_attributetype(inst, num, at_name, x_origin): + """Adds a new attribute type to the schema.""" + + schema = Schema(inst) + # Add new attribute + parameters = { + 'names': [at_name], + 'oid': str(9000 + num), + 'desc': 'Test extra parenthesis in X-ORIGIN', + # 'x_origin': [x_origin], + 'x_origin': None, + 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', + 'syntax_len': None, + 'x_ordered': None, + 'collective': None, + 'obsolete': None, + 'single_value': None, + 'no_user_mod': None, + 'equality': None, + 'substr': None, + 'ordering': None, + 'usage': None, + 'sup': None + } + schema.add_attributetype(parameters) + + [email protected](scope="function", params=["m1c1", "m1h1c1"]) +def topology(request): + """Create Replication Deployment based on the params""" + + if request.param == "m1c1": + topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.CONSUMER: 1} + elif request.param == "m1h1c1": + topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1} + + topology = create_topology(topo_roles, request=request) + + topology.logcap = LogCapture() + return { + 'topology': topology, + 'type': request.param + } + + [email protected](scope="function") +def schema_replication_init(topology): + """Initialize the test environment """ + + supplier = topology['topology'].ms["supplier1"] + supplier.errorlog_file = open(supplier.errlog, "r") + users = UserAccounts(supplier, DEFAULT_SUFFIX) + user = users.create_test_user() + user.replace('telephonenumber', '0') + + return user + + [email protected]("xorigin", ['user defined', 'custom xorigin']) +def test_schema_xorigin_repl(topology, schema_replication_init, xorigin): + """Check consumer schema is a superset (one extra OC) of supplier schema, then + schema is pushed and there is a message in the error log + + :id: 2b29823b-3e83-4b25-954a-8a081dbc15ee + :setup: Supplier and consumer topology, with one user entry; + Supplier, hub and consumer topology, with one user entry + :steps: + 1. Push the schema from the supplier to the consumer (an error should not be generated) + 2. Update the schema of the consumer, so it will be a superset of the supplier's schema + 3. Update the schema of the supplier to make its nsSchemaCSN larger than the consumer's + 4. Push the schema from the supplier to the consumer (an error should be generated) + 5. Check if the supplier learns the missing definition + 6. Check the error logs for any issues + 7. Check the startup and final state of the schema replication process + :expectedresults: + 1. The supplier's schema update should be successful + 2. The consumer's schema update should be successful + 3. The supplier's schema update should be successful + 4. The schema push operation should be successful + 5. 
The supplier should successfully learn the missing definition + 6. There should be no error messages in the logs + 7. The startup and final state of the schema replication process should be as expected + """ + + repl = ReplicationManager(DEFAULT_SUFFIX) + user = schema_replication_init + hub = None + supplier = topology['topology'].ms["supplier1"] + consumer = topology['topology'].cs["consumer1"] + if topology['type'] == "m1h1c1": + hub = topology['topology'].hs["hub1"] + + add_attributetype(supplier, 1, 'testAttribute', xorigin) + + # Search for attribute with JSON option + schema = Schema(supplier) + attr_result = schema.query_attributetype('testAttribute', json=True) + # Verify the x-origin value is correct + assert attr_result['at']['x_origin'][0] == "user defined" + + trigger_schema_push(topology, user.rdn, 1) + repl.wait_for_replication(supplier, consumer) + supplier_schema_csn = supplier.schema.get_schema_csn() + consumer_schema_csn = consumer.schema.get_schema_csn() + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(supplier.errorlog_file, regex) + if res is not None: + assert False + + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + add_attributetype(consumer, 2, 'testAttributeCA', xorigin) + time.sleep(2) + add_attributetype(supplier, 3, 'testAttributeSA', xorigin) + + # now push the scheam + trigger_schema_push(topology, user.rdn, 2) + repl.wait_for_replication(supplier, consumer) + supplier_schema_csn = supplier.schema.get_schema_csn() + consumer_schema_csn = consumer.schema.get_schema_csn() + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + pattern_errorlog(supplier.errorlog_file, regex) + + # Check that standard schema was not rewritten to be "user defined' on the consumer + cn_attrs = json.loads(consumer.schema.query_attributetype("cn", json=True)) + cn_attr = cn_attrs['at'] + assert cn_attr['x_origin'][0].lower() != "user defined" + if len(cn_attr['x_origin']) > 1: + assert cn_attr['x_origin'][1].lower() != "user defined" + + # Check that the new OC "supplierNewOCB" was written to be "user defined' on the consumer + ocs = json.loads(consumer.schema.query_attributetype("testAttributeSA", json=True)) + new_oc = ocs['at'] + assert new_oc['x_origin'][0].lower() == "user defined" diff --git a/dirsrvtests/tests/suites/schema/schema_replication_test.py b/dirsrvtests/tests/suites/schema/schema_replication_test.py index 0d083cb05..226957c2a 100644 --- a/dirsrvtests/tests/suites/schema/schema_replication_test.py +++ b/dirsrvtests/tests/suites/schema/schema_replication_test.py @@ -293,7 +293,7 @@ def test_schema_replication_two(topology_m1c1, schema_replication_init): else: assert supplier_schema_csn != consumer_schema_csn - # Check the error log of the supplier does not contain an error + # Check the error log of the supplier does contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) @@ -468,7 +468,7 @@ def test_schema_replication_five(topology_m1c1, 
schema_replication_init): else: assert supplier_schema_csn != consumer_schema_csn - # Check the error log of the supplier does not contain an error + # Check the error log of the supplier does contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) @@ -641,7 +641,7 @@ def test_schema_replication_eight(topology_m1c1, schema_replication_init): else: assert supplier_schema_csn != consumer_schema_csn - # Check the error log of the supplier does not contain an error + # Check the error log of the supplier does contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex)
0
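The test above waits for replication by polling the consumer until the updated telephonenumber value appears. A minimal sketch of that polling pattern, distilled from trigger_update() in the patch; the helper name wait_for_value and its timeout argument are illustrative and not part of the commit:

import time
import ldap
from lib389.idm.user import UserAccounts
from lib389._constants import DEFAULT_SUFFIX

def wait_for_value(consumer, user_rdn, expected, timeout=30):
    """Poll the consumer until the user's telephonenumber equals expected."""
    users = UserAccounts(consumer, DEFAULT_SUFFIX)
    for _ in range(timeout):
        try:
            if users.get(user_rdn).get_attr_val_int('telephonenumber') == expected:
                return True
        except ldap.NO_SUCH_OBJECT:
            pass  # entry not replicated yet
        time.sleep(1)
    return False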
846447c62e3ad2150b054c87c96e797dc1d803f3
389ds/389-ds-base
Ticket 47457 - default nsslapd-sasl-max-buffer-size should be 2MB Description: Increase the default max buffer size to 2mb from 64k. https://fedorahosted.org/389/ticket/47457 Reviewed by: richm(Thanks!)
commit 846447c62e3ad2150b054c87c96e797dc1d803f3 Author: Mark Reynolds <[email protected]> Date: Wed Nov 27 14:59:14 2013 -0500 Ticket 47457 - default nsslapd-sasl-max-buffer-size should be 2MB Description: Increase the default max buffer size to 2mb from 64k. https://fedorahosted.org/389/ticket/47457 Reviewed by: richm(Thanks!) diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 9d1cf778a..7f00134e5 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -177,8 +177,9 @@ static int config_set_schemareplace ( const char *attrname, char *value, #define DEFAULT_PW_RESETFAILURECOUNT "600" #define DEFAULT_PW_LOCKDURATION "3600" #define DEFAULT_NDN_SIZE "20971520" -#define DEFAULT_SASL_MAXBUFSIZE "65536" -#define SLAPD_DEFAULT_SASL_MAXBUFSIZE 65536 +#define DEFAULT_MAXBERSIZE 2097152 +#define DEFAULT_SASL_MAXBUFSIZE "2097152" +#define SLAPD_DEFAULT_SASL_MAXBUFSIZE 2097152 #ifdef MEMPOOL_EXPERIMENTAL #define DEFAULT_MEMPOOL_MAXFREELIST "1024" #endif @@ -5614,9 +5615,9 @@ config_get_maxbersize() maxbersize = slapdFrontendConfig->maxbersize; if(maxbersize==0) - maxbersize= 2 * 1024 * 1024; /* Default: 2Mb */ - return maxbersize; + maxbersize = DEFAULT_MAXBERSIZE; + return maxbersize; } int
0
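The change above only bumps the compiled-in default; the effective value on a running server can be read back from cn=config. A hedged sketch using python-ldap, assuming a reachable instance and valid bind credentials; get_sasl_maxbufsize is an illustrative name, not a 389-ds utility:

import ldap

def get_sasl_maxbufsize(uri, binddn, password):
    # Read the effective SASL buffer size from cn=config; None if unset.
    conn = ldap.initialize(uri)
    conn.simple_bind_s(binddn, password)
    dn, attrs = conn.search_s('cn=config', ldap.SCOPE_BASE,
                              '(objectClass=*)',
                              ['nsslapd-sasl-max-buffer-size'])[0]
    vals = attrs.get('nsslapd-sasl-max-buffer-size')
    return int(vals[0].decode()) if vals else None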
c4befd634828338e98b1a0c93db12b1c9e70525c
389ds/389-ds-base
Issue 50800 - wildcards in rootdn-allow-ip attribute are not accepted Description: The asterisk character was missing from the allowed character list. Also cleaned up the source in the C file. Thanks @yrro for contributing the original patch! relates: https://pagure.io/389-ds-base/issue/50800 Reviewed by: firstyear (Thanks!)

commit c4befd634828338e98b1a0c93db12b1c9e70525c Author: Mark Reynolds <[email protected]> Date: Thu Mar 19 21:24:05 2020 -0400 Issue 50800 - wildcards in rootdn-allow-ip attribute are not accepted Description: The asterick character was missing from the allowed character list. Also cleaned up the source in the C file. Thanks @yrro for contributing the original patch! relates: https://pagure.io/389-ds-base/issue/50800 Reviewed by: firstyear (Thanks!) diff --git a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py index a3fc99999..a053c2961 100644 --- a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py +++ b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py @@ -17,7 +17,6 @@ from lib389.utils import * from lib389.tasks import * from lib389.tools import DirSrvTools from lib389.topologies import topology_st -from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD from lib389.idm.directorymanager import DirectoryManager from lib389.plugins import RootDNAccessControlPlugin @@ -269,7 +268,7 @@ def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, time # Change the denied IP so root DN succeeds plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) - + attr_updated = 0 for i in range(0, timeout): if ('255.255.255.255' in str(plugin.get_deny_ip())): @@ -377,7 +376,7 @@ def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, tim raise Exception ("rootdn-allow-ip was not updated") # Bind as Root DN - should fail - uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) with pytest.raises(ldap.UNWILLING_TO_PERFORM): rootdn_bind(topology_st.standalone, uri=uri) @@ -588,6 +587,77 @@ def test_rootdn_config_validate(topology_st, rootdn_setup, rootdn_cleanup): plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')]) +def test_rootdn_access_denied_ip_wildcard(topology_st, rootdn_setup, rootdn_cleanup): + """Test denied IP feature with a wildcard + + :id: 73c74f62-9ac2-4bb6-8a63-bacc8d8bbf93 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set rootdn-deny-ip to '127.*' + 2. Bind as Root DN + 3. Change the denied IP so root DN succeeds + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + """ + + log.info('Running test_rootdn_access_denied_ip_wildcard...') + + plugin.add_deny_ip('127.*') + time.sleep(.5) + + # Bind as root DN - should fail + uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + rootdn_bind(topology_st.standalone, uri=uri) + + # Change the denied IP so root DN succeeds + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) + time.sleep(.5) + + # Bind should succeed + rootdn_bind(topology_st.standalone, uri=uri) + + +def test_rootdn_access_allowed_ip_wildcard(topology_st, rootdn_setup, rootdn_cleanup): + """Test allowed ip feature + + :id: c3e22c61-9ed2-4e89-8243-6ff686ecad9b + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set allowed ip to 255.255.255.255 - blocks the Root DN + 2. Bind as Root DN + 3. Allow 127.* + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_allowed_ip...') + + # Set allowed ip to 255.255.255.255 - blocks the Root DN + plugin.add_allow_ip('255.255.255.255') + time.sleep(.5) + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format("127.0.0.1", topology_st.standalone.port) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + rootdn_bind(topology_st.standalone, uri=uri) + + # Allow localhost + plugin.add_allow_ip('127.*') + time.sleep(.5) + + # Bind should succeed + rootdn_bind(topology_st.standalone, uri=uri) + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/ldap/servers/plugins/rootdn_access/rootdn_access.c b/ldap/servers/plugins/rootdn_access/rootdn_access.c index 2f84aeae7..b256fa290 100644 --- a/ldap/servers/plugins/rootdn_access/rootdn_access.c +++ b/ldap/servers/plugins/rootdn_access/rootdn_access.c @@ -48,14 +48,14 @@ /* * Plugin Functions */ -int rootdn_init(Slapi_PBlock *pb); -static int rootdn_start(Slapi_PBlock *pb); -static int rootdn_close(Slapi_PBlock *pb); -static int rootdn_load_config(Slapi_PBlock *pb); -static int rootdn_check_access(Slapi_PBlock *pb); -static int rootdn_check_host_wildcard(char *host, char *client_host); +int32_t rootdn_init(Slapi_PBlock *pb); +static int32_t rootdn_start(Slapi_PBlock *pb); +static int32_t rootdn_close(Slapi_PBlock *pb); +static int32_t rootdn_load_config(Slapi_PBlock *pb); +static int32_t rootdn_check_access(Slapi_PBlock *pb); +static int32_t rootdn_check_host_wildcard(char *host, char *client_host); static int rootdn_check_ip_wildcard(char *ip, char *client_ip); -static int rootdn_preop_bind_init(Slapi_PBlock *pb); +static int32_t rootdn_preop_bind_init(Slapi_PBlock *pb); char *strToLower(char *str); /* @@ -104,10 +104,10 @@ rootdn_get_plugin_dn(void) } -int +int32_t rootdn_init(Slapi_PBlock *pb) { - int status = 0; + int32_t status = 0; char *plugin_identity = NULL; slapi_log_err(SLAPI_LOG_TRACE, ROOTDN_PLUGIN_SUBSYSTEM, @@ -157,7 +157,7 @@ rootdn_init(Slapi_PBlock *pb) return status; } -static int +static int32_t rootdn_preop_bind_init(Slapi_PBlock *pb) { if (slapi_pblock_set(pb, SLAPI_PLUGIN_INTERNAL_PRE_BIND_FN, (void *)rootdn_check_access) != 0) { @@ -169,7 +169,7 @@ rootdn_preop_bind_init(Slapi_PBlock *pb) return 0; } -static int +static int32_t rootdn_start(Slapi_PBlock *pb __attribute__((unused))) { slapi_log_err(SLAPI_LOG_PLUGIN, ROOTDN_PLUGIN_SUBSYSTEM, "--> rootdn_start\n"); @@ -196,14 +196,14 @@ rootdn_free(void) ips_to_deny = NULL; } -static int +static int32_t rootdn_close(Slapi_PBlock *pb __attribute__((unused))) { rootdn_free(); return 0; } -static int +static int32_t rootdn_load_config(Slapi_PBlock *pb) { Slapi_Entry *e = NULL; @@ -217,9 +217,9 @@ rootdn_load_config(Slapi_PBlock *pb) char *token, *iter = NULL, *copy; char hour[3], min[3]; size_t end; - int result = 0; - int time; - int i; + int32_t result = 0; + int32_t time; + slapi_log_err(SLAPI_LOG_PLUGIN, ROOTDN_PLUGIN_SUBSYSTEM, "--> rootdn_load_config\n"); @@ -344,7 +344,7 @@ rootdn_load_config(Slapi_PBlock *pb) goto free_and_return; } if (hosts_tmp) { - for (i = 0; hosts_tmp[i] != NULL; i++) { + for (size_t i = 0; hosts_tmp[i] != NULL; i++) { end = strspn(hosts_tmp[i], "0123456789.*-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"); if (!end || hosts_tmp[i][end] != '\0') { slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_load_config - " @@ -357,7 +357,7 @@ rootdn_load_config(Slapi_PBlock *pb) } } if (hosts_to_deny_tmp) { - for (i = 0; hosts_to_deny_tmp[i] != NULL; i++) { + for (size_t i = 0; 
hosts_to_deny_tmp[i] != NULL; i++) { end = strspn(hosts_to_deny_tmp[i], "0123456789.*-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"); if (!end || hosts_to_deny_tmp[i][end] != '\0') { slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_load_config - " @@ -370,8 +370,8 @@ rootdn_load_config(Slapi_PBlock *pb) } } if (ips_tmp) { - for (i = 0; ips_tmp[i] != NULL; i++) { - end = strspn(ips_tmp[i], "0123456789:ABCDEFabcdef."); + for (size_t i = 0; ips_tmp[i] != NULL; i++) { + end = strspn(ips_tmp[i], "0123456789:ABCDEFabcdef.*"); if (!end || ips_tmp[i][end] != '\0') { slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_load_config - " "IP address contains invalid characters (%s), skipping\n", @@ -397,7 +397,7 @@ rootdn_load_config(Slapi_PBlock *pb) } } if (ips_to_deny_tmp) { - for (i = 0; ips_to_deny_tmp[i] != NULL; i++) { + for (size_t i = 0; ips_to_deny_tmp[i] != NULL; i++) { end = strspn(ips_to_deny_tmp[i], "0123456789:ABCDEFabcdef.*"); if (!end || ips_to_deny_tmp[i][end] != '\0') { slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_load_config - " @@ -449,7 +449,7 @@ free_and_return: } -static int +static int32_t rootdn_check_access(Slapi_PBlock *pb) { PRNetAddr *client_addr = NULL; @@ -457,9 +457,8 @@ rootdn_check_access(Slapi_PBlock *pb) time_t curr_time; struct tm *timeinfo = NULL; char *dnsName = NULL; - int isRoot = 0; - int rc = SLAPI_PLUGIN_SUCCESS; - int i; + int32_t isRoot = 0; + int32_t rc = SLAPI_PLUGIN_SUCCESS; /* * Verify this is a root DN @@ -489,8 +488,8 @@ rootdn_check_access(Slapi_PBlock *pb) curr_total = (time_t)(timeinfo->tm_hour * 3600) + (timeinfo->tm_min * 60); if ((curr_total < open_time) || (curr_total >= close_time)) { - slapi_log_err(SLAPI_LOG_PLUGIN, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - Bind not in the " - "allowed time window\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - Bind not in the allowed time window\n"); return -1; } } @@ -508,8 +507,8 @@ rootdn_check_access(Slapi_PBlock *pb) daysAllowed = strToLower(daysAllowed); if (!strstr(daysAllowed, today)) { - slapi_log_err(SLAPI_LOG_PLUGIN, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " - "Bind not allowed for today(%s), only allowed on days: %s\n", + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " + "Bind not allowed for today(%s), only allowed on days: %s\n", today, daysAllowed); return -1; } @@ -518,7 +517,7 @@ rootdn_check_access(Slapi_PBlock *pb) * Check the host restrictions, deny always overrides allow */ if (hosts || hosts_to_deny) { - char buf[PR_NETDB_BUF_SIZE]; + char buf[PR_NETDB_BUF_SIZE] = {0}; char *host; /* @@ -526,8 +525,8 @@ rootdn_check_access(Slapi_PBlock *pb) */ client_addr = (PRNetAddr *)slapi_ch_malloc(sizeof(PRNetAddr)); if (slapi_pblock_get(pb, SLAPI_CONN_CLIENTNETADDR, client_addr) != 0) { - slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " - "Could not get client address for hosts.\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - Could not get client address for hosts.\n"); rc = -1; goto free_and_return; } @@ -541,14 +540,14 @@ rootdn_check_access(Slapi_PBlock *pb) dnsName = slapi_ch_strdup(host_entry->h_name); } else { /* no hostname */ - slapi_log_err(SLAPI_LOG_PLUGIN, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " - "Client address missing hostname\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - Client address missing hostname\n"); rc = -1; goto 
free_and_return; } } else { - slapi_log_err(SLAPI_LOG_PLUGIN, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " - "client IP address could not be resolved\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - client IP address could not be resolved\n"); rc = -1; goto free_and_return; } @@ -556,18 +555,22 @@ rootdn_check_access(Slapi_PBlock *pb) * Now we have our hostname, now do our checks */ if (hosts_to_deny) { - for (i = 0; hosts_to_deny[i] != NULL; i++) { + for (size_t i = 0; hosts_to_deny[i] != NULL; i++) { host = hosts_to_deny[i]; /* check for wild cards */ if (host[0] == '*') { if (rootdn_check_host_wildcard(host, dnsName) == 0) { /* match, return failure */ + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " + "hostname (%s) matched denied host (%s)\n", dnsName, host); rc = -1; goto free_and_return; } } else { if (strcasecmp(host, dnsName) == 0) { /* we have a match, return failure */ + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " + "hostname (%s) matched denied host (%s)\n", dnsName, host); rc = -1; goto free_and_return; } @@ -576,7 +579,7 @@ rootdn_check_access(Slapi_PBlock *pb) rc = 0; } if (hosts) { - for (i = 0; hosts[i] != NULL; i++) { + for (size_t i = 0; hosts[i] != NULL; i++) { host = hosts[i]; /* check for wild cards */ if (host[0] == '*') { @@ -600,14 +603,15 @@ rootdn_check_access(Slapi_PBlock *pb) * Check the IP address restrictions, deny always overrides allow */ if (ips || ips_to_deny) { - char ip_str[256]; + char ip_str[256] = {0}; char *ip; - int ip_len, i; + int32_t ip_len; if (client_addr == NULL) { client_addr = (PRNetAddr *)slapi_ch_malloc(sizeof(PRNetAddr)); if (slapi_pblock_get(pb, SLAPI_CONN_CLIENTNETADDR, client_addr) != 0) { - slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - Could not get client address for IP.\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - Could not get client address for IP.\n"); rc = -1; goto free_and_return; } @@ -620,13 +624,15 @@ rootdn_check_access(Slapi_PBlock *pb) v4addr.inet.family = PR_AF_INET; v4addr.inet.ip = client_addr->ipv6.ip.pr_s6_addr32[3]; if (PR_NetAddrToString(&v4addr, ip_str, sizeof(ip_str)) != PR_SUCCESS) { - slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - Could not get IPv4 from client address.\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - Could not get IPv4 from client address.\n"); rc = -1; goto free_and_return; } } else { if (PR_NetAddrToString(client_addr, ip_str, sizeof(ip_str)) != PR_SUCCESS) { - slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - Could not get IPv6 from client address.\n"); + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, + "rootdn_check_access - Could not get IPv6 from client address.\n"); rc = -1; goto free_and_return; } @@ -635,18 +641,22 @@ rootdn_check_access(Slapi_PBlock *pb) * Now we have our IP address, do our checks */ if (ips_to_deny) { - for (i = 0; ips_to_deny[i] != NULL; i++) { + for (size_t i = 0; ips_to_deny[i] != NULL; i++) { ip = ips_to_deny[i]; ip_len = strlen(ip); if (ip[ip_len - 1] == '*') { if (rootdn_check_ip_wildcard(ips_to_deny[i], ip_str) == 0) { /* match, return failure */ + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " + "ip address (%s) matched denied IP address (%s)\n", ip_str, ip); rc = -1; goto free_and_return; } } else { if (strcasecmp(ip_str, ip) == 0) { /* match, 
return failure */ + slapi_log_err(SLAPI_LOG_ERR, ROOTDN_PLUGIN_SUBSYSTEM, "rootdn_check_access - " + "ip address (%s) matched denied IP address (%s)\n", ip_str, ip); rc = -1; goto free_and_return; } @@ -655,7 +665,7 @@ rootdn_check_access(Slapi_PBlock *pb) rc = 0; } if (ips) { - for (i = 0; ips[i] != NULL; i++) { + for (size_t i = 0; ips[i] != NULL; i++) { ip = ips[i]; ip_len = strlen(ip); if (ip[ip_len - 1] == '*') { @@ -664,6 +674,7 @@ rootdn_check_access(Slapi_PBlock *pb) rc = 0; goto free_and_return; } + } else { if (strcasecmp(ip_str, ip) == 0) { /* match, return success */ @@ -684,17 +695,19 @@ free_and_return: return rc; } -static int +static int32_t rootdn_check_host_wildcard(char *host, char *client_host) { - int host_len = strlen(host); - int client_len = strlen(client_host); - int i, j; + size_t host_len = strlen(host); + size_t client_len = strlen(client_host); + size_t i, j; + /* * Start at the end of the string and move backwards, and skip the first char "*" */ if (client_len < host_len) { /* this can't be a match */ + return -1; } for (i = host_len - 1, j = client_len - 1; i > 0; i--, j--) { @@ -710,7 +723,7 @@ static int rootdn_check_ip_wildcard(char *ip, char *client_ip) { size_t ip_len = strlen(ip); - int i; + /* * Start at the beginning of the string and move forward, and skip the last char "*" */ @@ -718,7 +731,7 @@ rootdn_check_ip_wildcard(char *ip, char *client_ip) /* this can't be a match */ return -1; } - for (i = 0; i < ip_len - 1; i++) { + for (size_t i = 0; i < ip_len - 1; i++) { if (ip[i] != client_ip[i]) { return -1; } @@ -730,9 +743,7 @@ rootdn_check_ip_wildcard(char *ip, char *client_ip) char * strToLower(char *str) { - size_t i; - - for (i = 0; str && i < strlen(str); i++) { + for (size_t i = 0; str && i < strlen(str); i++) { str[i] = tolower(str[i]); } return str;
0
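The C helper rootdn_check_ip_wildcard() accepts a pattern ending in '*' and compares the client address character by character up to that wildcard. A rough Python rendering of that check, for illustration only; ip_wildcard_match is not part of the plugin and only approximates the C logic:

def ip_wildcard_match(pattern, client_ip):
    # Patterns ending in '*' match when every character before the '*'
    # equals the client address; exact patterns fall back to a
    # case-insensitive compare (strcasecmp in the plugin).
    if not pattern.endswith('*'):
        return pattern.lower() == client_ip.lower()
    prefix = pattern[:-1]
    return len(client_ip) >= len(prefix) and client_ip.startswith(prefix)

# e.g. '127.*' matches 127.0.0.1 but not 10.0.0.1
assert ip_wildcard_match('127.*', '127.0.0.1')
assert not ip_wildcard_match('127.*', '10.0.0.1')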
92cc2b1e66900f0b356a7056f11ec21502a2e31f
389ds/389-ds-base
Issue 5867 - lib389 should use filter for tarfile as recommended by PEP 706 (#5868) Problem: The tarfile interface evolved after CVE-2007-4559, and using an object returned by tarfile.open without explicitly setting a filter is now deprecated. Solution: Add an extraction_filter after every tarfile.open call **Issue:** [5867](https://github.com/389ds/389-ds-base/issues/5867) **Reviewed by:** @droideck Thanks !
commit 92cc2b1e66900f0b356a7056f11ec21502a2e31f Author: progier389 <[email protected]> Date: Tue Aug 1 15:39:31 2023 +0200 Issue 5867 - lib389 should use filter for tarfile as recommended by PEP 706 (#5868) Problem: tarfile interface evolved after CVE-2007-4559 and using object generated by tarfile.open without setting explicitly a filter has been obsoleted. Solution: Add an extraction_filter after every tarfile.open call **Issue:** [5867](https://github.com/389ds/389-ds-base/issues/5867) **Reviewed by:** @droideck Thanks ! diff --git a/dirsrvtests/tests/tickets/ticket47988_test.py b/dirsrvtests/tests/tickets/ticket47988_test.py index 8ccaf607e..28ecf7ed8 100644 --- a/dirsrvtests/tests/tickets/ticket47988_test.py +++ b/dirsrvtests/tests/tickets/ticket47988_test.py @@ -82,6 +82,7 @@ def _install_schema(server, tarFile): os.chdir(tmpSchema) tar = tarfile.open(tarFile, 'r:gz') + tar.extraction_filter = (lambda member, path: member) for member in tar.getmembers(): tar.extract(member.name) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index ce9f05507..2fbe90e79 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -1378,6 +1378,7 @@ class DirSrv(SimpleLDAPObject, object): name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S")) backup_file = os.path.join(backup_dir, name) tar = tarfile.open(backup_file, "w:gz") + tar.extraction_filter = (lambda member, path: member) for name in listFilesToBackup: tar.add(name) @@ -1451,6 +1452,7 @@ class DirSrv(SimpleLDAPObject, object): os.chdir(prefix_pattern) tar = tarfile.open(backup_file) + tar.extraction_filter = (lambda member, path: member) for member in tar.getmembers(): if os.path.isfile(member.name): # diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py index c5bab5815..7e47d373d 100644 --- a/src/lib389/lib389/tools.py +++ b/src/lib389/lib389/tools.py @@ -301,6 +301,7 @@ class DirSrvTools(object): name = "backup_%s.tar.gz" % (time.strftime("%m%d%Y_%H%M%S")) backup_file = os.path.join(backup_dir, name) tar = tarfile.open(backup_file, "w:gz") + tar.extraction_filter = (lambda member, path: member) for name in listFilesToBackup: if os.path.isfile(name): @@ -359,6 +360,7 @@ class DirSrvTools(object): os.chdir(dirsrv.prefix) tar = tarfile.open(backup_file) + tar.extraction_filter = (lambda member, path: member) for member in tar.getmembers(): if os.path.isfile(member.name): #
0
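The patch addresses the PEP 706 deprecation by assigning a pass-through extraction_filter before extracting. A minimal sketch of the same pattern; extract_backup, archive and dest are illustrative names, and on newer Pythons the stricter stdlib tarfile.data_filter (or extractall(filter='data')) could presumably be used instead:

import tarfile

def extract_backup(archive, dest):
    with tarfile.open(archive, 'r:gz') as tar:
        # Pass-through filter, as in the patch: keeps the old extraction
        # behaviour while satisfying the PEP 706 requirement to pick one.
        tar.extraction_filter = (lambda member, path: member)
        tar.extractall(path=dest)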
bed6f0d549d7817f5ab58a1b6a1b60f29c84d752
389ds/389-ds-base
Ticket #48191 - CI test: added test cases for ticket 48191 Description: Adding nsslapd-maxsimplepaged-per-conn
commit bed6f0d549d7817f5ab58a1b6a1b60f29c84d752 Author: Noriko Hosoi <[email protected]> Date: Tue Jun 9 15:40:55 2015 -0700 Ticket #48191 - CI test: added test cases for ticket 48191 Description: Adding nsslapd-maxsimplepaged-per-conn diff --git a/dirsrvtests/tickets/ticket48191_test.py b/dirsrvtests/tickets/ticket48191_test.py new file mode 100644 index 000000000..4011cb20b --- /dev/null +++ b/dirsrvtests/tickets/ticket48191_test.py @@ -0,0 +1,312 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl +from ldap.controls.simple import GetEffectiveRightsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +CONFIG_DN = 'cn=config' +MYSUFFIX = 'o=ticket48191.org' +MYSUFFIXBE = 'ticket48191' + +_MYLDIF = 'ticket48191.ldif' + +SEARCHFILTER = '(objectclass=*)' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket48191_setup(topology): + """ + Import 20 entries + Set nsslapd-maxsimplepaged-per-conn in cn=config + If the val is negative, no limit. + If the value is 0, the simple paged results is disabled. + If the value is positive, the value is the max simple paged results requests per connection. + The setting has to be dynamic. 
+ """ + log.info('Testing Ticket 48191 - Config parameter nsslapd-maxsimplepaged-per-conn') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket48191.org ######################\n") + + topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) + topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) + + topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") + + # get tmp dir + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + if mytmp is None: + mytmp = "/tmp" + + MYLDIF = '%s%s' % (mytmp, _MYLDIF) + os.system('ls %s' % MYLDIF) + os.system('rm -f %s' % MYLDIF) + if hasattr(topology.standalone, 'prefix'): + prefix = topology.standalone.prefix + else: + prefix = None + dbgen_prog = prefix + '/bin/dbgen.pl' + topology.standalone.log.info('dbgen_prog: %s' % dbgen_prog) + os.system('%s -s %s -o %s -n 14' % (dbgen_prog, MYSUFFIX, MYLDIF)) + cmdline = 'egrep dn: %s | wc -l' % MYLDIF + p = os.popen(cmdline, "r") + dnnumstr = p.readline() + global dnnum + dnnum = int(dnnumstr) + topology.standalone.log.info("We have %d entries.\n", dnnum) + + topology.standalone.log.info("\n\n######################### Import Test data ######################\n") + + args = {TASK_WAIT: True} + importTask = Tasks(topology.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) + + topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + global entries + entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology.standalone.log.info("Returned %d entries.\n", len(entries)) + + #print entries + + assert dnnum == len(entries) + + topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) + +def test_ticket48191_run_0(topology): + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (no nsslapd-maxsimplepaged-per-conn) ######################\n") + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + pageddncnt = 0 + pages = 0 + while True: + pages += 1 + + topology.standalone.log.info("Getting page %d" % pages) + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + topology.standalone.log.info("%d results" % len(rdata)) + pageddncnt += len(rdata) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + + pctrls = [ + c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType + ] + if not pctrls: + topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') + break + + if pctrls[0].cookie: + spr_req_ctrl.cookie = pctrls[0].cookie + topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + else: + topology.standalone.log.info("No cookie") + break + + topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) + + global dnnum + global entries + assert dnnum == len(entries) + assert pages == (dnnum / page_size) + +def test_ticket48191_run_1(topology): + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (nsslapd-maxsimplepaged-per-conn: 0) ######################\n") + + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '0')]) + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + + topology.standalone.log.fatal('Unexpected success') + try: + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + except ldap.UNWILLING_TO_PERFORM, e: + topology.standalone.log.info('Returned the expected RC UNWILLING_TO_PERFORM') + return + except ldap.LDAPError, e: + topology.standalone.log.fatal('Unexpected error: ' + e.message['desc']) + assert False + topology.standalone.log.info("Type %d" % rtype) + topology.standalone.log.info("%d results" % len(rdata)) + assert False + +def test_ticket48191_run_2(topology): + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (nsslapd-maxsimplepaged-per-conn: 1000) ######################\n") + + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '1000')]) + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + 
topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + pageddncnt = 0 + pages = 0 + while True: + pages += 1 + + topology.standalone.log.info("Getting page %d" % pages) + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + topology.standalone.log.info("%d results" % len(rdata)) + pageddncnt += len(rdata) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + + pctrls = [ + c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType + ] + if not pctrls: + topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') + break + + if pctrls[0].cookie: + spr_req_ctrl.cookie = pctrls[0].cookie + topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + else: + topology.standalone.log.info("No cookie") + break + + topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) + + global dnnum + global entries + assert dnnum == len(entries) + assert pages == (dnnum / page_size) + + topology.standalone.log.info("ticket48191 was successfully verified.") + + +def test_ticket48191_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48191_setup(topo) + test_ticket48191_run_0(topo) + test_ticket48191_run_1(topo) + test_ticket48191_run_2(topo) + test_ticket48191_final(topo) + + +if __name__ == '__main__': + run_isolated() +
0
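The ticket 48191 test drives RFC 2696 simple paged results by hand with python-ldap. A condensed sketch of that loop; paged_search and its defaults are illustrative, not part of the ticket:

import ldap
from ldap.controls import SimplePagedResultsControl

def paged_search(conn, base, filterstr='(objectclass=*)', page_size=4):
    # Condensed version of the paging loop used in the test above.
    ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
    known = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
    entries = []
    while True:
        msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
                                ['cn'], serverctrls=[ctrl])
        _, rdata, _, rctrls = conn.result3(msgid, resp_ctrl_classes=known)
        entries.extend(rdata)
        pctrls = [c for c in rctrls
                  if c.controlType == SimplePagedResultsControl.controlType]
        if not pctrls or not pctrls[0].cookie:
            break  # server ignored the control, or last page reached
        ctrl.cookie = pctrls[0].cookie
    return entries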
6ee9a1bd3aa5014aff3b8b07a032c35a1c66d2e2
389ds/389-ds-base
Ticket 47958 - Memory leak in password admin if the admin entry does not exist Bug Description: If passwordAdminDN is set to an entry that does not exist, memory is leaked. Fix Description: The leak occurs because we do not free the internal search results, even when zero entries are returned. https://fedorahosted.org/389/ticket/47958 Reviewed by: rmeggins(Thanks!)
commit 6ee9a1bd3aa5014aff3b8b07a032c35a1c66d2e2 Author: Mark Reynolds <[email protected]> Date: Mon Nov 17 09:46:33 2014 -0500 Ticket 47958 - Memory leak in password admin if the admin entry does not exist Bug Description: If passwordAdminDN is set to an entry that does not exist memory is leaked. Fix Description: The leak occurs because we do not free the internal search results, even when zero entries are returned. https://fedorahosted.org/389/ticket/47958 Reviewed by: rmeggins(Thanks!) diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index 9c541c512..7f806126a 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -1558,14 +1558,17 @@ pw_get_admin_users(passwdPolicy *pwp) * Check if the DN exists and has "group" objectclasses */ pb = slapi_pblock_new(); - slapi_search_internal_set_pb(pb, binddn, LDAP_SCOPE_BASE,"(|(objectclass=groupofuniquenames)(objectclass=groupofnames))", - NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0); + slapi_search_internal_set_pb(pb, binddn, LDAP_SCOPE_BASE, + "(|(objectclass=groupofuniquenames)(objectclass=groupofnames))", + NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0); slapi_search_internal_pb(pb); slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &res); if (res != LDAP_SUCCESS) { + slapi_free_search_results_internal(pb); slapi_pblock_destroy(pb); - LDAPDebug(LDAP_DEBUG_ANY, "pw_get_admin_users: search failed for %s: error %d - Password Policy Administrators can not be set\n", - slapi_sdn_get_dn(sdn), res, 0); + LDAPDebug(LDAP_DEBUG_ANY, "pw_get_admin_users: search failed for %s: error %d - " + "Password Policy Administrators can not be set\n", + slapi_sdn_get_dn(sdn), res, 0); return; } /* @@ -1581,7 +1584,8 @@ pw_get_admin_users(passwdPolicy *pwp) slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries); uniquemember_vals = slapi_entry_attr_get_charray_ext(entries[0], "uniquemember", &uniquemember_count); member_vals = slapi_entry_attr_get_charray_ext(entries[0], "member", &member_count); - pwp->pw_admin_user = (Slapi_DN **)slapi_ch_calloc((uniquemember_count + member_count + 1), sizeof(Slapi_DN *)); + pwp->pw_admin_user = (Slapi_DN **)slapi_ch_calloc((uniquemember_count + member_count + 1), + sizeof(Slapi_DN *)); if(uniquemember_count > 0){ for(i = 0; i < uniquemember_count; i++){ pwp->pw_admin_user[count++] = slapi_sdn_new_dn_passin(uniquemember_vals[i]);
0
ad1b78ea157fba69b48471558cedde8bf899cee6
389ds/389-ds-base
Ticket 50056 - Fix UI bugs (part 2) Description: Fix issues with deleting SASL mappings, loading the replicated suffix dropdown select lists, improve root dn password change validation, improve docs in dscreate template for suffixes, hide password values in console logging, remove the ssca directory when removing the last instance, and fix issues with audit fail logging in UI. https://pagure.io/389-ds-base/issue/50056 Reviewed by: vashirov(Thanks!)
commit ad1b78ea157fba69b48471558cedde8bf899cee6 Author: Mark Reynolds <[email protected]> Date: Wed Dec 19 23:10:48 2018 -0500 Ticket 50056 - Fix UI bugs (part 2) Description: Fix issues with deleting SASL mappings, loading the replicated suffix dropdown select lists, improve root dn password change validation, improved docs in dscreate template for suffixes, hide password values in console logging, remove ssca directory whenremoivnign the last instance, and issues with audit fail logging in UI. https://pagure.io/389-ds-base/issue/50056 Reviewed by: vashirov(Thanks!) diff --git a/src/cockpit/389-console/src/ds.js b/src/cockpit/389-console/src/ds.js index b5f091ad3..c735a86d1 100644 --- a/src/cockpit/389-console/src/ds.js +++ b/src/cockpit/389-console/src/ds.js @@ -29,13 +29,6 @@ var DSCTL = "dsctl"; var DSCREATE = "dscreate"; var ENV = ""; -/* -// Used for local development testing -var DSCONF = '/home/mareynol/source/ds389/389-ds-base/src/lib389/cli/dsconf'; -var DSCTL = '/home/mareynol/source/ds389/389-ds-base/src/lib389/cli/dsctl'; -var DSCREATE = '/home/mareynol/source/ds389/389-ds-base/src/lib389/cli/dscreate'; -var ENV = 'PYTHONPATH=/home/mareynol/source/ds389/389-ds-base/src/lib389'; -*/ /* * Console logging function for CLI commands @@ -46,12 +39,12 @@ var ENV = 'PYTHONPATH=/home/mareynol/source/ds389/389-ds-base/src/lib389'; */ function log_cmd(js_func, desc, cmd_array) { if (window.console) { - var pw_args = ['--passwd', '--bind-pw']; + var pw_args = ['--passwd', '--bind-pw', '--bind-passwd', '--nsslapd-rootpw']; var cmd_list = []; var converted_pw = false; for (var idx in cmd_array) { - var cmd = cmd_array[idx]; + var cmd = cmd_array[idx].toString(); converted_pw = false; for (var arg_idx in pw_args) { if ( cmd.startsWith(pw_args[arg_idx]) ) { @@ -316,11 +309,48 @@ function save_all () { // save_security(); } +function load_repl_suffix_dropdowns() { + // Update replication drop downs (agmts mainly) + var repl_dropdowns = ['select-repl-agmt-suffix', 'select-repl-winsync-suffix', + 'cleanallruv-suffix', 'monitor-repl-backend-list']; + var repl_cmd = [DSCONF, '-j', 'ldapi://%2fvar%2frun%2f' + server_id + '.socket','replication', 'list']; + log_cmd('load_repl_suffix_dropdowns', 'get replicated suffix list', repl_cmd); + cockpit.spawn(repl_cmd, { superuser: true, "err": "message", "environ": [ENV]}).done(function(data) { + // Update dropdowns + for (var idx in repl_dropdowns) { + $("#" + repl_dropdowns[idx]).find('option').remove(); + } + var obj = JSON.parse(data); + for (var idx in obj['items']) { + for (var list in repl_dropdowns){ + $("#" + repl_dropdowns[list]).append('<option value="' + obj['items'][idx] + '" selected="selected">' + obj['items'][idx] +'</option>'); + } + } + if (obj['items'].length == 0){ + // Disable create agmt buttons + $("#create-agmt").prop("disabled", true); + $("#winsync-create-agmt").prop("disabled", true); + $("#create-cleanallruv-btn").prop("disabled", true); + } else { + // Enable repl agmt buttons + $("#create-agmt").prop("disabled", false); + $("#winsync-create-agmt").prop("disabled", false); + $("#create-cleanallruv-btn").prop("disabled", false); + } + }); +} + +var loading_cfg = 0; + function load_config (){ + // If we are currently loading config don't do it twice + if (loading_cfg == 1){ + return; + } + loading_cfg = 1; + // Load the configuration for all the pages. 
- var dropdowns = ['local-pwp-suffix', 'select_repl_suffix', 'select-repl-cfg-suffix', - 'select-repl-agmt-suffix', 'select-repl-winsync-suffix', - 'cleanallruv-suffix', 'monitor-repl-backend-list']; + var dropdowns = ['local-pwp-suffix', 'select-repl-cfg-suffix']; // Show the spinner, and reset the pages $("#loading-msg").html("Loading Directory Server configuration for <i><b>" + server_id + "</b></i>..."); @@ -338,7 +368,7 @@ function load_config (){ cockpit.spawn(cmd, { superuser: true, "err": "message", "environ": [ENV]}).done(function(data) { // Update dropdowns for (var idx in dropdowns) { - $("#" + dropdowns[idx]).empty(); + $("#" + dropdowns[idx]).find('option').remove(); } var obj = JSON.parse(data); for (var idx in obj['items']) { @@ -378,6 +408,7 @@ function load_config (){ $("#server-content").show(); $("#server-config").show(); clearInterval(loading_config); + loading_cfg = 0; console.log("Completed configuration initialization."); } }, 300); @@ -386,6 +417,7 @@ function load_config (){ popup_err("Failed To Contact Server",data.message); $("#everything").show(); check_inst_alive(1); + loading_cfg = 0; }); } diff --git a/src/cockpit/389-console/src/replication.html b/src/cockpit/389-console/src/replication.html index 9307fff11..06a26e8e0 100644 --- a/src/cockpit/389-console/src/replication.html +++ b/src/cockpit/389-console/src/replication.html @@ -5,8 +5,6 @@ <label class="ds-config-label-med" for="select-repl-role">Database Suffix</Label><select class="btn btn-default dropdown" id="select-repl-cfg-suffix"> - <option>dc=example,dc=com</option> - <option>o=ipaca</option> </select> <div id="ds-repl-enabled" hidden> @@ -151,8 +149,6 @@ Replication Agreements <div id="repl-agmts" class="all-pages" hidden> <h3 class="ds-config-header">Replication Agreements for <select class="btn btn-default dropdown" id="select-repl-agmt-suffix"> - <option>dc=example,dc=com</option> - <option>o=ipaca</option> </select></h3> <div class="ds-page-content"> diff --git a/src/cockpit/389-console/src/replication.js b/src/cockpit/389-console/src/replication.js index 62215aca1..3aa313636 100644 --- a/src/cockpit/389-console/src/replication.js +++ b/src/cockpit/389-console/src/replication.js @@ -456,6 +456,7 @@ function get_and_set_repl_config () { // Show the page (in case it was hidden) $("#ds-repl-enabled").hide(); $("#repl-config-content").show(); + load_repl_suffix_dropdowns(); console.log("Finished loading replication configuration."); }).fail(function(data) { @@ -463,6 +464,7 @@ function get_and_set_repl_config () { current_role = "Disabled"; $("#repl-config-content").hide(); $("#ds-repl-enabled").show(); + load_repl_suffix_dropdowns(); }); } else { // No Suffix diff --git a/src/cockpit/389-console/src/servers.html b/src/cockpit/389-console/src/servers.html index 5dcb4f2cc..ef647fecc 100644 --- a/src/cockpit/389-console/src/servers.html +++ b/src/cockpit/389-console/src/servers.html @@ -154,7 +154,6 @@ <option>SSHA384</option> <option>SSHA256</option> <option>SSHA</option> - <option>NS-MTA-MD5</option> <option>MD5</option> <option>SMD5</option> <option>CRYPT-MD5</option> @@ -493,7 +492,7 @@ <!-- Access logging --> <div id="server-access-log" class="all-pages" hidden> <h3 class="ds-config-header">Access Log Settings</h3> - <input type="checkbox" class="ds-config-checkbox" id="nsslapd-accesslog-logging-enabled" checked><label + <input type="checkbox" class="ds-config-checkbox" id="nsslapd-accesslog-logging-enabled"><label for="nsslapd-accesslog-logging-enabled" class="ds-label" title="Enable access 
logging (nsslapd-accesslog-logging-enabled)."> Enable Access Logging</label> <div class="ds-expired-div" id="accesslog-attrs"> <div class="ds-inline"> @@ -586,7 +585,7 @@ <div id="server-audit-log" class="all-pages" hidden> <h3 class="ds-config-header">Audit Log Settings</h3> - <input type="checkbox" class="ds-config-checkbox" id="nsslapd-auditlog-logging-enabled" checked><label + <input type="checkbox" class="ds-config-checkbox" id="nsslapd-auditlog-logging-enabled"><label for="nsslapd-auditlog-logging-enabled" class="ds-label" title="Enable audit logging (nsslapd-auditlog-logging-enabled)."> Enable Audit Logging</label> <div class="ds-expired-div" id="auditlog-attrs"> <div class="ds-inline"> @@ -594,11 +593,6 @@ <label for="nsslapd-auditlog" class="ds-config-label" title="The audit log location and name (nsslapd-auditlog).">Audit Log Location</label><input class="ds-input" type="text" id="nsslapd-auditlog" size="40"/> </div> - <div> - <input type="checkbox" class="ds-server-checkbox" id="nsslapd-auditfaillog-logging-enabled"><label - for="nsslapd-auditfaillog-logging-enabled" class="ds-label" title="Log failed operations in the audit log (nsslapd-auditfaillog-logging-enabled)."> - Log Failed Operations</label> - </div> </div> <p></p> <h4 class="ds-sub-header">Rotation Policy</h4> @@ -654,7 +648,7 @@ <!-- Auditfail logging --> <div id="server-auditfail-log" class="all-pages" hidden> <h3 class="ds-config-header">Audit Failure Log Settings</h3> - <input type="checkbox" class="ds-config-checkbox" id="nsslapd-auditfaillog-logging-enabled" checked><label + <input type="checkbox" class="ds-config-checkbox" id="nsslapd-auditfaillog-logging-enabled"><label for="nsslapd-auditfaillog-logging-enabled" class="ds-label" title="Enable audit failure logging (nsslapd-auditfaillog-logging-enabled)."> Enable Audit Failure Logging</label> <div class="ds-expired-div" id="auditfaillog-attrs"> <label for="nsslapd-auditfaillog" class="ds-config-label" title="The audit failure log location and name (nsslapd-auditfaillog).">Audit Failure Log Location</label><input diff --git a/src/cockpit/389-console/src/servers.js b/src/cockpit/389-console/src/servers.js index 35433491b..204a14dac 100644 --- a/src/cockpit/389-console/src/servers.js +++ b/src/cockpit/389-console/src/servers.js @@ -180,6 +180,11 @@ function get_and_set_config () { $("#" + attr).trigger('change'); } config_values[attr] = val; + + // Handle password confirm inputs + if (attr == "nsslapd-rootpw"){ + $("#nsslapd-rootpw-confirm").val(val); + } } // Do the log level tables @@ -320,7 +325,7 @@ function apply_mods(mods) { for (remaining in mods) { $("#" + remaining.attr).val(config_values[remaining.attr]); } - check_inst_alive(1); + check_inst_alive(0); return; // Stop on error }); } @@ -352,7 +357,7 @@ function save_config() { var val = $("#" + attr).val(); // But first check for rootdn-pw changes and check confirm input matches - if (attr == "nsslapd-rootpw" && val != config_values[attr]) { + if (attr == "nsslapd-rootpw" && (val != config_values[attr] || val != $("#nsslapd-rootpw-confirm").val())) { // Password change, make sure passwords match if (val != $("#nsslapd-rootpw-confirm").val()){ popup_msg("Passwords do not match!", "The Directory Manager passwords do not match, please correct before saving again."); diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py index 590ac2fb0..96e7c02c9 100644 --- a/src/lib389/lib389/cli_conf/replication.py +++ b/src/lib389/lib389/cli_conf/replication.py @@ -243,6 
+243,22 @@ def demote_replica(inst, basedn, log, args): print("Successfully demoted replica to \"{}\"".format(role)) +def list_suffixes(inst, basedn, log, args): + suffixes = [] + replicas = Replicas(inst).list() + for replica in replicas: + suffixes.append(replica.get_suffix()) + + if args.json: + print(json.dumps({"type": "list", "items": suffixes})) + else: + if len(suffixes) == 0: + print("There are no replicated suffixes") + else: + for suffix in suffixes: + print(suffix) + + def get_repl_config(inst, basedn, log, args): replicas = Replicas(inst) replica = replicas.get(args.suffix) @@ -841,6 +857,9 @@ def create_parser(subparsers): repl_disable_parser.set_defaults(func=disable_replication) repl_disable_parser.add_argument('--suffix', required=True, help='The DN of the suffix to have replication disabled') + repl_list_parser = repl_subcommands.add_parser('list', help='List all the replicated suffixes') + repl_list_parser.set_defaults(func=list_suffixes) + repl_promote_parser = repl_subcommands.add_parser('promote', help='Promte replica to a Hub or Master') repl_promote_parser.set_defaults(func=promote_replica) repl_promote_parser.add_argument('--suffix', required=True, help="The DN of the replication suffix to promote") diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py index c41586c22..a32ec8a88 100644 --- a/src/lib389/lib389/instance/options.py +++ b/src/lib389/lib389/instance/options.py @@ -286,7 +286,9 @@ class Backend2Base(Options2): self._options['suffix'] = 'dc=example,dc=com' self._type['suffix'] = str - self._helptext['suffix'] = "Sets the root suffix stored in this database." + self._helptext['suffix'] = ("Sets the root suffix stored in this database. If you do not uncomment the suffix " + + "attribute the install process will NOT create the backend/suffix. You can also " + + "create multiple backends/suffixes by duplicating this section.") self._options['sample_entries'] = "no" self._type['sample_entries'] = str diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py index 70ebfcc11..2d8ce99a8 100644 --- a/src/lib389/lib389/instance/remove.py +++ b/src/lib389/lib389/instance/remove.py @@ -40,6 +40,8 @@ def remove_ds_instance(dirsrv, force=False): remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir marker_path = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid) + etc_dirsrv_path = os.path.join(dirsrv.ds_paths.sysconf_dir, 'dirsrv/') + ssca_path = os.path.join(etc_dirsrv_path, 'ssca/') # Check the marker exists. If it *does not* warn about this, and say that to # force removal you should touch this file. @@ -93,6 +95,15 @@ def remove_ds_instance(dirsrv, force=False): if dirsrv.sslport is not None: selinux_label_port(dirsrv.sslport, remove_label=True) + # If this was the last instance, remove the ssca directory + insts = dirsrv.list(all=True) + if len(insts) == 0: + # Remove /etc/dirsrv/ssca + try: + shutil.rmtree(ssca_path) + except FileNotFoundError: + pass + # Done! 
_log.debug("Complete") diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index 1a25a4ee6..1b3bf0dc5 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -42,9 +42,9 @@ def get_port(port, default_port, secure=False): # Get the port number for the interactive installer and validate it while 1: if secure: - val = input('\nEnter secure port number [{}]: '.format(default_port)) + val = input('\nEnter secure port number [{}]: '.format(default_port)).rstrip() else: - val = input('\nEnter port number [{}]: '.format(default_port)) + val = input('\nEnter port number [{}]: '.format(default_port)).rstrip() if val != "" or default_port == "": # Validate port is number and in a valid range @@ -224,7 +224,7 @@ class SetupDs(object): """Check if instance name is already taken :param serverid - name of the server instance :param prefix - name of prefix build location - :return True - if the serfver id is already in use + :return True - if the server id is already in use False - if the server id is available """ if prefix != "/usr": @@ -275,19 +275,19 @@ class SetupDs(object): 'schema_dir': ds_paths.schema_dir} # Start asking questions, beginning with the hostname... - val = input('\nEnter system\'s hostname [{}]: '.format(general['full_machine_name'])) + val = input('\nEnter system\'s hostname [{}]: '.format(general['full_machine_name'])).rstrip() if val != "": general['full_machine_name'] = val # Strict host name checking msg = ("\nUse strict hostname verification (set to \"no\" if using GSSAPI behind a load balancer) [yes]: ") while 1: - val = input(msg) + val = input(msg).rstrip().lower() if val != "": - if val.lower() == "no" or val.lower() == "n": + if val == "no" or val == "n": slapd['strict_host_checking'] = False break - if val.lower() == "yes" or val.lower() == "y": + if val == "yes" or val == "y": # Use default break @@ -305,7 +305,7 @@ class SetupDs(object): if self._server_id_taken(slapd['instance_name'], prefix=slapd['prefix']): slapd['instance_name'] = "" - val = input('\nEnter the instance name [{}]: '.format(slapd['instance_name'])) + val = input('\nEnter the instance name [{}]: '.format(slapd['instance_name'])).rstrip() if val != "": if not all(ord(c) < 128 for c in val): print("Server identifier can not contain non ascii characters") @@ -355,12 +355,12 @@ class SetupDs(object): # Self-Signed Cert DB while 1: - val = input('\nCreate self-signed certificate database [yes]: ') + val = input('\nCreate self-signed certificate database [yes]: ').rstrip().lower() if val != "": - if val.lower() == 'no' or val.lower() == "n": + if val== 'no' or val == "n": slapd['self_sign_cert'] = False break - elif val.lower() == "yes" or val.lower() == "y": + elif val == "yes" or val == "y": # Default value is already yes break else: @@ -383,7 +383,7 @@ class SetupDs(object): # Root DN while 1: - val = input('\nEnter Directory Manager DN [{}]: '.format(slapd['root_dn'])) + val = input('\nEnter Directory Manager DN [{}]: '.format(slapd['root_dn'])).rstrip() if val != '': # Validate value is a DN if is_a_dn(val, allow_anon=False): @@ -398,12 +398,12 @@ class SetupDs(object): # Root DN Password while 1: - rootpw1 = getpass.getpass('\nEnter the Directory Manager password: ') + rootpw1 = getpass.getpass('\nEnter the Directory Manager password: ').rstrip() if rootpw1 == '': print('Password can not be empty') continue - rootpw2 = getpass.getpass('Confirm the Directory Manager Password: ') + rootpw2 = getpass.getpass('Confirm the Directory 
Manager Password: ').rstrip() if rootpw1 != rootpw2: print('Passwords do not match') continue @@ -424,7 +424,7 @@ class SetupDs(object): suffix += ",dc=" + comp while 1: - val = input("\nEnter the database suffix (or enter \"none\" to skip) [{}]: ".format(suffix)) + val = input("\nEnter the database suffix (or enter \"none\" to skip) [{}]: ".format(suffix)).rstrip() if val != '': if val.lower() == "none": # No database, no problem @@ -443,11 +443,11 @@ class SetupDs(object): # Add sample entries? if len(backends) > 0: while 1: - val = input("\nCreate sample entries in the suffix [no]: ".format(suffix)) + val = input("\nCreate sample entries in the suffix [no]: ".format(suffix)).rstrip().lower() if val != "": - if val.lower() == "no" or val.lower() == "n": + if val == "no" or val == "n": break - if val.lower() == "yes" or val.lower() == "y": + if val == "yes" or val == "y": backend['sample_entries'] = INSTALL_LATEST_CONFIG break @@ -459,11 +459,11 @@ class SetupDs(object): # Are you ready? while 1: - val = input('\nAre you ready to install? [no]: ') - if val == '' or val.lower() == "no" or val.lower() == 'n': + val = input('\nAre you ready to install? [no]: ').rstrip().lower() + if val == '' or val == "no" or val == 'n': print('Aborting installation...') sys.exit(0) - elif val.lower() == 'yes' or val.lower() == 'y': + elif val == 'yes' or val == 'y': # lets do it! break else: diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index 7d23c8da3..ca18374f8 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -1242,6 +1242,14 @@ class Replica(DSLdapObject): """ self.replace('nsds5task', 'cl2ldif') + def get_suffix(self): + """Return the suffix + """ + if self._suffix is None: + self._populate_suffix() + + return self._suffix + class Replicas(DSLdapObjects): """Replica DSLdapObjects for all replicas
0
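The interactive-installer changes in the diff above all apply the same normalization: strip trailing whitespace from the answer, lowercase it once, and compare against the plain "yes"/"no" variants. The following is a minimal standalone sketch of that pattern, not the lib389 code itself; the helper name prompt_yes_no is illustrative.

def prompt_yes_no(message, default=True):
    """Ask a yes/no question until a recognisable answer is given."""
    while True:
        val = input(message).rstrip().lower()
        if val == "":
            return default          # empty answer keeps the default
        if val in ("yes", "y"):
            return True
        if val in ("no", "n"):
            return False
        print("Please answer yes or no.")


if __name__ == "__main__":
    if prompt_yes_no("\nCreate self-signed certificate database [yes]: "):
        print("Will create the certificate database")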
bacdf018f340641f9edf1a3c8392b62f8dba5331
389ds/389-ds-base
Ticket 47651 - Finaliser to remove instance backups https://fedorahosted.org/389/ticket/47651 Reviewed by: Nathan Kinder
commit bacdf018f340641f9edf1a3c8392b62f8dba5331 Author: Thierry bordaz (tbordaz) <[email protected]> Date: Fri Dec 20 14:33:11 2013 +0100 Ticket 47651 - Finaliser to remove instances backups https://fedorahosted.org/389/ticket/47651 Reviewed by: Nathan Kinder diff --git a/dirsrvtests/tickets/finalizer.py b/dirsrvtests/tickets/finalizer.py index 72e0c0f74..eb0233201 100644 --- a/dirsrvtests/tickets/finalizer.py +++ b/dirsrvtests/tickets/finalizer.py @@ -36,6 +36,9 @@ def test_finalizer(): instance.allocate(args_instance) if instance.exists(): instance.delete() + + # remove any existing backup for this instance + instance.clearBackupFS() def run_isolated(): '''
0
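The commit above registers clean-up so stale instance backups are removed before a test allocates a new instance. Below is a self-contained pytest-style sketch of the same clean-up pattern using a finalizer; it mirrors what instance.clearBackupFS() does in the diff, but the temporary directory is purely illustrative and is not the real 389-ds backup location.

import os
import shutil
import tempfile

import pytest


@pytest.fixture
def instance_backup_dir(request):
    backup_dir = tempfile.mkdtemp(prefix="slapd-backup-")

    def fin():
        # remove any backup left behind by the test, as the finalizer does
        if os.path.isdir(backup_dir):
            shutil.rmtree(backup_dir)

    request.addfinalizer(fin)
    return backup_dir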
28e4be01a7be5c8b5bed85dc847cf65329449dd1
389ds/389-ds-base
Issue 49157 - ds-logpipe.py crashes for non-existing users Description: Added try/except blocks around various OS function calls, so the tool exits gracefully when there is a problem instead of crashing https://pagure.io/389-ds-base/issue/49157 Reviewed by: firstyear(Thanks!)
commit 28e4be01a7be5c8b5bed85dc847cf65329449dd1 Author: Mark Reynolds <[email protected]> Date: Mon Mar 20 15:29:48 2017 -0400 Issue 49157 - ds-logpipe.py crashes for non-existing users Description: Added try/except's for various OS function calls, as the tool should gracefully exit when there is a problem and not crash https://pagure.io/389-ds-base/issue/49157 Reviewed by: firstyear(Thanks!) diff --git a/ldap/admin/src/scripts/ds-logpipe.py b/ldap/admin/src/scripts/ds-logpipe.py index 4ba4d1bdf..dc1856a49 100644 --- a/ldap/admin/src/scripts/ds-logpipe.py +++ b/ldap/admin/src/scripts/ds-logpipe.py @@ -262,7 +262,8 @@ def parse_options(): options, logfname = parse_options() -if options.debug: debug = True +if options.debug: + debug = True if len(plgfuncs) == 0: plgfuncs.append(defaultplugin) @@ -270,9 +271,15 @@ if len(plgpostfuncs) == 0: plgpostfuncs.append(defaultpost) if options.user: - try: userid = int(options.user) - except ValueError: # not a numeric userid - look it up - userid = pwd.getpwnam(options.user)[2] + try: + userid = int(options.user) + except ValueError: # not a numeric userid - look it up + try: + userid = pwd.getpwnam(options.user)[2] + except Exception as e: + print("Failed to lookup name (%s) error: %s" % + (options.user, str(e))) + sys.exit(1) os.seteuid(userid) if options.scriptpidfile: @@ -298,8 +305,12 @@ except OSError as e: if e.errno == errno.ENOENT: if debug: print("Creating log pipe", logfname) - os.mkfifo(logfname) - os.chmod(logfname, 0o600) + try: + os.mkfifo(logfname) + os.chmod(logfname, 0o600) + except Exception as e: + print("Failed to create log pipe: " + str(e)) + sys.exit(1) else: raise Exception("%s [%d]" % (e.strerror, e.errno)) @@ -393,7 +404,7 @@ while not done: else: # we read something # pipe closed - usually when server shuts down done = True - + if not done and debug: print("log pipe", logfname, "closed - reopening - read", totallines, "total lines")
0
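The hardening in the diff wraps the user lookup and the FIFO creation in try/except so the tool prints a clear message and exits instead of raising a traceback. A standalone sketch of that flow follows; the user name and pipe path are illustrative, not the values ds-logpipe.py uses.

import os
import pwd
import sys


def resolve_userid(user):
    """Accept either a numeric uid or a user name; exit cleanly on failure."""
    try:
        return int(user)
    except ValueError:                      # not numeric - look the name up
        try:
            return pwd.getpwnam(user)[2]
        except Exception as e:
            print("Failed to lookup name (%s) error: %s" % (user, e))
            sys.exit(1)


def create_log_pipe(path):
    try:
        os.mkfifo(path)
        os.chmod(path, 0o600)
    except Exception as e:
        print("Failed to create log pipe: " + str(e))
        sys.exit(1)


if __name__ == "__main__":
    uid = resolve_userid("nobody")            # illustrative user name
    create_log_pipe("/tmp/example-logpipe")   # illustrative path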
f25c7f1f988783d620171f7b648f946dc6704c81
389ds/389-ds-base
Ticket #47835 - Coverity: 12687..12692 12691 - Unbounded source buffer Description: To solve "Passing string *argv of unknown size to strdup, which expects a string of a particular size", get ARG_MAX and pass it to strndup. Reviewed by [email protected] (Thanks, Rich!) https://fedorahosted.org/389/ticket/47835
commit f25c7f1f988783d620171f7b648f946dc6704c81 Author: Noriko Hosoi <[email protected]> Date: Mon Jun 30 18:08:35 2014 -0700 Ticket #47835 - Coverity: 12687..12692 12691 - Unbounded source buffer Description: To solve "Passing string *argv of unknown size to strdup, which expects a string of a particular size", get ARG_MAX and pass it to strndup. Reviewed by [email protected] (Thanks, Rich!) https://fedorahosted.org/389/ticket/47835 diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c index 03738777c..fd06dd42c 100644 --- a/ldap/servers/snmp/main.c +++ b/ldap/servers/snmp/main.c @@ -75,6 +75,7 @@ main (int argc, char *argv[]) { struct stat logdir_s; pid_t child_pid; FILE *pid_fp; + long arg_max = 0; /* Load options */ while ((--argc > 0) && ((*++argv)[0] == '-')) { @@ -90,11 +91,13 @@ main (int argc, char *argv[]) { } } - if (argc != 1) + if ((argc != 1) || (NULL == *argv)) { exit_usage(); + } /* load config file */ - if ((config_file = strdup(*argv)) == NULL) { + arg_max = sysconf(_SC_ARG_MAX); + if ((config_file = strndup(*argv, arg_max)) == NULL) { printf("ldap-agent: Memory error loading config file\n"); exit(1); }
0
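The fix above is C-specific: it bounds the copy with strndup using the limit returned by sysconf(_SC_ARG_MAX). As a rough Python analogue (assumed to run on Linux, where SC_ARG_MAX is available via os.sysconf), the same limit can be queried and used to cap the length of an argument before using it; the function below is illustrative only.

import os
import sys


def load_config_path(argv):
    if len(argv) != 2 or not argv[1]:
        sys.exit("usage: prog <config-file>")
    arg_max = os.sysconf("SC_ARG_MAX")      # same limit the C fix queries
    return argv[1][:arg_max]                # keep at most ARG_MAX characters


if __name__ == "__main__":
    print(load_config_path(sys.argv))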
07d9cb2b67b32d321ebfca0a4f1e57791c5bf411
389ds/389-ds-base
Bug 622907 - support piped passwords to perl-based maintenance commands https://bugzilla.redhat.com/show_bug.cgi?id=622907 Resolves: bug 622907 Bug Description: support piped passwords to perl-based maintenance commands Author: [email protected] Reviewed by: rmeggins Branch: master Fix Description: Use -t STDIN to check if the terminal is a tty Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 07d9cb2b67b32d321ebfca0a4f1e57791c5bf411 Author: Rich Megginson <[email protected]> Date: Mon Jan 24 08:37:52 2011 -0700 Bug 622907 - support piped passwords to perl-based maintenance commands https://bugzilla.redhat.com/show_bug.cgi?id=622907 Resolves: bug 622907 Bug Description: support piped passwords to perl-based maintenance commands Author: [email protected] Reviewed by: rmeggins Branch: master Fix Description: Use -t STDIN to check if the terminal is a tty Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/admin/src/scripts/template-bak2db.pl.in b/ldap/admin/src/scripts/template-bak2db.pl.in index 78a20d389..96bfd166a 100644 --- a/ldap/admin/src/scripts/template-bak2db.pl.in +++ b/ldap/admin/src/scripts/template-bak2db.pl.in @@ -93,11 +93,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-db2bak.pl.in b/ldap/admin/src/scripts/template-db2bak.pl.in index 5fac9a76e..27f9f30e2 100644 --- a/ldap/admin/src/scripts/template-db2bak.pl.in +++ b/ldap/admin/src/scripts/template-db2bak.pl.in @@ -90,11 +90,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-db2index.pl.in b/ldap/admin/src/scripts/template-db2index.pl.in index 6229cb469..91101a116 100644 --- a/ldap/admin/src/scripts/template-db2index.pl.in +++ b/ldap/admin/src/scripts/template-db2index.pl.in @@ -129,11 +129,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-db2ldif.pl.in b/ldap/admin/src/scripts/template-db2ldif.pl.in index 2a3c3f748..f3148ff5d 100644 --- a/ldap/admin/src/scripts/template-db2ldif.pl.in +++ b/ldap/admin/src/scripts/template-db2ldif.pl.in @@ -181,11 +181,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in b/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in index acdf29bce..17ca9f739 100644 --- a/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in +++ b/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in @@ -111,11 +111,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); 
# trim trailing newline } diff --git a/ldap/admin/src/scripts/template-fixup-memberof.pl.in b/ldap/admin/src/scripts/template-fixup-memberof.pl.in index de7a6ba4a..5dff5f740 100644 --- a/ldap/admin/src/scripts/template-fixup-memberof.pl.in +++ b/ldap/admin/src/scripts/template-fixup-memberof.pl.in @@ -120,11 +120,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-ldif2db.pl.in b/ldap/admin/src/scripts/template-ldif2db.pl.in index 1ead21e45..b4d2c8341 100644 --- a/ldap/admin/src/scripts/template-ldif2db.pl.in +++ b/ldap/admin/src/scripts/template-ldif2db.pl.in @@ -169,11 +169,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-ns-accountstatus.pl.in b/ldap/admin/src/scripts/template-ns-accountstatus.pl.in index 2712d0965..0b4deeb10 100644 --- a/ldap/admin/src/scripts/template-ns-accountstatus.pl.in +++ b/ldap/admin/src/scripts/template-ns-accountstatus.pl.in @@ -465,11 +465,11 @@ if ($pwfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $rootpw = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($rootpw); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-ns-activate.pl.in b/ldap/admin/src/scripts/template-ns-activate.pl.in index 2712d0965..0b4deeb10 100644 --- a/ldap/admin/src/scripts/template-ns-activate.pl.in +++ b/ldap/admin/src/scripts/template-ns-activate.pl.in @@ -465,11 +465,11 @@ if ($pwfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $rootpw = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($rootpw); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-ns-inactivate.pl.in b/ldap/admin/src/scripts/template-ns-inactivate.pl.in index 2712d0965..0b4deeb10 100644 --- a/ldap/admin/src/scripts/template-ns-inactivate.pl.in +++ b/ldap/admin/src/scripts/template-ns-inactivate.pl.in @@ -465,11 +465,11 @@ if ($pwfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $rootpw = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($rootpw); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-schema-reload.pl.in b/ldap/admin/src/scripts/template-schema-reload.pl.in index 37071b4f1..7cac4836d 100644 --- a/ldap/admin/src/scripts/template-schema-reload.pl.in +++ b/ldap/admin/src/scripts/template-schema-reload.pl.in @@ -110,11 +110,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); 
+ system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-syntax-validate.pl.in b/ldap/admin/src/scripts/template-syntax-validate.pl.in index d8fc878d0..401ec87bc 100644 --- a/ldap/admin/src/scripts/template-syntax-validate.pl.in +++ b/ldap/admin/src/scripts/template-syntax-validate.pl.in @@ -120,11 +120,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline } diff --git a/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in b/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in index 0b54917a0..7ad6ca283 100644 --- a/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in +++ b/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in @@ -124,11 +124,11 @@ if ($passwdfile ne ""){ # Read the password from terminal print "Bind Password: "; # Disable console echo - system("stty -echo"); + system("stty -echo") if -t STDIN; # read the answer $passwd = <STDIN>; # Enable console echo - system("stty echo"); + system("stty echo") if -t STDIN; print "\n"; chop($passwd); # trim trailing newline }
0
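The Perl templates above guard the stty echo toggling with -t STDIN so a piped password does not hang or garble the terminal. A Python equivalent of the same idea, shown here only as an analogue of the shipped Perl fix, is to prompt with getpass when stdin is a terminal and otherwise read the piped password directly.

import getpass
import sys


def read_bind_password():
    if sys.stdin.isatty():
        # interactive: prompt without echoing, like "stty -echo" in the scripts
        return getpass.getpass("Bind Password: ")
    # non-interactive: the password was piped in, read one line and trim the newline
    return sys.stdin.readline().rstrip("\n")


if __name__ == "__main__":
    passwd = read_bind_password()
    print("read %d characters" % len(passwd))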
43fb648fd4d7663c61c7ea7ff649ffddb9cbf006
389ds/389-ds-base
Ticket 365 - passwords in clear text in the audit log Bug Description: after changing a user password, an additional modify is added to the mods: "unhashed#user#password: <clear text password>" e.g. PSEUDO_ATTR_UNHASHEDUSERPASSWORD Fix Description: Added new config param "nsslapd-audit-logging-hide-unhashed-pw". The default is "off". When "on" that single modify op is skipped from the audit logging. https://fedorahosted.org/389/ticket/365 Reviewed by: Noriko (Thanks!)
commit 43fb648fd4d7663c61c7ea7ff649ffddb9cbf006 Author: Mark Reynolds <[email protected]> Date: Wed May 16 17:53:27 2012 -0400 Ticket 365 - passwords in clear text in the audit log Bug Description: after changing a user password, an additional modify is added to the mods: "unhashed#user#password: <clear text password>" e.g. PSEUDO_ATTR_UNHASHEDUSERPASSWORD Fix Description: Added new config param "nsslapd-audit-logging-hide-unhashed-pw". The default is "off". When "on" that single modify op is skipped from the audit logging. https://fedorahosted.org/389/ticket/365 Reviewed by: Noriko (Thanks!) diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c index 79cd40748..16c0f7e73 100644 --- a/ldap/servers/slapd/auditlog.c +++ b/ldap/servers/slapd/auditlog.c @@ -55,6 +55,7 @@ char *attr_changetype = ATTR_CHANGETYPE; char *attr_newrdn = ATTR_NEWRDN; char *attr_deleteoldrdn = ATTR_DELETEOLDRDN; char *attr_modifiersname = ATTR_MODIFIERSNAME; +static int hide_unhashed_pw = 0; /* Forward Declarations */ static void write_audit_file( int optype, const char *dn, void *change, int flag, time_t curtime ); @@ -152,6 +153,10 @@ write_audit_file( for ( j = 0; mods[j] != NULL; j++ ) { int operationtype= mods[j]->mod_op & ~LDAP_MOD_BVALUES; + + if((strcmp(mods[j]->mod_type, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) && hide_unhashed_pw){ + continue; + } switch ( operationtype ) { case LDAP_MOD_ADD: @@ -246,3 +251,15 @@ write_audit_file( lenstr_free( &l ); } + +void +auditlog_hide_unhashed_pw() +{ + hide_unhashed_pw = 1; +} + +void +auditlog_expose_unhashed_pw() +{ + hide_unhashed_pw = 0; +} diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 6d0db3644..2540e25d2 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -459,6 +459,9 @@ static struct config_get_and_set { {CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE, NULL, log_set_logging, SLAPD_AUDIT_LOG, (void**)&global_slapdFrontendConfig.auditlog_logging_enabled, CONFIG_ON_OFF, NULL}, + {CONFIG_AUDITLOG_LOGGING_HIDE_UNHASHED_PW, config_set_auditlog_unhashed_pw, + NULL, 0, + (void**)&global_slapdFrontendConfig.auditlog_logging_hide_unhashed_pw, CONFIG_ON_OFF, NULL}, {CONFIG_ACCESSLOG_BUFFERING_ATTRIBUTE, config_set_accesslogbuffering, NULL, 0, (void**)&global_slapdFrontendConfig.accesslogbuffering, CONFIG_ON_OFF, NULL}, @@ -1072,6 +1075,7 @@ FrontendConfig_init () { cfg->auditlog_minfreespace = 5; cfg->auditlog_exptime = 1; cfg->auditlog_exptimeunit = slapi_ch_strdup("month"); + cfg->auditlog_logging_hide_unhashed_pw = LDAP_OFF; cfg->entryusn_global = LDAP_OFF; cfg->entryusn_import_init = slapi_ch_strdup("0"); @@ -1171,6 +1175,21 @@ get_entry_point( int ep_name, caddr_t *ep_addr ) return rc; } +int +config_set_auditlog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply) +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal = LDAP_SUCCESS; + + retVal = config_set_onoff ( attrname, value, &(slapdFrontendConfig->auditlog_logging_hide_unhashed_pw), + errorbuf, apply); + if(strcasecmp(value,"on") == 0){ + auditlog_hide_unhashed_pw(); + } else { + auditlog_expose_unhashed_pw(); + } + return retVal; +} /* * Utility function called by many of the config_set_XXX() functions. 
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index ea6f610e9..c3ebd79ac 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -387,6 +387,7 @@ int config_set_disk_threshold( const char *attrname, char *value, char *errorbuf int config_set_disk_grace_period( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_disk_preserve_logging( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_disk_logging_critical( const char *attrname, char *value, char *errorbuf, int apply ); +int config_set_auditlog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply); #if !defined(_WIN32) && !defined(AIX) int config_set_maxdescriptors( const char *attrname, char *value, char *errorbuf, int apply ); @@ -1190,6 +1191,8 @@ void factory_destroy_extension(int type,void *object,void *parent,void **extensi */ void write_audit_log_entry( Slapi_PBlock *pb); +void auditlog_hide_unhashed_pw(); +void auditlog_expose_unhashed_pw(); /* * eventq.c diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 2e4db5697..22237ce5c 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1874,6 +1874,7 @@ typedef struct _slapdEntryPoints { #define CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-accesslog-logging-enabled" #define CONFIG_ERRORLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-errorlog-logging-enabled" #define CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-auditlog-logging-enabled" +#define CONFIG_AUDITLOG_LOGGING_HIDE_UNHASHED_PW "nsslapd-auditlog-logging-hide-unhashed-pw" #define CONFIG_ROOTDN_ATTRIBUTE "nsslapd-rootdn" #define CONFIG_ROOTPW_ATTRIBUTE "nsslapd-rootpw" #define CONFIG_ROOTPWSTORAGESCHEME_ATTRIBUTE "nsslapd-rootpwstoragescheme" @@ -2163,6 +2164,7 @@ typedef struct _slapdFrontendConfig { int auditlog_minfreespace; int auditlog_exptime; char *auditlog_exptimeunit; + int auditlog_logging_hide_unhashed_pw; int return_exact_case; /* Return attribute names with the same case * as they appear in at.conf */
0
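The new switch added by the commit above can be toggled from cn=config like any other nsslapd attribute. A short lib389-style sketch follows; the attribute name comes from the diff (CONFIG_AUDITLOG_LOGGING_HIDE_UNHASHED_PW), while the topology_st fixture and instance object are assumed to be available as in the other tests in this repository.

from lib389.topologies import topology_st


def test_hide_unhashed_pw_in_audit_log(topology_st):
    inst = topology_st.standalone
    # enable audit logging and hide the clear-text pseudo-attribute
    inst.config.set('nsslapd-auditlog-logging-enabled', 'on')
    inst.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'on')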
1417198b95ca3e618ac26e748ec4bde6417c105c
389ds/389-ds-base
Issue 6544 - logconv.py: python3-magic conflicts with python3-file-magic Bug Description: python3-magic and python3-file-magic can't be installed simultaneously, python3-magic is not packaged for EL10. Fix Description: Use python3-file-magic instead. Issue identified and fix suggested by Adam Williamson. Fixes: https://github.com/389ds/389-ds-base/issues/6544 Reviewed by: @mreynolds389 (Thanks!)
commit 1417198b95ca3e618ac26e748ec4bde6417c105c Author: Viktor Ashirov <[email protected]> Date: Sat Jan 25 13:54:33 2025 +0100 Issue 6544 - logconv.py: python3-magic conflicts with python3-file-magic Bug Description: python3-magic and python3-file-magic can't be installed simultaneously, python3-magic is not packaged for EL10. Fix Description: Use python3-file-magic instead. Issue identified and fix suggested by Adam Williamson. Fixes: https://github.com/389ds/389-ds-base/issues/6544 Reviewed by: @mreynolds389 (Thanks!) diff --git a/ldap/admin/src/logconv.py b/ldap/admin/src/logconv.py index 566f9af38..2fb5bb8c1 100755 --- a/ldap/admin/src/logconv.py +++ b/ldap/admin/src/logconv.py @@ -1798,8 +1798,7 @@ class logAnalyser: return None try: - mime = magic.Magic(mime=True) - filetype = mime.from_file(filepath) + filetype = magic.detect_from_filename(filepath).mime_type # List of supported compression types compressed_mime_types = [ diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 1b408be0f..74bcb4576 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -200,7 +200,7 @@ Requires: json-c # Log compression Requires: zlib-devel # logconv.py, MIME type -Requires: python-magic +Requires: python3-file-magic # Picks up our systemd deps. %{?systemd_requires}
0
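The diff switches logconv.py to the python3-file-magic API, magic.detect_from_filename(), which returns an object exposing the MIME type. A small usage sketch follows; the log path is illustrative and the gzip MIME strings are the ones commonly reported by libmagic.

import magic


def is_gzip_compressed(filepath):
    filetype = magic.detect_from_filename(filepath).mime_type
    return filetype in ("application/gzip", "application/x-gzip")


if __name__ == "__main__":
    print(is_gzip_compressed("/var/log/dirsrv/slapd-example/access.20250101.gz"))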
ab35fe80176a4388744848def3c6324c5deed05b
389ds/389-ds-base
Ticket 49522 - Fix build issues on F28 Bug Description: We randomly get build failures around libldaputil.la: libtool: error: cannot find the library 'libldaputil.la' or unhandled argument 'libldaputil.la' Fix Description: A previous commit introduced one of the issues; this patch adds libslapd.la to the LIBADD of libldaputil https://pagure.io/389-ds-base/issue/49552 Reviewed by: mreynolds(one line commit rule)
commit ab35fe80176a4388744848def3c6324c5deed05b Author: Mark Reynolds <[email protected]> Date: Thu Apr 5 13:15:05 2018 -0400 Ticket 49522 - Fix build issues on F28 Bug Description: We are randomly get build failures around libldaputil.la libtool: error: cannot find the library 'libldaputil.la' or unhandled argument 'libldaputil.la' Fix Description: A previous commit introduced one of the issues, this patch is adding libslapd.la to the LIBADD of libldaputil https://pagure.io/389-ds-base/issue/49552 Reviewed by: mreynolds(one line commit rule) diff --git a/Makefile.am b/Makefile.am index 4c0d10e30..c0987ffb6 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1137,7 +1137,7 @@ libldaputil_la_SOURCES = lib/ldaputil/cert.c \ lib/ldaputil/vtable.c libldaputil_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) -I$(srcdir)/lib/ldaputil -libldaputil_la_LIBADD = $(NSS_LINK) $(NSPR_LINK) +libldaputil_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) libldaputil_la_LDFLAGS = $(AM_LDFLAGS) #////////////////////////////////////////////////////////////////
0
1e0b18a081e27b941031e8326de7e1e1dd70b3c4
389ds/389-ds-base
Bump version to 1.4.0.8
commit 1e0b18a081e27b941031e8326de7e1e1dd70b3c4 Author: Mark Reynolds <[email protected]> Date: Thu Apr 19 17:34:11 2018 -0400 Bump version to 1.4.0.8 diff --git a/VERSION.sh b/VERSION.sh index 303a7c413..e5d328117 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=4 -VERSION_MAINT=0.7 +VERSION_MAINT=0.8 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=$(date -u +%Y%m%d)
0
e7152719470cdbf01a80faac943b780ce8d07321
389ds/389-ds-base
Update svrcore tools to make releases easier to conduct. Release version 4.1.0 Author: wibrown
commit e7152719470cdbf01a80faac943b780ce8d07321 Author: William Brown <[email protected]> Date: Fri Apr 8 11:44:52 2016 +1000 Update svrcore tools to make releases easier to conduct. Release version 4.1.0 Author: wibrown diff --git a/AUTHORS b/AUTHORS index e69de29bb..b391368b6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -0,0 +1,3 @@ + +2016 William Brown <[email protected]> + diff --git a/Makefile.am b/Makefile.am index 8e850675c..6439c94b6 100644 --- a/Makefile.am +++ b/Makefile.am @@ -8,3 +8,21 @@ EXTRA_DIST= LICENSE \ pkgconfigdir= $(libdir)/pkgconfig pkgconfig_DATA= svrcore.pc + +# if distdir is a git tag, use that for the git archive tag, else +# just assume a developer build and use HEAD +git-archive: + if [ -n "$(SRCDISTDIR)" -a -d "$(SRCDISTDIR)" ] ; then \ + srcdistdir=$(SRCDISTDIR) ; \ + else \ + srcdistdir=`pwd` ; \ + fi ; \ + cd $(srcdir) ; \ + if git show-ref --tags -q $(distdir) ; then \ + gittag=$(distdir) ; \ + else \ + gittag=HEAD ; \ + fi ; \ + git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2 + + diff --git a/Makefile.in b/Makefile.in index d3b8de150..1786de00b 100644 --- a/Makefile.in +++ b/Makefile.in @@ -883,6 +883,22 @@ uninstall-am: uninstall-pkgconfigDATA .PRECIOUS: Makefile +# if distdir is a git tag, use that for the git archive tag, else +# just assume a developer build and use HEAD +git-archive: + if [ -n "$(SRCDISTDIR)" -a -d "$(SRCDISTDIR)" ] ; then \ + srcdistdir=$(SRCDISTDIR) ; \ + else \ + srcdistdir=`pwd` ; \ + fi ; \ + cd $(srcdir) ; \ + if git show-ref --tags -q $(distdir) ; then \ + gittag=$(distdir) ; \ + else \ + gittag=HEAD ; \ + fi ; \ + git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2 + # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: diff --git a/NEWS b/NEWS index 22e274f2c..f53fff5ca 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,6 @@ +svccore 4.1.0 +============= +* Add support for systemd-ask-pass integration svrcore 4.0.4 ============= diff --git a/configure b/configure index 9d7c67c88..06162fd13 100755 --- a/configure +++ b/configure @@ -1,8 +1,8 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for svrcore 4.0.4. +# Generated by GNU Autoconf 2.69 for svrcore 4.1.0. # -# Report bugs to <[email protected]>. +# Report bugs to <[email protected], [email protected]>. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -275,10 +275,10 @@ fi $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell [email protected] and -$0: [email protected] about your system, including any -$0: error possibly output before this message. Then install -$0: a modern shell, or manually run the script under such a -$0: shell if you do have one." +$0: [email protected], [email protected] about your +$0: system, including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." fi exit 1 fi @@ -590,9 +590,9 @@ MAKEFLAGS= # Identity of this package. 
PACKAGE_NAME='svrcore' PACKAGE_TARNAME='svrcore' -PACKAGE_VERSION='4.0.4' -PACKAGE_STRING='svrcore 4.0.4' -PACKAGE_BUGREPORT='[email protected]' +PACKAGE_VERSION='4.1.0' +PACKAGE_STRING='svrcore 4.1.0' +PACKAGE_BUGREPORT='[email protected], [email protected]' PACKAGE_URL='' ac_unique_file="src/svrcore.h" @@ -1334,7 +1334,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures svrcore 4.0.4 to adapt to many kinds of systems. +\`configure' configures svrcore 4.1.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1404,7 +1404,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of svrcore 4.0.4:";; + short | recursive ) echo "Configuration of svrcore 4.1.0:";; esac cat <<\_ACEOF @@ -1461,7 +1461,7 @@ Some influential environment variables: Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. -Report bugs to <[email protected]>. +Report bugs to <[email protected], [email protected]>. _ACEOF ac_status=$? fi @@ -1524,7 +1524,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -svrcore configure 4.0.4 +svrcore configure 4.1.0 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -1869,9 +1869,9 @@ $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} -( $as_echo "## ---------------------------------- ## -## Report this to [email protected] ## -## ---------------------------------- ##" +( $as_echo "## rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr ## +## Report this to [email protected] ## +## rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac @@ -1893,7 +1893,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by svrcore $as_me 4.0.4, which was +It was created by svrcore $as_me 4.1.0, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2756,7 +2756,7 @@ fi # Define the identity of the package. PACKAGE='svrcore' - VERSION='4.0.4' + VERSION='4.1.0' cat >>confdefs.h <<_ACEOF @@ -13236,7 +13236,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by svrcore $as_me 4.0.4, which was +This file was extended by svrcore $as_me 4.1.0, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -13296,13 +13296,13 @@ $config_headers Configuration commands: $config_commands -Report bugs to <[email protected]>." +Report bugs to <[email protected], [email protected]>." 
_ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -svrcore config.status 4.0.4 +svrcore config.status 4.1.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index a2fe97fe4..ec0d03ab5 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ # Process this file with autoconf to produce a configure script. AC_PREREQ(2.59) -AC_INIT(svrcore, 4.0.4, [email protected], svrcore) +AC_INIT(svrcore, 4.1.0, [[email protected], [email protected]], svrcore) AM_INIT_AUTOMAKE([gnu check-news dist-bzip2]) AM_MAINTAINER_MODE AC_CONFIG_SRCDIR([src/svrcore.h]) diff --git a/svrcore.spec b/svrcore.spec index 94346785e..41a1a39eb 100644 --- a/svrcore.spec +++ b/svrcore.spec @@ -3,10 +3,10 @@ Summary: Secure PIN handling using NSS crypto Name: svrcore -Version: 4.0.4 +Version: 4.1.0 Release: 1%{?dist} -License: MPL/GPL/LGPL -URL: http://www.mozilla.org/projects/security/pki/ +License: MPL2.0 +URL: https://pagure.io/svrcore Group: Development/Libraries BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) Requires: nspr >= %{nspr_version} @@ -15,7 +15,7 @@ BuildRequires: nspr-devel >= %{nspr_version} BuildRequires: nss-devel >= %{nss_version} BuildRequires: pkgconfig -Source0: ftp://ftp.mozilla.org/pub/mozilla.org/directory/svrcore/releases/%{version}/src/%{name}-%{version}.tar.bz2 +Source0: http://www.port389.org/binaries/%{name}-%{version}.tar.bz2 %description svrcore provides applications with several ways to handle secure PIN storage @@ -74,6 +74,9 @@ rm -f $RPM_BUILD_ROOT%{_libdir}/libsvrcore.la %{_includedir}/svrcore.h %changelog +* Fri Apr 8 2016 William Brown <[email protected]> - 4.1.0 +- Added systemd ask password support + * Tue Mar 13 2007 Rich Megginson <[email protected]> - 4.0.4-1 - Removed some autoconf generated files which were GPL only - all - code needs to be tri-licensed
0
fb3d355933335af85b1c8491eb5a67e67b7c32e9
389ds/389-ds-base
Issue 50536 - After audit log file is rotated, DS version string is logged after each update Description: Created test case, which checks if DS version string is present only once at the top of the audit log after it is rotated. Relates: https://pagure.io/389-ds-base/issue/50536 Reviewed by: spichugi (Thanks!)
commit fb3d355933335af85b1c8491eb5a67e67b7c32e9 Author: Barbora Smejkalova <[email protected]> Date: Wed Nov 6 09:47:43 2019 +0100 Issue 50536 - After audit log file is rotated, DS version string is logged after each update Description: Created test case, which checks if DS version string is present only once at the top of the audit log after it is rotated. Relates: https://pagure.io/389-ds-base/issue/50536 Reviewed by: spichugi (Thanks!) diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py index 31e984a55..2306cb96f 100644 --- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -19,6 +19,7 @@ from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, DN_CONFIG, HOST_STANDALONE, PORT_STANDALONE, DN_DM, PASSWORD from lib389.utils import ds_is_older import ldap +import glob pytestmark = pytest.mark.tier1 @@ -161,6 +162,26 @@ def clean_access_logs(topology_st, request): return clean_access_logs +def set_audit_log_config_values(topology_st, request, enabled, logsize): + topo = topology_st.standalone + + topo.config.set('nsslapd-auditlog-logging-enabled', enabled) + topo.config.set('nsslapd-auditlog-maxlogsize', logsize) + + def fin(): + topo.start() + log.info('Setting audit log config back to default values') + topo.config.set('nsslapd-auditlog-logging-enabled', 'off') + topo.config.set('nsslapd-auditlog-maxlogsize', '100') + + request.addfinalizer(fin) + + [email protected](scope="function") +def set_audit_log_config_values_to_rotate(topology_st, request): + set_audit_log_config_values(topology_st, request, 'on', '1') + + @pytest.mark.bz1273549 def test_check_default(topology_st): """Check the default value of nsslapd-logging-hr-timestamps-enabled, @@ -769,6 +790,66 @@ def test_log_base_dn_when_invalid_attr_request(topology_st): assert topology_st.standalone.ds_access_log.match(r'.*SRCH base="{}".*'.format(DEFAULT_SUFFIX)) [email protected](ds_is_older('1.3.8'), reason="May fail because of bug 1676948") [email protected] [email protected] +def test_audit_log_rotate_and_check_string(topology_st, clean_access_logs, set_audit_log_config_values_to_rotate): + """Version string should be logged only once at the top of audit log + after it is rotated. + + :id: 14dffb22-2f9c-11e9-8a03-54e1ad30572c + + :setup: Standalone instance + + :steps: + 1. Set nsslapd-auditlog-logging-enabled: on + 2. Set nsslapd-auditlog-maxlogsize: 1 + 3. Do modifications to the entry, until audit log file is rotated + 4. Check audit logs + + :expectedresults: + 1. Attribute nsslapd-auditlog-logging-enabled should be set to on + 2. Attribute nsslapd-auditlog-maxlogsize should be set to 1 + 3. Audit file should grow till 1MB and then should be rotated + 4. 
Audit file log should contain version string only once at the top + """ + + standalone = topology_st.standalone + search_ds = '389-Directory' + + users = UserAccounts(standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': 'test_audit_log', + 'cn': 'test', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/test', + }) + + log.info('Doing modifications to rotate audit log') + audit_log = standalone.ds_paths.audit_log + while len(glob.glob(audit_log + '*')) == 2: + user.replace('description', 'test'*100) + + log.info('Doing one more modification just in case') + user.replace('description', 'test2'*100) + + standalone.stop() + + count = 0 + with open(audit_log) as f: + log.info('Check that DS string is present on first line') + assert search_ds in f.readline() + f.seek(0) + + log.info('Check that DS string is present only once') + for line in f.readlines(): + if search_ds in line: + count += 1 + assert count == 1 + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode
0
3ddb10279a472291aea234128b7fb9f028cf60c1
389ds/389-ds-base
Issue 3527 - Add PROXY protocol support (#5762) Description: Add support to 389-base for the PROXY protocol for ACI evaluation and also for logging client queries. The proxy protocol is described here: http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt Fixes: https://github.com/389ds/389-ds-base/issues/3527 Reviewed by: @Firstyear, @progier389, @mreynolds389 (Thanks!)
commit 3ddb10279a472291aea234128b7fb9f028cf60c1 Author: Simon Pichugin <[email protected]> Date: Mon Jun 12 09:57:23 2023 -0700 Issue 3527 - Add PROXY protocol support (#5762) Description: Add support to 389-base for the PROXY protocol for ACI evaluation and also for logging client queries. The proxy protocol is described here: http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt Fixes: https://github.com/389ds/389-ds-base/issues/3527 Reviewed by: @Firstyear, @progier389, @mreynolds389 (Thanks!) diff --git a/Makefile.am b/Makefile.am index 0dd546356..3c8937a53 100644 --- a/Makefile.am +++ b/Makefile.am @@ -479,6 +479,7 @@ dist_noinst_HEADERS = \ ldap/servers/slapd/filter.h \ ldap/servers/slapd/getopt_ext.h \ ldap/servers/slapd/getsocketpeer.h \ + ldap/servers/slapd/haproxy.h \ ldap/servers/slapd/intrinsics.h \ ldap/servers/slapd/log.h \ ldap/servers/slapd/openldapber.h \ @@ -1102,6 +1103,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ ldap/servers/slapd/filterentry.c \ ldap/servers/slapd/generation.c \ ldap/servers/slapd/getfilelist.c \ + ldap/servers/slapd/haproxy.c \ ldap/servers/slapd/ldapi.c \ ldap/servers/slapd/ldaputil.c \ ldap/servers/slapd/lenstr.c \ @@ -1914,6 +1916,7 @@ test_slapd_SOURCES = test/main.c \ test/libslapd/schema/filter_validate.c \ test/libslapd/operation/v3_compat.c \ test/libslapd/spal/meminfo.c \ + test/libslapd/haproxy/parse.c \ test/plugins/test.c \ test/plugins/pwdstorage/pbkdf2.c diff --git a/dirsrvtests/tests/suites/basic/haproxy_test.py b/dirsrvtests/tests/suites/basic/haproxy_test.py new file mode 100644 index 000000000..08db0b50e --- /dev/null +++ b/dirsrvtests/tests/suites/basic/haproxy_test.py @@ -0,0 +1,96 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import logging +import pytest +from lib389._constants import DEFAULT_SUFFIX, PASSWORD +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Anonymous + +log = logging.getLogger(__name__) +DN = "uid=common,ou=people," + DEFAULT_SUFFIX +HOME_DIR = '/home/common' + [email protected](scope="function") +def setup_test(topo, request): + """Setup test environment""" + log.info("Add nsslapd-haproxy-trusted-ip attribute") + topo.standalone.config.set('nsslapd-haproxy-trusted-ip', '192.168.0.1') + assert topo.standalone.config.present('nsslapd-haproxy-trusted-ip', '192.168.0.1') + + log.info("Add a user") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + try: + users.create(properties={ + 'uid': 'common', + 'cn': 'common', + 'sn': 'common', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': HOME_DIR, + 'description': 'test haproxy with this user', + 'userPassword': PASSWORD + }) + except ldap.ALREADY_EXISTS: + log.info("User already exists") + pass + + +def test_haproxy_trust_ip_attribute(topo, setup_test): + """Test nsslapd-haproxy-trusted-ip attribute set and delete + + :id: 8a0789a6-3ede-40e2-966c-9a2c87eaac05 + :setup: Standalone instance with nsslapd-haproxy-trusted-ip attribute and a user + :steps: + 1. Check that nsslapd-haproxy-trusted-ip attribute is present + 2. Delete nsslapd-haproxy-trusted-ip attribute + 3. Check that nsslapd-haproxy-trusted-ip attribute is not present + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + log.info("Check that nsslapd-haproxy-trusted-ip attribute is present") + assert topo.standalone.config.present('nsslapd-haproxy-trusted-ip', '192.168.0.1') + + log.info("Delete nsslapd-haproxy-trusted-ip attribute") + topo.standalone.config.remove_all('nsslapd-haproxy-trusted-ip') + + log.info("Check that nsslapd-haproxy-trusted-ip attribute is not present") + assert not topo.standalone.config.present('nsslapd-haproxy-trusted-ip', '192.168.0.1') + + +def test_binds_with_haproxy_trust_ip_attribute(topo, setup_test): + """Test that non-proxy binds are not blocked when nsslapd-haproxy-trusted-ip attribute is set + + :id: 14273c16-fed9-497e-8ebb-09e3dabc7914 + :setup: Standalone instance with nsslapd-haproxy-trusted-ip attribute and a user + :steps: + 1. Try to bind as anonymous user + 2. Try to bind as a user + 3. Check that userPassword is correct and we can get it + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + log.info("Bind as anonymous user") + Anonymous(topo.standalone).bind() + + log.info("Bind as a user") + user_entry = UserAccount(topo.standalone, DN) + user_conn = user_entry.bind(PASSWORD) + + log.info("Check that userPassword is correct and we can get it") + user_entry = UserAccount(user_conn, DN) + home = user_entry.get_attr_val_utf8('homeDirectory') + assert home == HOME_DIR diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 6561d8331..10e49ed54 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -214,6 +214,7 @@ connection_cleanup(Connection *conn) conn->c_idlesince = 0; conn->c_flags = 0; conn->c_needpw = 0; + conn->c_haproxyheader_read = 0; conn->c_prfd = NULL; /* c_ci stays as it is */ conn->c_fdi = SLAPD_INVALID_SOCKET_INDEX; @@ -1134,6 +1135,28 @@ conn_buffered_data_avail_nolock(Connection *conn, int *conn_closed) } } +/* Function to convert a PRNetAddr to a normalized IPv4 string and keep original address string. */ +static void +normalize_IPv4(const PRNetAddr *addr, char *normalizedAddr, size_t normalizedAddrSize, char *originalAddr, size_t originalAddrSize) +{ + /* Keep the original address string */ + PR_NetAddrToString(addr, originalAddr, originalAddrSize); + + if (PR_IsNetAddrType(addr, PR_IpAddrV4Mapped)) { + /* Handle IPv4-mapped-to-IPv6 */ + PRNetAddr v4addr; + /* v4addr gets the lower 32 bits of addr6 (the IPv4 address in the IPv6 format) */ + v4addr.inet.family = PR_AF_INET; + v4addr.inet.ip = addr->ipv6.ip.pr_s6_addr32[3]; + PR_NetAddrToString(&v4addr, normalizedAddr, normalizedAddrSize); + } else { + /* If it's not an IPv4-mapped IPv6 address, just keep the original format */ + strncpy(normalizedAddr, originalAddr, normalizedAddrSize - 1); + normalizedAddr[normalizedAddrSize - 1] = '\0'; + } + originalAddr[originalAddrSize - 1] = '\0'; +} + /* Upon returning from this function, we have either: 1. Read a PDU successfully. 2. Detected some error condition with the connection which requires closing it. @@ -1148,6 +1171,7 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * { ber_len_t len = 0; int ret = 0; + int haproxy_rc = 0; int32_t waits_done = 0; ber_int_t msgid; int new_operation = 1; /* Are we doing the first I/O read for a new operation ? 
*/ @@ -1156,6 +1180,17 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * PRInt32 syserr = 0; size_t buffer_data_avail; int conn_closed = 0; + PRNetAddr pr_netaddr_from = {0}; + PRNetAddr pr_netaddr_dest = {0}; + char buf_ip[INET6_ADDRSTRLEN + 1] = {0}; + char buf_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0}; + char str_ip[INET6_ADDRSTRLEN + 1] = {0}; + char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0}; + char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0}; + PRStatus status = PR_SUCCESS; + struct berval **bvals = NULL; + int proxy_connection = 0; + int restrict_access = 0; pthread_mutex_lock(&(conn->c_mutex)); /* @@ -1181,6 +1216,7 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * } new_operation = 0; } + /* If we still haven't seen a complete PDU, read from the network */ while (*tag == LBER_DEFAULT) { int32_t ioblocktimeout_waits = conn->c_ioblocktimeout / CONN_TURBO_TIMEOUT_INTERVAL; @@ -1188,6 +1224,69 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * PR_ASSERT(!new_operation || !conn_buffered_data_avail_nolock(conn, &conn_closed)); /* We make a non-blocking read call */ if (CONNECTION_BUFFER_OFF != conn->c_private->use_buffer) { + /* Process HAProxy header */ + if (conn->c_haproxyheader_read == 0) { + conn->c_haproxyheader_read = 1; + /* + * We only check for HAProxy header if nsslapd-haproxy-trusted-ip is configured. + * If it is we proceed with the connection only if it's comming from trusted + * proxy server with correct and complete header. + */ + if ((bvals = g_get_haproxy_trusted_ip()) != NULL) { + /* Can we have an unknown address at that point? */ + if ((haproxy_rc = haproxy_receive(conn->c_sd, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest)) == HAPROXY_ERROR) { + slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "Error reading HAProxy header.\n"); + disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_INVALID_HEADER, EPROTO); + ret = CONN_DONE; + goto done; + } + + /* If bval is NULL and we don't have an error - we still want the proper log and update */ + if ((haproxy_rc == HAPROXY_HEADER_PARSED) && (proxy_connection)) { + /* Normalize IP addresses */ + normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip)); + normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip), + str_haproxy_destip, sizeof(str_haproxy_destip)); + + /* Now, reset RC and set it to 0 only if a match is found */ + haproxy_rc = -1; + + /* Allow only: + * Trusted IP == Original Client IP == HAProxy Header Destination IP */ + for (size_t i = 0; bvals[i] != NULL; ++i) { + if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) && + (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) && + (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) && + (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) { + haproxy_rc = 0; + break; + } + } + if (haproxy_rc == -1) { + slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n"); + disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO); + ret = CONN_DONE; + goto done; + } + /* Get the HAProxy header client IP address */ + PR_NetAddrToString(&pr_netaddr_from, str_haproxy_ip, sizeof(str_haproxy_ip)); + + /* Replace cin_addr and cin_destaddr in the Connection struct with received addresses */ + slapi_ch_free((void**)&conn->cin_addr); + 
slapi_ch_free((void**)&conn->cin_destaddr); + conn->cin_addr = (PRNetAddr*)malloc(sizeof(PRNetAddr)); + conn->cin_destaddr = (PRNetAddr*)malloc(sizeof(PRNetAddr)); + memcpy(conn->cin_addr, &pr_netaddr_from, sizeof(PRNetAddr)); + memcpy(conn->cin_destaddr, &pr_netaddr_dest, sizeof(PRNetAddr)); + conn->c_ipaddr = slapi_ch_strdup(str_haproxy_ip); + conn->c_serveripaddr = slapi_ch_strdup(str_haproxy_destip); + slapi_log_access(LDAP_DEBUG_STATS, + "conn=%" PRIu64 " fd=%d HAProxy new_address_from=%s to new_address_dest=%s\n", + conn->c_connid, conn->c_sd, str_haproxy_ip, str_haproxy_destip); + slapi_log_security_tcp(conn, SECURITY_HAPROXY_SUCCESS, 0, ""); + } + } + } ret = connection_read_ldap_data(conn, &err); } else { ret = get_next_from_buffer(NULL, 0, &len, tag, op->o_ber, conn); @@ -2313,7 +2412,7 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE conn->c_connid, opid, conn->c_sd, slapd_pr_strerror(reason)); } - slapi_log_security_tcp(conn, reason, slapd_pr_strerror(reason)); + slapi_log_security_tcp(conn, SECURITY_TCP_ERROR, reason, slapd_pr_strerror(reason)); if (!config_check_referral_mode()) { slapi_counter_decrement(g_get_per_thread_snmp_vars()->ops_tbl.dsConnections); diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h index 40112ff1a..b824c8f60 100644 --- a/ldap/servers/slapd/disconnect_error_strings.h +++ b/ldap/servers/slapd/disconnect_error_strings.h @@ -32,6 +32,8 @@ ER2(SLAPD_DISCONNECT_UNBIND, "Cleanly Closed Connection - U1") ER2(SLAPD_DISCONNECT_POLL, "Poll - P2") ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "NTSSL Timeout - T2") ER2(SLAPD_DISCONNECT_SASL_FAIL, "SASL Failure - S1") +ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "Invalid Proxy Header - P3") +ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "Unknown Proxy - P4") #endif /* __DISCONNECT_ERROR_STRINGS_H_ */ diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h index 117952729..a0484f1c2 100644 --- a/ldap/servers/slapd/disconnect_errors.h +++ b/ldap/servers/slapd/disconnect_errors.h @@ -33,6 +33,8 @@ #define SLAPD_DISCONNECT_POLL SLAPD_DISCONNECT_ERROR_BASE + 10 #define SLAPD_DISCONNECT_NTSSL_TIMEOUT SLAPD_DISCONNECT_ERROR_BASE + 11 #define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12 +#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13 +#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14 #endif /* __DISCONNECT_ERRORS_H_ */ diff --git a/ldap/servers/slapd/haproxy.c b/ldap/servers/slapd/haproxy.c new file mode 100644 index 000000000..329c9659f --- /dev/null +++ b/ldap/servers/slapd/haproxy.c @@ -0,0 +1,392 @@ +/** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2023 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. 
+ * END COPYRIGHT BLOCK **/ + +/* haproxy.c - process connection PROXY header if present */ + +#include <sys/types.h> +#include <sys/socket.h> +#include <stdarg.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> +#include <fcntl.h> +#include "slap.h" + +/* Function to parse IPv4 addresses in version 2 */ +static int haproxy_parse_v2_addr_v4(uint32_t in_addr, unsigned in_port, PRNetAddr *pr_netaddr) +{ + char addr[INET_ADDRSTRLEN]; + + /* Check if the port is valid */ + if (in_port > 65535) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_addr_v4", "Port number exceeds maximum value.\n"); + return -1; + } + + /* Assign the input address and port to the PRNetAddr structure */ + pr_netaddr->inet.family = PR_AF_INET; + pr_netaddr->inet.port = in_port; + pr_netaddr->inet.ip = in_addr; + + /* Print the address in a human-readable format */ + if (inet_ntop(AF_INET, &(pr_netaddr->inet.ip), addr, INET_ADDRSTRLEN) == NULL) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_addr_v4", "Failed to print address.\n"); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_addr_v4", "Address: %s\n", addr); + } + return 0; +} + + +/* Function to parse IPv6 addresses in version 2 */ +static int haproxy_parse_v2_addr_v6(uint8_t *in6_addr, unsigned in6_port, PRNetAddr *pr_netaddr) +{ + struct sockaddr_in6 sin6; + char addr[INET6_ADDRSTRLEN]; + + /* Check if the port is valid */ + if (in6_port > 65535) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_addr_v6", "Port number exceeds maximum value.\n"); + return -1; + } + + /* Assign the input address and port to the PRNetAddr structure */ + memset((void *) &sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + memcpy(&sin6.sin6_addr, in6_addr, 16); + memcpy(&pr_netaddr->ipv6.ip, &sin6.sin6_addr, sizeof(pr_netaddr->ipv6.ip)); + pr_netaddr->ipv6.port = in6_port; + pr_netaddr->ipv6.family = PR_AF_INET6; + + /* Print the address in a human-readable format */ + if (inet_ntop(AF_INET6, &(pr_netaddr->ipv6.ip), addr, INET6_ADDRSTRLEN) == NULL) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_addr_v6", "Failed to print address.\n"); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_addr_v6", "Address: %s\n", addr); + } + return 0; +} + + +/* Function to parse the header in version 2 */ +int haproxy_parse_v2_hdr(const char *str, size_t *str_len, int *proxy_connection, PRNetAddr *pr_netaddr_from, PRNetAddr *pr_netaddr_dest) +{ + struct proxy_hdr_v2 *hdr_v2 = (struct proxy_hdr_v2 *) str; + uint16_t hdr_v2_len = 0; + PRNetAddr parsed_addr_from = {{0}}; + PRNetAddr parsed_addr_dest = {{0}}; + int rc = HAPROXY_ERROR; + + *proxy_connection = 0; + + /* Check if we received enough bytes to contain the HAProxy v2 header */ + if (*str_len < PP2_HEADER_LEN) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Protocol header is short\n"); + rc = HAPROXY_NOT_A_HEADER; + goto done; + } + hdr_v2_len = ntohs(hdr_v2->len); + + if (memcmp(hdr_v2->sig, PP2_SIGNATURE, PP2_SIGNATURE_LEN) != 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Protocol header is invalid\n"); + rc = HAPROXY_NOT_A_HEADER; + goto done; + } + + /* Check if the header has the correct signature */ + if ((hdr_v2->ver_cmd & 0xF0) != PP2_VERSION) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Protocol version is invalid\n"); + goto done; + } + /* Check if we received enough bytes to contain the entire HAProxy v2 header, including the address information */ + if (*str_len < PP2_HEADER_LEN + hdr_v2_len) { + slapi_log_err(SLAPI_LOG_CONNS, 
"haproxy_parse_v2_hdr", "Protocol header v2 is short\n"); + goto done; + } + + switch (hdr_v2->ver_cmd & 0x0F) { + case PP2_VER_CMD_PROXY: + /* Process the header based on the address family */ + switch (hdr_v2->fam) { + case PP2_FAM_INET | PP2_TRANS_STREAM:{ /* TCP over IPv4 */ + if (hdr_v2_len < PP2_ADDR_LEN_INET) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Address field is short\n"); + goto done; + } + if (haproxy_parse_v2_addr_v4(hdr_v2->addr.ip4.src_addr, hdr_v2->addr.ip4.src_port, &parsed_addr_from) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Client address is invalid\n"); + goto done; + } + if (haproxy_parse_v2_addr_v4(hdr_v2->addr.ip4.dst_addr, hdr_v2->addr.ip4.dst_port, &parsed_addr_dest) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Server address is invalid\n"); + goto done; + } + break; + } + case PP2_FAM_INET6 | PP2_TRANS_STREAM:{/* TCP over IPv6 */ + if (hdr_v2_len < PP2_ADDR_LEN_INET6) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Address field is short\n"); + goto done; + } + if (haproxy_parse_v2_addr_v6(hdr_v2->addr.ip6.src_addr, hdr_v2->addr.ip6.src_port, &parsed_addr_from) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Client address is invalid\n"); + goto done; + } + if (haproxy_parse_v2_addr_v6(hdr_v2->addr.ip6.dst_addr, hdr_v2->addr.ip6.dst_port, &parsed_addr_dest) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Server address is invalid\n"); + goto done; + } + break; + } + default: + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Unsupported address family\n"); + goto done; + } + /* Update the received string length to include the address information */ + *str_len = PP2_HEADER_LEN + hdr_v2_len; + rc = HAPROXY_HEADER_PARSED; + *proxy_connection = 1; + /* Copy the parsed addresses to the output parameters */ + memcpy(pr_netaddr_from, &parsed_addr_from, sizeof(PRNetAddr)); + memcpy(pr_netaddr_dest, &parsed_addr_dest, sizeof(PRNetAddr)); + goto done; + /* If it's a LOCAL command, there's no address information to parse, so just update the received string length */ + case PP2_VER_CMD_LOCAL: + *str_len = PP2_HEADER_LEN + hdr_v2_len; + rc = HAPROXY_HEADER_PARSED; + goto done; + default: + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v2_hdr", "Invalid header command\n"); + goto done; + } +done: + return rc; +} + + +/* Function to parse the protocol in version 1 */ +static int haproxy_parse_v1_protocol(const char *str, const char *protocol) +{ + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_protocol", "HAProxy protocol - %s\n", str ? str : "(null)"); + if ((str != 0) && (strcasecmp(str, protocol) == 0)) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_protocol", "HAProxy protocol is valid\n"); + return 0; + } + return -1; +} + + +/* Function to parse the family (i.e., IPv4 or IPv6) in version 1 */ +static int haproxy_parse_v1_fam(const char *str, int *addr_family) +{ + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_fam", "Address family - %s\n", str ? 
str : "(null)"); + if (str == 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_fam", "Address family is missing\n"); + return -1; + } + + if (strcasecmp(str, "TCP4") == 0) { + *addr_family = AF_INET; + return 0; + } else if (strcasecmp(str, "TCP6") == 0) { + *addr_family = AF_INET6; + return 0; + } else { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_fam", "Address family %s is unsupported\n", str); + return -1; + } +} + + +/* Function to parse addresses in version 1 */ +static int haproxy_parse_v1_addr(const char *str, PRNetAddr *pr_netaddr, int addr_family) +{ + char addrbuf[256]; + + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_addr", "addr=%s proto=%d\n", str ? str : "(null)", addr_family); + if (str == 0 || strlen(str) >= sizeof(addrbuf)) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_addr", "incorrect IP address: %s\n", str); + return -1; + } + + switch (addr_family) { + case AF_INET6: + if (slapi_is_ipv6_addr(str)) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_addr", "ipv6 address: %s\n", str); + pr_netaddr->ipv6.family = PR_AF_INET6; + } + break; + case AF_INET: + if (slapi_is_ipv4_addr(str)) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_addr", "ipv4 address: %s\n", str); + pr_netaddr->inet.family = PR_AF_INET; + } + break; + default: + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_addr", "incorrect address family: %d\n", addr_family); + return -1; + } + + if (PR_StringToNetAddr(str, pr_netaddr) != PR_SUCCESS) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_addr", "Failed to set IP address: %s\n", str); + return -1; + } + + return 0; +} + + +/* Function to parse port numbers in version 1 */ +static int haproxy_parse_v1_port(const char *str, PRNetAddr *pr_netaddr) +{ + char *endptr; + long port; + + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_port", "port=%s\n", str ? str : "(null)"); + errno = 0; /* Reset errno to 0 before calling strtol */ + port = strtol(str, &endptr, 10); + + /* Check for conversion errors */ + if (errno == ERANGE || port < 0 || port > 65535) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_port", "Port is out of range: %s\n", str); + return -1; + } + if (endptr == str || *endptr != '\0') { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_port", "No digits were found: %s\n", str); + return -1; + } + + /* Successfully parsed the port number. 
Set it */ + PRLDAP_SET_PORT(pr_netaddr, port); + return 0; +} + + +static inline char *get_next_token(char **copied) { + return tokenize_string(copied, " \r"); +} + + +/* Function to parse the header in version 1 */ +int haproxy_parse_v1_hdr(const char *str, size_t *str_len, int *proxy_connection, PRNetAddr *pr_netaddr_from, PRNetAddr *pr_netaddr_dest) +{ + PRNetAddr parsed_addr_from = {{0}}; + PRNetAddr parsed_addr_dest = {{0}}; + char *str_saved = NULL; + char *copied = NULL; + char *after_header = NULL; + int addr_family; + int rc = HAPROXY_ERROR; + + *proxy_connection = 0; + if (strncmp(str, "PROXY ", 6) == 0) { + str_saved = slapi_ch_strdup(str); + copied = str_saved; + after_header = split_string_at_delim(str_saved, '\n'); + + /* Check if the header is valid */ + if (after_header == 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing protocol header terminator\n"); + goto done; + } + /* Parse the protocol, family, addresses, and ports */ + if (haproxy_parse_v1_protocol(get_next_token(&copied), "PROXY") < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing or bad protocol header\n"); + goto done; + } + /* Parse the family */ + if (haproxy_parse_v1_fam(get_next_token(&copied), &addr_family) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing or bad protocol type\n"); + goto done; + } + /* Parse the addresses */ + if (haproxy_parse_v1_addr(get_next_token(&copied), &parsed_addr_from, addr_family) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing or bad client address\n"); + goto done; + } + if (haproxy_parse_v1_addr(get_next_token(&copied), &parsed_addr_dest, addr_family) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing or bad server address\n"); + goto done; + } + /* Parse the ports */ + if (haproxy_parse_v1_port(get_next_token(&copied), &parsed_addr_from) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing or bad client port\n"); + goto done; + } + if (haproxy_parse_v1_port(get_next_token(&copied), &parsed_addr_dest) < 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_parse_v1_hdr", "Missing or bad server port\n"); + goto done; + } + rc = HAPROXY_HEADER_PARSED; + *proxy_connection = 1; + *str_len = after_header - str_saved; + /* Copy the parsed addresses to the output parameters */ + memcpy(pr_netaddr_from, &parsed_addr_from, sizeof(PRNetAddr)); + memcpy(pr_netaddr_dest, &parsed_addr_dest, sizeof(PRNetAddr)); + +done: + slapi_ch_free_string(&str_saved); + } else { + rc = HAPROXY_NOT_A_HEADER; + } + return rc; +} + +/** + * Function to receive and parse HAProxy headers, supporting both v1 and v2 of the protocol. + * + * @param fd: The file descriptor of the socket from which to read. + * @param proxy_connection: A pointer to an integer to store the proxy connection status (0 or 1). + * @param pr_netaddr_from: A pointer to a PRNetAddr structure to store the source address info. + * @param pr_netaddr_dest: A pointer to a PRNetAddr structure to store the destination address info. + * + * @return: Returns 0 on successful operation, -1 on error. 
+ */ +int haproxy_receive(int fd, int *proxy_connection, PRNetAddr *pr_netaddr_from, PRNetAddr *pr_netaddr_dest) +{ + /* Buffer to store the header received from the HAProxy server */ + char hdr[HAPROXY_HEADER_MAX_LEN + 1] = {0}; + ssize_t recv_result = 0; + size_t hdr_len; + int rc = HAPROXY_ERROR; + + /* Attempt to receive the header from the HAProxy server */ + recv_result = recv(fd, hdr, sizeof(hdr) - 1, MSG_PEEK | MSG_DONTWAIT); + if (recv_result <= 0) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_receive", "EOF or error on haproxy socket: %s\n", strerror(errno)); + return rc; + } else { + hdr_len = recv_result; + } + + /* Null-terminate the header string */ + if (hdr_len < sizeof(hdr)) { + hdr[hdr_len] = 0; + } else { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_receive", "Recieved header is too long: %d\n", hdr_len); + rc = HAPROXY_NOT_A_HEADER; + return rc; + } + + rc = haproxy_parse_v1_hdr(hdr, &hdr_len, proxy_connection, pr_netaddr_from, pr_netaddr_dest); + if (rc == HAPROXY_NOT_A_HEADER) { + rc = haproxy_parse_v2_hdr(hdr, &hdr_len, proxy_connection, pr_netaddr_from, pr_netaddr_dest); + } + + if (rc == HAPROXY_HEADER_PARSED) { + slapi_log_err(SLAPI_LOG_CONNS, "haproxy_receive", "HAProxy header parsed successfully\n"); + /* Consume the data from the socket */ + recv_result = recv(fd, hdr, hdr_len, MSG_DONTWAIT); + + if (recv_result != hdr_len) { + slapi_log_err(SLAPI_LOG_ERR, "haproxy_receive", "Read error: %s: %s\n", hdr, strerror(errno)); + return HAPROXY_ERROR; + } + } + return rc; +} diff --git a/ldap/servers/slapd/haproxy.h b/ldap/servers/slapd/haproxy.h new file mode 100644 index 000000000..0040d15b7 --- /dev/null +++ b/ldap/servers/slapd/haproxy.h @@ -0,0 +1,76 @@ +/** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2023 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. + * END COPYRIGHT BLOCK **/ + +#include <prnetdb.h> + +#define HAPROXY_HEADER_PARSED 0 +#define HAPROXY_NOT_A_HEADER 1 +#define HAPROXY_ERROR -1 + + /* + * Begin protocol v2 definitions from haproxy/include/types/connection.h. + */ +#define PP2_SIGNATURE "\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A" +#define PP2_SIGNATURE_LEN 12 +#define PP2_HEADER_LEN 16 + +/* ver_cmd byte */ +#define PP2_VER_CMD_LOCAL 0x00 +#define PP2_VER_CMD_PROXY 0x01 + +#define PP2_VERSION 0x20 + +/* Family byte */ +#define PP2_TRANS_UNSPEC 0x00 +#define PP2_TRANS_STREAM 0x01 +#define PP2_FAM_UNSPEC 0x00 +#define PP2_FAM_INET 0x10 +#define PP2_FAM_INET6 0x20 + +/* len field (2 bytes) */ +#define PP2_ADDR_LEN_UNSPEC (0) +#define PP2_ADDR_LEN_INET (4 + 4 + 2 + 2) +#define PP2_ADDR_LEN_INET6 (16 + 16 + 2 + 2) +#define PP2_HDR_LEN_UNSPEC (PP2_HEADER_LEN + PP2_ADDR_LEN_UNSPEC) +#define PP2_HDR_LEN_INET (PP2_HEADER_LEN + PP2_ADDR_LEN_INET) +#define PP2_HDR_LEN_INET6 (PP2_HEADER_LEN + PP2_ADDR_LEN_INET6) + +/* Both formats (v1 and v2) are designed to fit in the smallest TCP segment + * that any TCP/IP host is required to support (576 - 40 = 536 bytes). 
+ */ +#define HAPROXY_HEADER_MAX_LEN 536 + +/* Define struct for the proxy header */ +struct proxy_hdr_v2 { + uint8_t sig[PP2_SIGNATURE_LEN]; /* PP2_SIGNATURE */ + uint8_t ver_cmd; /* protocol version | command */ + uint8_t fam; /* protocol family and transport */ + uint16_t len; /* length of remainder */ + union { + struct { /* for TCP/UDP over IPv4, len = 12 */ + uint32_t src_addr; + uint32_t dst_addr; + uint16_t src_port; + uint16_t dst_port; + } ip4; + struct { /* for TCP/UDP over IPv6, len = 36 */ + uint8_t src_addr[16]; + uint8_t dst_addr[16]; + uint16_t src_port; + uint16_t dst_port; + } ip6; + struct { /* for AF_UNIX sockets, len = 216 */ + uint8_t src_addr[108]; + uint8_t dst_addr[108]; + } unx; + } addr; +}; + +int haproxy_parse_v1_hdr(const char *str, size_t *str_len, int *proxy_connection, PRNetAddr *pr_netaddr_from, PRNetAddr *pr_netaddr_dest); +int haproxy_parse_v2_hdr(const char *str, size_t *str_len, int *proxy_connection, PRNetAddr *pr_netaddr_from, PRNetAddr *pr_netaddr_dest); +int haproxy_receive(int fd, int *proxy_connection, PRNetAddr *pr_netaddr_from, PRNetAddr *pr_netaddr_dest); diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index b5a340e09..5c161cf6d 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -1712,11 +1712,24 @@ mozldap_ldap_explode_rdn(const char *rdn, const int notypes) } int -slapi_is_ipv6_addr(const char *hostname) +slapi_is_ipv4_addr(const char *ipAddress) { PRNetAddr addr; - if (PR_StringToNetAddr(hostname, &addr) == PR_SUCCESS && + if (PR_StringToNetAddr(ipAddress, &addr) == PR_SUCCESS && + PR_IsNetAddrType(&addr, PR_IpAddrV4Mapped) && + addr.raw.family == PR_AF_INET) { + return 1; + } + return 0; +} + +int +slapi_is_ipv6_addr(const char *ipAddress) +{ + PRNetAddr addr; + + if (PR_StringToNetAddr(ipAddress, &addr) == PR_SUCCESS && !PR_IsNetAddrType(&addr, PR_IpAddrV4Mapped) && addr.raw.family == PR_AF_INET6) { return 1; @@ -1724,6 +1737,25 @@ slapi_is_ipv6_addr(const char *hostname) return 0; } +/* For debug purpose */ +void +slapi_log_prnetaddr(const PRNetAddr *addr) { + char ip_str[INET6_ADDRSTRLEN] = {0}; + uint16_t port = 0; + + if (addr->inet.family == PR_AF_INET) { + PR_NetAddrToString(addr, ip_str, sizeof(ip_str)); + port = PR_ntohs(addr->inet.port); + slapi_log_error(SLAPI_LOG_ERR, "slapi_log_prnetaddr", "IPv4: %s:%u\n", ip_str, port); + } else if (addr->raw.family == PR_AF_INET6) { + PR_NetAddrToString(addr, ip_str, sizeof(ip_str)); + port = PR_ntohs(addr->ipv6.port); + slapi_log_error(SLAPI_LOG_ERR, "slapi_log_prnetaddr", "IPv6: %s:%u\n", ip_str, port); + } else { + slapi_log_error(SLAPI_LOG_ERR, "slapi_log_prnetaddr", "Unknown address family\n"); + } +} + /* * Get the length of the ber-encoded ldap message. Note, only the length of * the LDAP operation is returned, not the length of the entire berval. diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 7fe7a5939..a06cad4ba 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -157,6 +157,7 @@ typedef enum { CONFIG_STRING_OR_UNKNOWN, /* use "unknown" instead of an empty string */ CONFIG_CONSTANT_INT, /* for #define values, e.g. */ CONFIG_CONSTANT_STRING, /* for #define values, e.g. 
*/ + CONFIG_SPECIAL_TRUSTED_IP_LIST, /* this is a berval list */ CONFIG_SPECIAL_REFERRALLIST, /* this is a berval list */ CONFIG_SPECIAL_SSLCLIENTAUTH, /* maps strings to an enumeration */ CONFIG_SPECIAL_ERRORLOGLEVEL, /* requires & with LDAP_DEBUG_ANY */ @@ -855,6 +856,10 @@ static struct config_get_and_set NULL, 0, (void **)&global_slapdFrontendConfig.listenhost, CONFIG_STRING, NULL, "", NULL /* Empty value is allowed */}, + {CONFIG_HAPROXY_TRUSTED_IP, (ConfigSetFunc)config_set_haproxy_trusted_ip, + NULL, 0, + (void **)&global_slapdFrontendConfig.haproxy_trusted_ip, + CONFIG_SPECIAL_TRUSTED_IP_LIST, NULL, NULL, NULL}, {CONFIG_SNMP_INDEX_ATTRIBUTE, config_set_snmp_index, NULL, 0, (void **)&global_slapdFrontendConfig.snmp_index, @@ -2686,6 +2691,59 @@ config_set_listenhost(const char *attrname __attribute__((unused)), char *value, return retVal; } +int +config_set_haproxy_trusted_ip(const char *attrname, struct berval **value, char *errorbuf, int apply) +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal = LDAP_SUCCESS; + int conn_buffer = 0; + size_t end = 0; + + if (config_value_is_null(attrname, (char *)value, errorbuf, 0)) { + return LDAP_OPERATIONS_ERROR; + } + + CFG_LOCK_READ(slapdFrontendConfig); + conn_buffer = slapdFrontendConfig->connection_buffer; + CFG_UNLOCK_READ(slapdFrontendConfig); + if (CONNECTION_BUFFER_OFF == conn_buffer) { + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "HAProxy is not supported when nsslapd-connection-buffer is disabled (set to '0')\n"); + return LDAP_OPERATIONS_ERROR; + } + + if (value && value[0] && + PL_strncasecmp((char *)value[0]->bv_val, HAPROXY_TRUSTED_IP_REMOVE_CMD, value[0]->bv_len) != 0) { + for (size_t i = 0; value[i] != NULL; i++) { + end = strspn(value[i]->bv_val, "0123456789:ABCDEFabcdef.*"); + /* + * If no valid characters are found, or if there are characters after the valid ones, + * then print an error message and exit with LDAP_OPERATIONS_ERROR. 
+ */ + if (!end || value[i]->bv_val[end] != '\0') { + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "IP address contains invalid characters (%s), skipping\n", + value[i]->bv_val); + return LDAP_OPERATIONS_ERROR; + } + if (strstr(value[i]->bv_val, ":") == 0) { + /* IPv4 - make sure it's just numbers, dots, and wildcard */ + end = strspn(value[i]->bv_val, "0123456789.*"); + if (!end || value[i]->bv_val[end] != '\0') { + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "IPv4 address contains invalid characters (%s), skipping\n", + value[i]->bv_val); + return LDAP_OPERATIONS_ERROR; + } + } + } + } + + if (apply) { + CFG_LOCK_WRITE(slapdFrontendConfig); + g_set_haproxy_trusted_ip(value); + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } + return retVal; +} + int config_set_snmp_index(const char *attrname, char *value, char *errorbuf, int apply) { @@ -5980,6 +6038,38 @@ config_get_securelistenhost(void) return retVal; } +struct berval ** +config_get_haproxy_trusted_ip(void) +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + struct berval **retVal = NULL; + int nTrustedIPs = 0; + + CFG_LOCK_READ(slapdFrontendConfig); + /* count the number of trusted IPs */ + for (nTrustedIPs = 0; + slapdFrontendConfig->haproxy_trusted_ip && + slapdFrontendConfig->haproxy_trusted_ip[nTrustedIPs]; + nTrustedIPs++) + ; + + retVal = (struct berval **) slapi_ch_malloc((nTrustedIPs + 1) * sizeof(struct berval *)); + + /*terminate the end, and add the trusted IPs backwards */ + retVal[nTrustedIPs--] = NULL; + + while (nTrustedIPs >= 0) { + retVal[nTrustedIPs] = (struct berval *)slapi_ch_malloc(sizeof(struct berval)); + retVal[nTrustedIPs]->bv_val = + config_copy_strval(slapdFrontendConfig->haproxy_trusted_ip[nTrustedIPs]->bv_val); + retVal[nTrustedIPs]->bv_len = slapdFrontendConfig->haproxy_trusted_ip[nTrustedIPs]->bv_len; + nTrustedIPs--; + } + CFG_UNLOCK_READ(slapdFrontendConfig); + + return retVal; +} + char * config_get_srvtab(void) { @@ -8867,6 +8957,19 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply) retval = config_set_defaultreferral(attr, values, errorbuf, apply); } break; + case CONFIG_SPECIAL_TRUSTED_IP_LIST: + if (NULL == values) /* special token which means to remove trusted IPs */ + { + struct berval val; + struct berval *vals[2] = {0, 0}; + vals[0] = &val; + val.bv_val = HAPROXY_TRUSTED_IP_REMOVE_CMD; + val.bv_len = strlen(HAPROXY_TRUSTED_IP_REMOVE_CMD); + retval = config_set_haproxy_trusted_ip(attr, vals, errorbuf, apply); + } else { + retval = config_set_haproxy_trusted_ip(attr, values, errorbuf, apply); + } + break; default: if (values == NULL && (cgas->initvalue != NULL || cgas->geninitfunc != NULL)) { @@ -8983,6 +9086,14 @@ config_set_value( slapi_entry_attr_set_charptr(e, cgas->attr_name, ""); break; + case CONFIG_SPECIAL_TRUSTED_IP_LIST: + /* trusted IP list is already an array of berval* */ + if (value) + slapi_entry_attr_replace(e, cgas->attr_name, (struct berval **)*value); + else + slapi_entry_attr_set_charptr(e, cgas->attr_name, ""); + break; + case CONFIG_CONSTANT_STRING: assert(value); /* should be a constant value */ slapi_entry_attr_set_charptr(e, cgas->attr_name, (char *)value); diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index cf7babfb0..4aa905576 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -4034,7 +4034,7 @@ slapi_log_security(Slapi_PBlock *pb, const char *event_type, const char *msg) * to rely on just the Connection struct. 
*/ int -slapi_log_security_tcp(Connection *pb_conn, PRErrorCode error, const char *msg) +slapi_log_security_tcp(Connection *pb_conn, const char *event_type, PRErrorCode error, const char *msg) { char *server_ip = NULL; char *client_ip = NULL; @@ -4046,12 +4046,25 @@ slapi_log_security_tcp(Connection *pb_conn, PRErrorCode error, const char *msg) uint64_t conn_id = 0; json_object *log_json = NULL; + /* Check if security log is valid and functioning */ if (!(loginfo.log_security_state & LOGGING_ENABLED) || !loginfo.log_security_fdes || - !loginfo.log_security_file || - (error != SLAPD_DISCONNECT_BAD_BER_TAG && /* We only care about B1, B2, B3 */ - error != SLAPD_DISCONNECT_BER_TOO_BIG && - error != SLAPD_DISCONNECT_BER_PEEK)) + !loginfo.log_security_file) + { + return 0; + } + + /* + * Continue (not return 0) if the event is either SECURITY_TCP_ERROR + * with one of the specified error codes, or SECURITY_HAPROXY_SUCCESS. + */ + if (!((strcmp(event_type, SECURITY_TCP_ERROR) == 0) && + (error == SLAPD_DISCONNECT_BAD_BER_TAG || + error == SLAPD_DISCONNECT_BER_TOO_BIG || + error == SLAPD_DISCONNECT_BER_PEEK || + error == SLAPD_DISCONNECT_PROXY_UNKNOWN || + error == SLAPD_DISCONNECT_PROXY_INVALID_HEADER)) && + (strcmp(event_type, SECURITY_HAPROXY_SUCCESS) != 0)) { return 0; } @@ -4081,7 +4094,7 @@ slapi_log_security_tcp(Connection *pb_conn, PRErrorCode error, const char *msg) log_json = json_object_new_object(); json_object_object_add(log_json, "date", json_object_new_string(local_time)); json_object_object_add(log_json, "utc_time", json_object_new_string(utc_time)); - json_object_object_add(log_json, "event", json_object_new_string(SECURITY_TCP_ERROR)); + json_object_object_add(log_json, "event", json_object_new_string(event_type)); json_object_object_add(log_json, "client_ip", json_object_new_string(client_ip)); json_object_object_add(log_json, "server_ip", json_object_new_string(server_ip)); json_object_object_add(log_json, "ldap_version", json_object_new_int(ldap_version)); diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index e8d3aaccd..b3c1afdf4 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -243,6 +243,7 @@ int config_set_SSL3ciphers(const char *attrname, char *value, char *errorbuf, in int config_set_localhost(const char *attrname, char *value, char *errorbuf, int apply); int config_set_listenhost(const char *attrname, char *value, char *errorbuf, int apply); int config_set_securelistenhost(const char *attrname, char *value, char *errorbuf, int apply); +int config_set_haproxy_trusted_ip(const char *attrname, struct berval **value, char *errorbuf, int apply); int config_set_ldapi_filename(const char *attrname, char *value, char *errorbuf, int apply); int config_set_snmp_index(const char *attrname, char *value, char *errorbuf, int apply); int config_set_ldapi_switch(const char *attrname, char *value, char *errorbuf, int apply); @@ -432,6 +433,7 @@ char *config_get_SSL3ciphers(void); char *config_get_localhost(void); char *config_get_listenhost(void); char *config_get_securelistenhost(void); +struct berval **config_get_haproxy_trusted_ip(void); char *config_get_ldapi_filename(void); int config_get_ldapi_switch(void); int config_get_ldapi_bind_switch(void); @@ -851,7 +853,7 @@ int slapi_log_access(int level, const char *fmt, ...) 
; #endif int slapi_log_security(Slapi_PBlock *pb, const char *event_type, const char *msg); -int slapi_log_security_tcp(Connection *pb_conn, PRErrorCode error, const char *msg); +int slapi_log_security_tcp(Connection *pb_conn, const char *event_type, PRErrorCode error, const char *msg); int slapd_log_audit(char *buffer, int buf_len, int sourcelog); int slapd_log_audit_internal(char *buffer, int buf_len, int *state); int slapd_log_auditfail(char *buffer, int buf_len); @@ -913,6 +915,8 @@ int strarray2str(char **a, char *buf, size_t buflen, int include_quotes); int slapd_chown_if_not_owner(const char *filename, uid_t uid, gid_t gid); int slapd_comp_path(char *p0, char *p1); void replace_char(char *name, char c, char c2); +char *split_string_at_delim(char *str, char delim); +char *tokenize_string(char **str, const char *delim); void slapd_cert_not_found_error_help(char *cert_name); @@ -1084,6 +1088,8 @@ PRUint64 g_get_num_entries_sent(void); PRUint64 g_get_num_bytes_sent(void); void g_set_default_referral(struct berval **ldap_url); struct berval **g_get_default_referral(void); +void g_set_haproxy_trusted_ip(struct berval **ipaddress); +struct berval **g_get_haproxy_trusted_ip(void); void disconnect_server(Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error); int send_ldap_search_entry(Slapi_PBlock *pb, Slapi_Entry *e, LDAPControl **ectrls, char **attrs, int attrsonly); void send_ldap_result(Slapi_PBlock *pb, int err, char *matched, char *text, int nentries, struct berval **urls); diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c index d87a75ad9..56ba6db8b 100644 --- a/ldap/servers/slapd/result.c +++ b/ldap/servers/slapd/result.c @@ -158,6 +158,58 @@ g_get_default_referral() return slapdFrontendConfig->defaultreferral; } +static void +delete_haproxy_trusted_ip(struct berval **ipaddress) +{ + if (ipaddress) { + int ii = 0; + for (ii = 0; ipaddress[ii]; ++ii) + ber_bvfree(ipaddress[ii]); + slapi_ch_free((void **)&ipaddress); + } +} + +void +g_set_haproxy_trusted_ip(struct berval **ipaddress) +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + struct berval **haproxy_trusted_ip = NULL; + int nTrustedIPs = 0; + + /* check to see if we want to delete all values */ + if (ipaddress && ipaddress[0] && + PL_strncasecmp((char *)ipaddress[0]->bv_val, HAPROXY_TRUSTED_IP_REMOVE_CMD, ipaddress[0]->bv_len) == 0) { + delete_haproxy_trusted_ip(slapdFrontendConfig->haproxy_trusted_ip); + slapdFrontendConfig->haproxy_trusted_ip = NULL; + return; + } + + /* count the number of ip addresses */ + for (nTrustedIPs = 0; ipaddress && ipaddress[nTrustedIPs]; nTrustedIPs++) + ; + + haproxy_trusted_ip = (struct berval **) + slapi_ch_malloc((nTrustedIPs + 1) * sizeof(struct berval *)); + + /* terminate the end, and add the trusted IPs backwards */ + haproxy_trusted_ip[nTrustedIPs--] = NULL; + + while (nTrustedIPs >= 0) { + haproxy_trusted_ip[nTrustedIPs] = ber_bvdup(ipaddress[nTrustedIPs]); + nTrustedIPs--; + } + + delete_haproxy_trusted_ip(slapdFrontendConfig->haproxy_trusted_ip); + slapdFrontendConfig->haproxy_trusted_ip = haproxy_trusted_ip; +} + +struct berval ** +g_get_haproxy_trusted_ip() +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + return slapdFrontendConfig->haproxy_trusted_ip; +} + /* * routines to manage keeping track of the current number of connections * to the server. 
this information is used by the listener thread to diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 5deec5f2c..40f5cebb0 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -455,6 +455,7 @@ typedef void (*VFPV)(); /* takes undefined arguments */ #define PWD_PBE_DELIM '-' #define REFERRAL_REMOVE_CMD "remove" +#define HAPROXY_TRUSTED_IP_REMOVE_CMD "remove" /* Filenames for DSE storage */ #define DSE_FILENAME "dse.ldif" @@ -511,6 +512,8 @@ struct subfilt #include "filter.h" /* mr_filter_t */ +#include "haproxy.h" + /* * represents a search filter */ @@ -1696,6 +1699,7 @@ typedef struct conn Conn_private *c_private; /* data which is not shared outside connection.c */ int c_flags; /* Misc flags used only for SSL status currently */ int c_needpw; /* need new password */ + int c_haproxyheader_read; /* 0 if HAProxy header has not been read, 1 if it has been read */ CERTCertificate *c_client_cert; /* Client's Cert */ PRFileDesc *c_prfd; /* NSPR 2.1 FileDesc */ int c_ci; /* An index into the Connection array. For printing. */ @@ -2078,6 +2082,7 @@ typedef struct _slapdEntryPoints #define SECURITY_BIND_FAILED "BIND_FAILED" #define SECURITY_AUTHZ_ERROR "AUTHZ_ERROR" #define SECURITY_TCP_ERROR "TCP_ERROR" +#define SECURITY_HAPROXY_SUCCESS "HAPROXY_SUCCESS" /* Security log messages */ #define SECURITY_MSG_INVALID_PASSWD "INVALID_PASSWORD" @@ -2213,6 +2218,7 @@ typedef struct _slapdEntryPoints #define CONFIG_PORT_ATTRIBUTE "nsslapd-port" #define CONFIG_WORKINGDIR_ATTRIBUTE "nsslapd-workingdir" #define CONFIG_LISTENHOST_ATTRIBUTE "nsslapd-listenhost" +#define CONFIG_HAPROXY_TRUSTED_IP "nsslapd-haproxy-trusted-ip" #define CONFIG_SNMP_INDEX_ATTRIBUTE "nsslapd-snmp-index" #define CONFIG_LDAPI_FILENAME_ATTRIBUTE "nsslapd-ldapifilepath" #define CONFIG_LDAPI_SWITCH_ATTRIBUTE "nsslapd-ldapilisten" @@ -2475,6 +2481,7 @@ typedef struct _slapdFrontendConfig char *encryptionalias; char *errorlog; char *listenhost; + struct berval **haproxy_trusted_ip; int snmp_index; char *localuser; char *localhost; diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index d9d697a49..4853e143b 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -3166,15 +3166,32 @@ void slapi_rdn_set_rdn(Slapi_RDN *rdn, const Slapi_RDN *fromrdn); */ void slapi_rdn_free(Slapi_RDN **rdn); +/** + * Checks if the value of ipAddress is a IPv4 address + * + * \param ipAddress is a string + * \return 1 if address is an IPv4 address + * \return 0 if address is not an IPv4 address + */ +int slapi_is_ipv4_addr(const char *ipAddress); + /** * Checks if the value of ipAddress is a IPv6 address * - * \param ipAddress is a string that is either an IPv4 or IPv6 address + * \param ipAddress is a string * \return 1 if address is an IPv6 address - * \return 0 if address is an IPv4 address + * \return 0 if address is not an IPv6 address */ int slapi_is_ipv6_addr(const char *ipAddress); +/** + * Log to Error log the value of a PRNetAddr - IPv4 or IPv6 + * For debugging purposes only + * + * \param addr is a PRNetAddr + */ +void slapi_log_prnetaddr(const PRNetAddr *addr); + /** * Returns the length of a ber-encoded ldap operation * diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index c70e638e8..12b202818 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -479,6 +479,64 @@ replace_char(char *str, char c, char c2) } } + +/* +** Break a string at the delimiter +** If the delimiter is not found, the string is not 
modified. +** The position immediately following the delimiter is returned. +*/ +char *split_string_at_delim(char *str, char delim) { + if (str == NULL) { + return NULL; + } + + char *delim_position = strchr(str, delim); + + if (delim_position != NULL) { + *delim_position = '\0'; + delim_position++; + } + + return delim_position; +} + + +/* a simple string tokenizer */ +char *tokenize_string(char **str, const char *delim) +{ + char *cp = *str; + char *result; + + /* The string is empty or has been fully tokenized */ + if (cp == NULL) { + return NULL; + } + + /* The first loop skips any leading delimiter characters in the input string */ + while (*cp && strchr(delim, *cp)) { + cp++; + } + if (cp == NULL) { + return NULL; + } + + /* The second loop tries to find a delimiter character or reaches the end of the string */ + result = cp; + while (*cp && strchr(delim, *cp) == 0) { + cp++; + } + + /* Found! Replace the delimiter with a null character */ + if (*cp) { + *cp++ = '\0'; + } + + /* In future, search from the updated position */ + *str = cp; + return (result); +} + + /* ** This function takes a quoted attribute value of the form "abc", ** and strips off the enclosing quotes. It also deals with quoted @@ -1513,7 +1571,7 @@ util_get_capped_hardware_threads(long min, long max) threads = CPU_COUNT(&cs); if (threads == 0) { threads = sysconf(_SC_NPROCESSORS_ONLN); - } + } slapi_log_err(SLAPI_LOG_TRACE, "util_get_hardware_threads", "Detected %ld hardware threads\n", threads); @@ -1715,9 +1773,9 @@ void dup_ldif_line(struct berval *copy, const char *line, const char *endline) while (pt && pt < ptend) { line = pt; /* Search end of line */ - while (pt < ptend && *pt != '\n' && *pt != 0) { - pt++; - } + while (pt < ptend && *pt != '\n' && *pt != 0) { + pt++; + } PR_ASSERT(pt <= ptend); copylen = pt - line; if (copylen>0 && line[copylen-1] == '\r') { diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index 2b5e3db31..d6f0dadb3 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -24,6 +24,7 @@ from lib389._constants import * from lib389.cli_base import LogCapture TLS_HOSTNAME_CHECK = os.getenv('TLS_HOSTNAME_CHECK', default=True) +HAPROXY_TRUSTED_IP = os.getenv('HAPROXY_TRUSTED_IP', default='') DEBUGGING = os.getenv('DEBUGGING', default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) @@ -149,6 +150,8 @@ def _create_instances(topo_dict, suffix): instance.config.set('nsslapd-auditlog-logging-enabled','on') instance.config.set('nsslapd-auditfaillog-logging-enabled','on') instance.config.set('nsslapd-plugin-logging', 'on') + if HAPROXY_TRUSTED_IP: + instance.config.set('nsslapd-haproxy-trusted-ip', HAPROXY_TRUSTED_IP) log.info("Instance with parameters {} was created.".format(args_instance)) if "standalone1" in instances and len(instances) == 1: diff --git a/test/libslapd/haproxy/parse.c b/test/libslapd/haproxy/parse.c new file mode 100644 index 000000000..d6a09fe7e --- /dev/null +++ b/test/libslapd/haproxy/parse.c @@ -0,0 +1,321 @@ +/** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2023 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. 
+ * END COPYRIGHT BLOCK **/ + +#include "../../test_slapd.h" +#include <string.h> +#include <haproxy.h> + + +typedef struct test_input { + const char *input_str; + int expected_result; + size_t expected_len; + int expected_proxy_connection; + PRNetAddr expected_pr_netaddr_from; + PRNetAddr expected_pr_netaddr_dest; +} test_input; + +test_input test_cases[] = { + { + .input_str = "PROXY TCP4 192.168.0.1 192.168.0.2 12345 389\r\n", + .expected_result = HAPROXY_HEADER_PARSED, + .expected_len = 39, + .expected_proxy_connection = 1, + .expected_pr_netaddr_from = { .inet = { .family = PR_AF_INET, .ip = 0x0100A8C0, .port = 0x3930 }}, + .expected_pr_netaddr_dest = { .inet = { .family = PR_AF_INET, .ip = 0x0200A8C0, .port = 0x8501 }} + }, + { + .input_str = "PROXY TCP6 2001:db8::1 2001:db8::2 12345 389\r\n", + .expected_result = HAPROXY_HEADER_PARSED, + .expected_len = 46, + .expected_proxy_connection = 1, + .expected_pr_netaddr_from = { .ipv6 = { .family = PR_AF_INET6, .ip = {0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, .port = 0x3930 }}, + .expected_pr_netaddr_dest = { .ipv6 = { .family = PR_AF_INET6, .ip = {0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, .port = 0x8501 }}, + }, + { + .input_str = "PROXY TCP6 ::ffff:192.168.0.1 ::ffff:192.168.0.2 12345 389\r\n", + .expected_result = HAPROXY_HEADER_PARSED, + .expected_len = 54, + .expected_proxy_connection = 1, + .expected_pr_netaddr_from = { .ipv6 = { .family = PR_AF_INET6, .ip = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xa8, 0x00, 0x01}, .port = 0x3930 }}, + .expected_pr_netaddr_dest = { .ipv6 = { .family = PR_AF_INET6, .ip = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xa8, 0x00, 0x02}, .port = 0x8501 }}, + }, + // Invalid IP + { + .input_str = "PROXY TCP4 256.168.0.1 192.168.0.2 12345 389\r\n", + .expected_result = HAPROXY_ERROR, + .expected_proxy_connection = 0, + }, + // Invalid port + { + .input_str = "PROXY TCP4 192.168.0.1 192.168.0.2 123456 389\r\n", + .expected_result = HAPROXY_ERROR, + .expected_proxy_connection = 0, + }, + // One port + { + .input_str = "PROXY TCP4 192.168.0.1 192.168.0.2 12345\r\n", + .expected_result = HAPROXY_ERROR, + .expected_proxy_connection = 0, + }, + // No ports + { + .input_str = "PROXY TCP4 192.168.0.1 192.168.0.2\r\n", + .expected_result = HAPROXY_ERROR, + .expected_proxy_connection = 0, + }, + // Empty string + { + .input_str = "", + .expected_result = HAPROXY_NOT_A_HEADER, + .expected_proxy_connection = 0, + }, + // Invalid protocol + { + .input_str = "PROXY TCP3 192.168.0.1 192.168.0.2 12345 389\r\n", + .expected_result = HAPROXY_ERROR, + .expected_proxy_connection = 0, + }, + // Missing protocol + { + .input_str = "PROXY 192.168.0.1 192.168.0.2 12345 389\r\n", + .expected_result = HAPROXY_ERROR, + .expected_proxy_connection = 0, + }, + +}; + +size_t num_tests = sizeof(test_cases) / sizeof(test_cases[0]); + +void test_libslapd_haproxy_v1(void **state) { + (void)state; + int result = 0; + + for (size_t i = 0; i < num_tests; i++) { + int proxy_connection = 0; + PRNetAddr pr_netaddr_from = {{0}}; + PRNetAddr pr_netaddr_dest = {{0}}; + size_t str_len = strlen(test_cases[i].input_str); + result = haproxy_parse_v1_hdr(test_cases[i].input_str, &str_len, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest); + + assert_int_equal(result, test_cases[i].expected_result); + assert_int_equal(proxy_connection, 
test_cases[i].expected_proxy_connection); + + if (test_cases[i].expected_result == 0) { + // slapi_log_error(SLAPI_LOG_ERR, "haproxy_parse_v1_hdr", "Expected pr_netaddr_from: "); + // slapi_log_prnetaddr(&test_cases[i].expected_pr_netaddr_from); + // slapi_log_error(SLAPI_LOG_ERR, "haproxy_parse_v1_hdr", "Actual pr_netaddr_from: "); + // slapi_log_prnetaddr(&pr_netaddr_from); + + // slapi_log_error(SLAPI_LOG_ERR, "haproxy_parse_v1_hdr", "Expected pr_netaddr_dest: "); + // slapi_log_prnetaddr(&test_cases[i].expected_pr_netaddr_dest); + // slapi_log_error(SLAPI_LOG_ERR, "haproxy_parse_v1_hdr", "Actual pr_netaddr_dest: "); + // slapi_log_prnetaddr(&pr_netaddr_dest); + + assert_memory_equal(&test_cases[i].expected_pr_netaddr_from, &pr_netaddr_from, sizeof(PRNetAddr)); + assert_memory_equal(&test_cases[i].expected_pr_netaddr_dest, &pr_netaddr_dest, sizeof(PRNetAddr)); + } + } +} + + +void test_libslapd_haproxy_v2_invalid(void **state) { + (void) state; // Unused + + struct { + char *desc; + char *str; + struct proxy_hdr_v2 hdr_v2; + size_t str_len; + int expected_result; + int expected_proxy_connection; + } tests[] = { + {"short header", + "short", + {}, + sizeof("short"), + HAPROXY_NOT_A_HEADER, + 0}, + {"invalid header", + "invalid_signature", + {}, + sizeof("invalid_signature"), + HAPROXY_NOT_A_HEADER, + 0}, + {"invalid signature", + NULL, + {"INVALID", PP2_VERSION | PP2_VER_CMD_PROXY, PP2_FAM_INET | PP2_TRANS_STREAM, htons(PP2_ADDR_LEN_INET)}, + PP2_HEADER_LEN + PP2_ADDR_LEN_INET, + HAPROXY_NOT_A_HEADER, + 0}, + {"unsupported family", + NULL, + {PP2_SIGNATURE, PP2_VERSION | PP2_VER_CMD_PROXY, 0x30 | PP2_TRANS_STREAM, htons(0)}, + PP2_HEADER_LEN, + HAPROXY_ERROR, + 0}, + {"unsupported protocol", + NULL, + {PP2_SIGNATURE, PP2_VERSION | PP2_VER_CMD_PROXY, PP2_FAM_INET | 0x30, htons(0)}, + PP2_HEADER_LEN, + HAPROXY_ERROR, + 0}, + {"invalid version", + NULL, + {PP2_SIGNATURE, (PP2_VERSION ^ 0xF0) | PP2_VER_CMD_PROXY, PP2_FAM_INET | PP2_TRANS_STREAM, htons(PP2_ADDR_LEN_INET)}, + PP2_HEADER_LEN + PP2_ADDR_LEN_INET, + HAPROXY_ERROR, + 0}, + {"valid header, wrong command", + NULL, + {PP2_SIGNATURE, PP2_VERSION | (PP2_VER_CMD_PROXY ^ 0xF0), PP2_FAM_INET | PP2_TRANS_STREAM, htons(PP2_ADDR_LEN_INET)}, + PP2_HEADER_LEN + PP2_ADDR_LEN_INET, + HAPROXY_ERROR, + 0}, + {"valid header, too long", + NULL, + {PP2_SIGNATURE, PP2_VERSION | PP2_VER_CMD_PROXY, PP2_FAM_INET | PP2_TRANS_STREAM, htons(PP2_ADDR_LEN_INET * 2)}, + PP2_HEADER_LEN + PP2_ADDR_LEN_INET, + HAPROXY_ERROR, + 0} + }; + + for (int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + int proxy_connection; + PRNetAddr pr_netaddr_from; + PRNetAddr pr_netaddr_dest; + char *str_to_test = tests[i].str ? 
tests[i].str : (char *) &tests[i].hdr_v2; + + int result = haproxy_parse_v2_hdr(str_to_test, &tests[i].str_len, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest); + + assert_int_equal(result, tests[i].expected_result); + assert_int_equal(proxy_connection, tests[i].expected_proxy_connection); + } +} + +// Test for a valid proxy header v2 with unsupported transport protocol +void test_libslapd_haproxy_v2_unsupported_protocol(void **state) { + (void) state; // Unused + + // Create a sample string with valid proxy header v2 and unsupported transport protocol + struct proxy_hdr_v2 hdr_v2; + memcpy(hdr_v2.sig, PP2_SIGNATURE, PP2_SIGNATURE_LEN); + hdr_v2.ver_cmd = PP2_VERSION | PP2_VER_CMD_PROXY; + hdr_v2.fam = PP2_FAM_INET | 0x30; // 0x30 is unsupported + hdr_v2.len = htons(0); + + size_t str_len = PP2_HEADER_LEN; + int proxy_connection; + PRNetAddr pr_netaddr_from; + PRNetAddr pr_netaddr_dest; + + int result = haproxy_parse_v2_hdr((const char *) &hdr_v2, &str_len, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest); + + assert_int_equal(result, HAPROXY_ERROR); + assert_int_equal(proxy_connection, 0); +} + + +// Test for the case when the protocol version is invalid +void test_libslapd_haproxy_v2_invalid_version(void **state) { + (void) state; // Unused + + // Create a sample string with an invalid protocol version + struct proxy_hdr_v2 hdr_v2; + memcpy(hdr_v2.sig, PP2_SIGNATURE, PP2_SIGNATURE_LEN); + hdr_v2.ver_cmd = (PP2_VERSION ^ 0xF0) | PP2_VER_CMD_PROXY; + hdr_v2.fam = PP2_FAM_INET | PP2_TRANS_STREAM; + hdr_v2.len = htons(PP2_ADDR_LEN_INET); + + size_t str_len = PP2_HEADER_LEN + PP2_ADDR_LEN_INET; + int proxy_connection; + PRNetAddr pr_netaddr_from; + PRNetAddr pr_netaddr_dest; + + int result = haproxy_parse_v2_hdr((const char *) &hdr_v2, &str_len, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest); + + assert_int_equal(result, HAPROXY_ERROR); + assert_int_equal(proxy_connection, 0); +} + + +// Test for the case when the protocol command is valid - IPv4 and IPv6 +void test_libslapd_haproxy_v2_valid(void **state) { + (void) state; // Unused + + // We need only first two test cases as they are valid + size_t num_tests = 2; + + for (size_t i = 0; i < num_tests; i++) { + struct proxy_hdr_v2 hdr_v2; + memcpy(hdr_v2.sig, PP2_SIGNATURE, PP2_SIGNATURE_LEN); + hdr_v2.ver_cmd = PP2_VERSION | PP2_VER_CMD_PROXY; + + size_t str_len; + int proxy_connection; + PRNetAddr pr_netaddr_from = {0}; + PRNetAddr pr_netaddr_dest = {0}; + + if (i == 0) { // IPv4 test case + hdr_v2.fam = PP2_FAM_INET | PP2_TRANS_STREAM; + hdr_v2.len = htons(PP2_ADDR_LEN_INET); + uint32_t src_addr = test_cases[i].expected_pr_netaddr_from.inet.ip; + uint32_t dst_addr = test_cases[i].expected_pr_netaddr_dest.inet.ip; + uint16_t src_port = test_cases[i].expected_pr_netaddr_from.inet.port; + uint16_t dst_port = test_cases[i].expected_pr_netaddr_dest.inet.port; + memcpy(&hdr_v2.addr.ip4.src_addr, &src_addr, sizeof(src_addr)); + memcpy(&hdr_v2.addr.ip4.dst_addr, &dst_addr, sizeof(dst_addr)); + memcpy(&hdr_v2.addr.ip4.src_port, &src_port, sizeof(src_port)); + memcpy(&hdr_v2.addr.ip4.dst_port, &dst_port, sizeof(dst_port)); + str_len = PP2_HEADER_LEN + PP2_ADDR_LEN_INET; + } else { // IPv6 test case + hdr_v2.fam = PP2_FAM_INET6 | PP2_TRANS_STREAM; + hdr_v2.len = htons(PP2_ADDR_LEN_INET6); + uint8_t src_addr[16]; + uint8_t dst_addr[16]; + memcpy(src_addr, &test_cases[i].expected_pr_netaddr_from.ipv6.ip, sizeof(src_addr)); + memcpy(dst_addr, &test_cases[i].expected_pr_netaddr_dest.ipv6.ip, sizeof(dst_addr)); + uint16_t src_port = 
test_cases[i].expected_pr_netaddr_from.ipv6.port; + uint16_t dst_port = test_cases[i].expected_pr_netaddr_dest.ipv6.port; + memcpy(&hdr_v2.addr.ip6.src_addr, src_addr, sizeof(src_addr)); + memcpy(&hdr_v2.addr.ip6.dst_addr, dst_addr, sizeof(dst_addr)); + memcpy(&hdr_v2.addr.ip6.src_port, &src_port, sizeof(src_port)); + memcpy(&hdr_v2.addr.ip6.dst_port, &dst_port, sizeof(dst_port)); + str_len = PP2_HEADER_LEN + PP2_ADDR_LEN_INET6; + } + + int rc = haproxy_parse_v2_hdr((const char *) &hdr_v2, &str_len, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest); + + assert_int_equal(rc, HAPROXY_HEADER_PARSED); + assert_int_equal(proxy_connection, 1); + assert_memory_equal(&pr_netaddr_from, &test_cases[i].expected_pr_netaddr_from, sizeof(PRNetAddr)); + assert_memory_equal(&pr_netaddr_dest, &test_cases[i].expected_pr_netaddr_dest, sizeof(PRNetAddr)); + } +} + + +// Test for a valid proxy header v2 with LOCAL command +void test_libslapd_haproxy_v2_valid_local(void **state) { + (void) state; // Unused + // Create a sample string with valid proxy header v2 and LOCAL command + struct proxy_hdr_v2 hdr_v2; + memcpy(hdr_v2.sig, PP2_SIGNATURE, PP2_SIGNATURE_LEN); + hdr_v2.ver_cmd = PP2_VERSION | PP2_VER_CMD_LOCAL; + hdr_v2.fam = PP2_FAM_INET | PP2_TRANS_STREAM; + hdr_v2.len = htons(0); + + size_t str_len = PP2_HEADER_LEN; + int proxy_connection; + PRNetAddr pr_netaddr_from; + PRNetAddr pr_netaddr_dest; + + int result = haproxy_parse_v2_hdr((const char *) &hdr_v2, &str_len, &proxy_connection, &pr_netaddr_from, &pr_netaddr_dest); + + assert_int_equal(result, HAPROXY_HEADER_PARSED); + assert_int_equal(proxy_connection, 0); +} \ No newline at end of file diff --git a/test/libslapd/test.c b/test/libslapd/test.c index 32d0cbe0f..6520637c4 100644 --- a/test/libslapd/test.c +++ b/test/libslapd/test.c @@ -33,6 +33,10 @@ run_libslapd_tests(void) cmocka_unit_test(test_libslapd_filter_optimise), cmocka_unit_test(test_libslapd_pal_meminfo), cmocka_unit_test(test_libslapd_util_cachesane), + cmocka_unit_test(test_libslapd_haproxy_v1), + cmocka_unit_test(test_libslapd_haproxy_v2_valid), + cmocka_unit_test(test_libslapd_haproxy_v2_valid_local), + cmocka_unit_test(test_libslapd_haproxy_v2_invalid), }; return cmocka_run_group_tests(tests, NULL, NULL); } diff --git a/test/test_slapd.h b/test/test_slapd.h index 63e0ffc90..7b3879a31 100644 --- a/test/test_slapd.h +++ b/test/test_slapd.h @@ -55,6 +55,12 @@ void test_libslapd_counters_atomic_overflow(void **state); void test_libslapd_pal_meminfo(void **state); void test_libslapd_util_cachesane(void **state); +/* libslapd-haproxy */ +void test_libslapd_haproxy_v1(void **state); +void test_libslapd_haproxy_v2_valid(void **state); +void test_libslapd_haproxy_v2_valid_local(void **state); +void test_libslapd_haproxy_v2_invalid(void **state); + /* plugins */ void test_plugin_hello(void **state);
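A minimal usage sketch may make the v1 text parser added above easier to follow. It assumes only the declarations from the new haproxy.h shown in this diff; the driver function, the sample header string, and the printf reporting are illustrative and are not part of the commit.

#include <stdio.h>
#include <string.h>
#include "haproxy.h"   /* HAPROXY_HEADER_PARSED, haproxy_parse_v1_hdr(), PRNetAddr via prnetdb.h */

/* Illustrative driver: parse one PROXY v1 line and report what was found. */
static void
demo_parse_v1(void)
{
    const char *hdr = "PROXY TCP4 192.168.0.1 192.168.0.2 12345 389\r\n";
    size_t hdr_len = strlen(hdr);
    int proxied = 0;
    PRNetAddr from = {{0}};
    PRNetAddr dest = {{0}};

    int rc = haproxy_parse_v1_hdr(hdr, &hdr_len, &proxied, &from, &dest);
    if (rc == HAPROXY_HEADER_PARSED) {
        /* hdr_len now holds the number of bytes the header occupies, so the
         * caller knows where the LDAP data that follows the header begins. */
        printf("proxy header parsed (%zu bytes, proxied=%d)\n", hdr_len, proxied);
    } else if (rc == HAPROXY_NOT_A_HEADER) {
        printf("data does not start with a PROXY v1 header\n");
    } else {
        printf("malformed PROXY v1 header\n");
    }
}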
0
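On the same HAProxy change: the v1 parser leans on the two small string helpers added to util.c. Below is a hedged walk-through of what they do to a copy of one header line; the token values in the comments are inferred from the helper code above (not captured output), the demo function is hypothetical, and it assumes the prototypes added to proto-slap.h are visible through slap.h.

#include <stdio.h>
#include "slap.h"   /* slapi_ch_strdup(), split_string_at_delim(), tokenize_string() */

static void
demo_tokenize(void)
{
    /* Work on a private copy, exactly as haproxy_parse_v1_hdr() does. */
    char *line = slapi_ch_strdup("PROXY TCP4 192.168.0.1 192.168.0.2 12345 389\r\nrest-of-stream");
    char *cursor = line;

    /* Cut the buffer at the first '\n'; the return value points just past it. */
    char *after = split_string_at_delim(line, '\n');   /* after -> "rest-of-stream" */

    /* Pull the space/'\r'-separated fields in order, as get_next_token() does. */
    char *proto    = tokenize_string(&cursor, " \r");  /* "PROXY"       */
    char *family   = tokenize_string(&cursor, " \r");  /* "TCP4"        */
    char *src_addr = tokenize_string(&cursor, " \r");  /* "192.168.0.1" */
    char *dst_addr = tokenize_string(&cursor, " \r");  /* "192.168.0.2" */
    char *src_port = tokenize_string(&cursor, " \r");  /* "12345"       */
    char *dst_port = tokenize_string(&cursor, " \r");  /* "389"         */

    printf("%s %s %s:%s -> %s:%s | trailing: %s\n",
           proto, family, src_addr, src_port, dst_addr, dst_port,
           after ? after : "(none)");
    slapi_ch_free_string(&line);
}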
5a54717bfa40e3ef987bd85c5806125e49b2b278
389ds/389-ds-base
Ticket #48375 - SimplePagedResults -- in the search error case, simple paged results slot was not released. Description: If a simple paged results search fails in the backend, the simple paged results slot was not released. This patch adds it. https://fedorahosted.org/389/ticket/48375 Reviewed by [email protected] (Thank you, Thierry!!)
commit 5a54717bfa40e3ef987bd85c5806125e49b2b278 Author: Noriko Hosoi <[email protected]> Date: Wed Dec 9 12:05:24 2015 -0800 Ticket #48375 - SimplePagedResults -- in the search error case, simple paged results slot was not released. Description: If a simple paged results search fails in the backend, the simple paged results slot was not released. This patch adds it. https://fedorahosted.org/389/ticket/48375 Reviewed by [email protected] (Thank you, Thierry!!) diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index 8a1443147..20761d54d 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -814,15 +814,26 @@ op_shared_search (Slapi_PBlock *pb, int send_result) * wait the end of the loop to send back this error */ flag_no_such_object = 1; - break; + } else { + /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will + * have sent the result - + * Set a flag here so we don't return another result. */ + sent_result = 1; } - /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will - * have sent the result - - * Set a flag here so we don't return another result. */ - sent_result = 1; /* fall through */ case -1: /* an error occurred */ + /* PAGED RESULTS */ + if (op_is_pagedresults(operation)) { + /* cleanup the slot */ + PR_Lock(pb->pb_conn->c_mutex); + pagedresults_set_search_result(pb->pb_conn, operation, NULL, 1, pr_idx); + rc = pagedresults_set_current_be(pb->pb_conn, NULL, pr_idx, 1); + PR_Unlock(pb->pb_conn->c_mutex); + } + if (1 == flag_no_such_object) { + break; + } slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err); if (err == LDAP_NO_SUCH_OBJECT) {
0
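For the paged-results fix above, the key point the hunk does not spell out is why the cleanup is wrapped in a lock: the slot bookkeeping is per connection, so it must be reset under the connection mutex. A condensed restatement, using the identifiers from the hunk with the surrounding switch/fall-through logic omitted:

/* On any backend search failure, a simple paged results operation must give
 * its per-connection slot (pr_idx) back, otherwise the slot stays marked in
 * use for the lifetime of the connection. */
if (op_is_pagedresults(operation)) {
    PR_Lock(pb->pb_conn->c_mutex);     /* slot state lives on the connection */
    pagedresults_set_search_result(pb->pb_conn, operation, NULL, 1, pr_idx);
    pagedresults_set_current_be(pb->pb_conn, NULL, pr_idx, 1);
    PR_Unlock(pb->pb_conn->c_mutex);
}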
83d57eeb8d0c5864ea000b8b834908d8500ae196
389ds/389-ds-base
bump version to 1.3.5.2
commit 83d57eeb8d0c5864ea000b8b834908d8500ae196 Author: Noriko Hosoi <[email protected]> Date: Tue May 3 11:17:22 2016 -0700 bump version to 1.3.5.2 diff --git a/VERSION.sh b/VERSION.sh index c13c57b38..0cede259f 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=3 -VERSION_MAINT=5.1 +VERSION_MAINT=5.2 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=`date -u +%Y%m%d%H%M%S`
0
877128469b89715e6d3f786b740732edad7b4b66
389ds/389-ds-base
Issue 4428 - Paged Results with Chaining Test Case Bug Description: This test case shows how a paged search with criticality set to false causes chaining to sigsegv. Fix Description: N/A - this is a reproducer, not the fix. fixes: #4428 Author: William Brown <[email protected]> Review by: @droideck, @mreynolds389

commit 877128469b89715e6d3f786b740732edad7b4b66 Author: William Brown <[email protected]> Date: Thu Nov 12 13:02:42 2020 +1000 Issue 4428 - Paged Results with Chaining Test Case Bug Description: This test case shows how a paged search with criticality set to false, causes chaining to sigsegv. Fix Description: N/A - this is a reproducer, not the fix. fixes: #4428 Author: William Brown <[email protected]> Review by: @droideck, @mreynolds389 diff --git a/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py b/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py new file mode 100644 index 000000000..108f9b727 --- /dev/null +++ b/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py @@ -0,0 +1,91 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import time +import shutil +from lib389.idm.account import Accounts, Account +from lib389.topologies import topology_i2 as topology +from lib389.backend import Backends +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import ChainingBackendPlugin +from lib389.chaining import ChainingLinks +from lib389.mappingTree import MappingTrees + +pytestmark = pytest.mark.tier1 + +def test_chaining_paged_search(topology): + """ Test paged search through the chaining db. This + would cause a SIGSEGV with paged search which could + be triggered by SSSD. + + :id: 7b29b1f5-26cf-49fa-9fe7-ee29a1408633 + :setup: Two standalones in chaining. + :steps: + 1. Configure chaining between the nodes + 2. Do a chaining search (no page) to assert it works + 3. Do a paged search through chaining. + + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + st1 = topology.ins["standalone1"] + st2 = topology.ins["standalone2"] + + ### We setup so that st1 -> st2 + + # Clear all the BE in st1 + bes1 = Backends(st1) + for be in bes1.list(): + be.delete() + + # Setup st1 to chain to st2 + chain_plugin_1 = ChainingBackendPlugin(st1) + chain_plugin_1.enable() + + chains = ChainingLinks(st1) + chain = chains.create(properties={ + 'cn': 'demochain', + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'nsmultiplexorbinddn': '', + 'nsmultiplexorcredentials': '', + 'nsfarmserverurl': st2.toLDAPURL(), + }) + + mts = MappingTrees(st1) + # Due to a bug in lib389, we need to delete and recreate the mt. + for mt in mts.list(): + mt.delete() + mts.ensure_state(properties={ + 'cn': DEFAULT_SUFFIX, + 'nsslapd-state': 'backend', + 'nsslapd-backend': 'demochain', + }) + # Restart to enable + st1.restart() + + # Get an anonymous connection. + anon = Account(st1, dn='') + anon_conn = anon.bind(password='') + + # Now do a search from st1 -> st2 + accs_1 = Accounts(anon_conn, DEFAULT_SUFFIX) + assert len(accs_1.list()) > 0 + + # Allow time to attach lldb if needed. 
+ # import time + # print("🔥🔥🔥") + # time.sleep(45) + + # Now do a *paged* search from st1 -> st2 + assert len(accs_1.list(paged_search=2, paged_critical=False)) > 0 + + diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index c60837601..2252f4ac6 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -9,6 +9,7 @@ import ldap import ldap.dn +from ldap.controls import SimplePagedResultsControl from ldap import filter as ldap_filter import logging import json @@ -1032,10 +1033,11 @@ class DSLdapObjects(DSLogging, DSLints): # functions with very little work on the behalf of the overloader return self._childobject(instance=self._instance, dn=dn) - def list(self): + def list(self, paged_search=None, paged_critical=True): """Get a list of children entries (DSLdapObject, Replica, etc.) using a base DN and objectClasses of our object (DSLdapObjects, Replicas, etc.) + :param paged_search: None for no paged search, or an int of page size to use. :returns: A list of children entries """ @@ -1044,20 +1046,64 @@ class DSLdapObjects(DSLogging, DSLints): # This will yield and & filter for objectClass with as many terms as needed. filterstr = self._get_objectclass_filter() self._log.debug('list filter = %s' % filterstr) - try: - results = self._instance.search_ext_s( - base=self._basedn, - scope=self._scope, - filterstr=filterstr, - attrlist=self._list_attrlist, - serverctrls=self._server_controls, clientctrls=self._client_controls, - escapehatch='i am sure' - ) - # def __init__(self, instance, dn=None): + + if type(paged_search) == int: + self._log.debug('listing with paged search -> %d', paged_search) + # If paged_search -> + results = [] + pages = 0 + pctrls = [] + req_pr_ctrl = SimplePagedResultsControl(paged_critical, size=paged_search, cookie='') + if self._server_controls is not None: + controls = [req_pr_ctrl] + self._server_controls + else: + controls = [req_pr_ctrl] + while True: + msgid = self._instance.search_ext( + base=self._basedn, + scope=self._scope, + filterstr=filterstr, + attrlist=self._list_attrlist, + serverctrls=controls, + clientctrls=self._client_controls, + escapehatch='i am sure' + ) + self._log.info('Getting page %d' % (pages,)) + rtype, rdata, rmsgid, rctrls = self._instance.result3(msgid, escapehatch='i am sure') + results.extend(rdata) + pages += 1 + self._log.debug("%s" % rctrls) + pctrls = [ c for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType] + if pctrls and pctrls[0].cookie: + req_pr_ctrl.cookie = pctrls[0].cookie + if self._server_controls is not None: + controls = [req_pr_ctrl] + self._server_controls + else: + controls = [req_pr_ctrl] + else: + break + #End while + # Result3 doesn't map through Entry, so we have to do it manually. 
+ results = [Entry(r) for r in results] insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results] - except ldap.NO_SUCH_OBJECT: - # There are no objects to select from, se we return an empty array - insts = [] + # End paged search + else: + # If not paged + try: + results = self._instance.search_ext_s( + base=self._basedn, + scope=self._scope, + filterstr=filterstr, + attrlist=self._list_attrlist, + serverctrls=self._server_controls, clientctrls=self._client_controls, + escapehatch='i am sure' + ) + # def __init__(self, instance, dn=None): + insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results] + except ldap.NO_SUCH_OBJECT: + # There are no objects to select from, se we return an empty array + insts = [] return insts def exists(self, selector=[], dn=None):
0
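For the chaining paged-search reproducer above: the lib389 list() change implements the standard simple-paged-results loop (send the search with a page control, read the cookie from the response control, repeat until the cookie is empty). A hedged C sketch of the same loop against the classic libldap paging helpers; the libldap calls are standard client API and not part of this commit, connection setup is omitted, and the page size of 2 with criticality 0 mirrors the test case. Newer libldap builds may prefer the *_pageresponse_control variants.

#include <stdio.h>
#include <ldap.h>

/* Illustrative client-side paged search: fetch every page under "base". */
static int
paged_search_all(LDAP *ld, const char *base)
{
    struct berval *cookie = NULL;
    int rc, msgid, pages = 0;

    do {
        LDAPControl *page_ctrl = NULL, **resp_ctrls = NULL;
        LDAPMessage *res = NULL;

        /* criticality 0 mirrors paged_critical=False in the test case above */
        rc = ldap_create_page_control(ld, 2 /* page size */, cookie, 0, &page_ctrl);
        if (rc != LDAP_SUCCESS) {
            break;
        }
        LDAPControl *sctrls[] = { page_ctrl, NULL };

        rc = ldap_search_ext(ld, base, LDAP_SCOPE_SUBTREE, "(objectClass=*)",
                             NULL, 0, sctrls, NULL, NULL, 0, &msgid);
        ldap_control_free(page_ctrl);
        if (rc != LDAP_SUCCESS || ldap_result(ld, msgid, LDAP_MSG_ALL, NULL, &res) <= 0) {
            rc = rc ? rc : -1;
            break;
        }
        printf("page %d: %d entries\n", pages++, ldap_count_entries(ld, res));

        /* Read the response control to get the cookie for the next page. */
        if (cookie) {
            ber_bvfree(cookie);
            cookie = NULL;
        }
        rc = ldap_parse_result(ld, res, NULL, NULL, NULL, NULL, &resp_ctrls, 1);
        if (rc == LDAP_SUCCESS && resp_ctrls) {
            ber_int_t estimate = 0;
            ldap_parse_page_control(ld, resp_ctrls, &estimate, &cookie);
            ldap_controls_free(resp_ctrls);
        }
    } while (cookie && cookie->bv_len > 0);   /* an empty cookie marks the last page */

    if (cookie) {
        ber_bvfree(cookie);
    }
    return rc;
}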
2f02b0d0fee902c4a7777273bbeb519bf1c182c8
389ds/389-ds-base
Ticket 205 - snmp counters index strings for multiple network interfaces with ip addr and tcp port pairs Bug Description: When a host contains several interfaces, if several DS instances listen on the same port (but on different interfaces), the snmp report will only contain one of those instances. Fix Description: The fix is to define a new cn=config attribute (nsslapd-snmp-index) that is a number. The value of this attribute will replace the instance port number (nsslapd-port) in the snmp report. https://fedorahosted.org/389/ticket/205 Reviewed by: Nathan Kinder (thanks Nathan) Platforms tested: Fedora 17 Flag Day: no Doc impact: yes. A new configuration attribute (in cn=config) needs to be described ("nsslapd-snmp-index"). nsslapd-snmp-index: It specifies an index (identifier) that is appended to the RHDS MIB OIDs of the instance. If it is defined, it replaces the 'port' (nsslapd-port) in the reported OID. For example: instance1 and instance2 have defined nsslapd-snmp-index (greater than 0); instance3 did not define nsslapd-snmp-index (or set it to 0), so its port number is appended. > snmpwalk -v 2c -c redhat localhost RHDS-MIB::dsSimpleAuthBinds RHDS-MIB::dsSimpleAuthBinds.<snmp_index_instance1> = Counter64: 16 RHDS-MIB::dsSimpleAuthBinds.<snmp_index_instance2> = Counter64: 4 RHDS-MIB::dsSimpleAuthBinds.<port_instance3> = Counter64: 24 In case of several RHDS instances listening on the same port (on different interfaces), it offers a means to link the reported snmp counters to a given instance. This attribute is an optional numeric value greater than or equal to 0. 0 means that the snmp index is not used and the SNMP report will contain the port number. Any change to this value requires a restart of both the DS instance and the DS SNMP sub-agent to take effect.
commit 2f02b0d0fee902c4a7777273bbeb519bf1c182c8 Author: Thierry bordaz (tbordaz) <[email protected]> Date: Wed Apr 17 16:06:32 2013 +0200 Ticket 205 - snmp counters index strings for multiple network interfaces with ip addr and tcp port pairs Bug Description: When a host contains several interfaces, if several DS instances listen on the same port (but on different interfaces) the snmp report will only contain one of those instances. Fix Description: The fix is to define a new cn=config attribute (nsslapd-snmp-index) that is a number. The value of this attribute will replace the instance port number (nsslapd-port) in the snmp report. https://fedorahosted.org/389/ticket/205 Reviewed by: Nathan Kinder (thanks Nathan) Platforms tested: Fedora 17 Flag Day: no Doc impact: yes. A new configuration attribute (in cn=config) needs to be described ("nsslapd-snmp-index"). nsslapd-snmp-index: It specifies an index (identifier) that would be appened to the RHDS MIB OIDs of the instance. If it is defined it will replace in the reported OID the 'port' (nsslapd-port). For example: instance1 and instance2 have defined nsslapd-snmp-index (greater than 0). instance3 did not define nsslapd-snmp-index or nsslapd-snmp-index=0, so its slot number is appened. > snmpwalk -v 2c -c redhat localhost RHDS-MIB::dsSimpleAuthBinds RHDS-MIB::dsSimpleAuthBinds.<snmp_index_instance1> = Counter64: 16 RHDS-MIB::dsSimpleAuthBinds.<snmp_index_instance2> = Counter64: 4 RHDS-MIB::dsSimpleAuthBinds.<port_instance3> = Counter64: 24 In case of several RHDS instances listening on the same port (on different interfaces), it offers a means to link the reported snmp counters to a given instance. This attribute is an optional numeric value greater or equal to 0. 0 means that the snmp index is not used and the SNMP report will contain the port number. Any change of value requires (to be taken into account) restart of both DS and DS snmp sub-agent. 
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 0831cc802..10e0e7c5e 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -152,6 +152,7 @@ static int config_set_schemareplace ( const char *attrname, char *value, #define DEFAULT_OUTBOUND_LDAP_IO_TIMEOUT "300000" #define DEFAULT_MAX_FILTER_NEST_LEVEL "40" #define DEFAULT_GROUPEVALNESTLEVEL "0" +#define DEFAULT_SNMP_INDEX "0" #define DEFAULT_MAX_SASLIO_SIZE "2097152" #define DEFAULT_DISK_THRESHOLD "2097152" #define DEFAULT_DISK_GRACE_PERIOD "60" @@ -198,7 +199,7 @@ static int config_set_schemareplace ( const char *attrname, char *value, #define DEFAULT_LDAPI_SEARCH_BASE "dc=example,dc=com" #define DEFAULT_LDAPI_AUTO_DN "cn=peercred,cn=external,cn=auth" #define ENTRYUSN_IMPORT_INIT "0" -#define DEFAULT_ALLOWED_TO_DELETE_ATTRS "nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext" +#define DEFAULT_ALLOWED_TO_DELETE_ATTRS "nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext nsslapd-snmp-index" #define SALTED_SHA1_SCHEME_NAME "SSHA" /* CONFIG_ON_OFF */ @@ -734,6 +735,10 @@ static struct config_get_and_set { NULL, 0, (void**)&global_slapdFrontendConfig.listenhost, CONFIG_STRING, NULL, NULL/* NULL value is allowed */}, + {CONFIG_SNMP_INDEX_ATTRIBUTE, config_set_snmp_index, + NULL, 0, + (void**) &global_slapdFrontendConfig.snmp_index, + CONFIG_INT, NULL, DEFAULT_SNMP_INDEX}, {CONFIG_LDAPI_FILENAME_ATTRIBUTE, config_set_ldapi_filename, NULL, 0, (void**)&global_slapdFrontendConfig.ldapi_filename, @@ -1981,6 +1986,39 @@ config_set_listenhost( const char *attrname, char *value, char *errorbuf, int ap return retVal; } +int +config_set_snmp_index(const char *attrname, char *value, char *errorbuf, int apply) +{ + int retVal = LDAP_SUCCESS; + long snmp_index; + long snmp_index_disable; + char *endp = NULL; + + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + snmp_index_disable = atol(DEFAULT_SNMP_INDEX); /* if snmp index is disabled, use the nsslapd-port instead */; + + if (config_value_is_null(attrname, value, errorbuf, 0)) { + snmp_index = snmp_index_disable; + } else { + errno = 0; + snmp_index = strtol(value, &endp, 10); + + if (*endp != '\0' || errno == ERANGE || snmp_index < snmp_index_disable) { + PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid value \"%s\", %s must be greater or equal to %d (%d means disabled)", + attrname, value, CONFIG_SNMP_INDEX_ATTRIBUTE, snmp_index_disable, snmp_index_disable); + retVal = LDAP_OPERATIONS_ERROR; + } + } + + if (apply) { + CFG_LOCK_WRITE(slapdFrontendConfig); + slapdFrontendConfig->snmp_index = snmp_index; + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } + return retVal; +} + int config_set_ldapi_filename( const char *attrname, char *value, char *errorbuf, int apply ) { int retVal = LDAP_SUCCESS; diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 05d2212c4..ea6fb6c65 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -273,6 +273,7 @@ int config_set_localhost( const char *attrname, char *value, char *errorbuf, int int config_set_listenhost( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_securelistenhost( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_ldapi_filename( const char *attrname, char *value, char *errorbuf, int apply ); +int config_set_snmp_index( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_ldapi_switch( 
const char *attrname, char *value, char *errorbuf, int apply ); int config_set_ldapi_bind_switch( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_ldapi_root_dn( const char *attrname, char *value, char *errorbuf, int apply ); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index c72d1934f..224706cd8 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1967,6 +1967,7 @@ typedef struct _slapdEntryPoints { #define CONFIG_PORT_ATTRIBUTE "nsslapd-port" #define CONFIG_WORKINGDIR_ATTRIBUTE "nsslapd-workingdir" #define CONFIG_LISTENHOST_ATTRIBUTE "nsslapd-listenhost" +#define CONFIG_SNMP_INDEX_ATTRIBUTE "nsslapd-snmp-index" #define CONFIG_LDAPI_FILENAME_ATTRIBUTE "nsslapd-ldapifilepath" #define CONFIG_LDAPI_SWITCH_ATTRIBUTE "nsslapd-ldapilisten" #define CONFIG_LDAPI_BIND_SWITCH_ATTRIBUTE "nsslapd-ldapiautobind" @@ -2164,6 +2165,7 @@ typedef struct _slapdFrontendConfig { char *encryptionalias; char *errorlog; char *listenhost; + int snmp_index; #ifndef _WIN32 char *localuser; #endif /* _WIN32 */ diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c index 53af97223..42dd76b0a 100644 --- a/ldap/servers/snmp/main.c +++ b/ldap/servers/snmp/main.c @@ -329,6 +329,8 @@ load_config(char *conf_path) } else if ((p = strstr(line, "server")) != NULL) { int got_port = 0; int got_rundir = 0; + int got_snmp_index = 0; + long snmp_index = 0; int lineno = 0; char *entry = NULL; char *instancename = NULL; @@ -423,10 +425,15 @@ load_config(char *conf_path) if ((strcmp(attr, "dn") == 0) && (strcmp(val, "cn=config") == 0)) { char *dse_line = NULL; + + /* Look for port and rundir attributes */ while ((dse_line = ldif_getline(&entryp)) != NULL) { ldif_parse_line(dse_line, &attr, &val, &vlen); - if (strcmp(attr, "nsslapd-port") == 0) { + if (strcmp(attr, "nsslapd-snmp-index") == 0) { + snmp_index = atol(val); + got_snmp_index = 1; + } else if (strcmp(attr, "nsslapd-port") == 0) { serv_p->port = atol(val); got_port = 1; } else if (strcmp(attr, "nsslapd-rundir") == 0) { @@ -448,8 +455,8 @@ load_config(char *conf_path) } /* Stop processing this entry if we found the - * port and rundir settings */ - if (got_port && got_rundir) { + * port and rundir and snmp_index settings */ + if (got_port && got_rundir && got_snmp_index) { break; } } @@ -479,6 +486,15 @@ load_config(char *conf_path) error = 1; goto close_and_exit; } + + /* in case a snmp index is specified, it replace the nsslapd-port + * This would allow to give an index to a snmp report, rather than using + * the TCP interface port number (because the same port may be listen on multiple interfaces). + * For snmp_index values <= 0 (disabled), let's keep the port + */ + if (got_snmp_index && (snmp_index > 0)) { + serv_p->port = snmp_index; + } /* Insert server instance into linked list */ serv_p->next = server_head;
0
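
The validation above rejects trailing characters, overflow, and negative values before applying nsslapd-snmp-index. A minimal standalone sketch of that strtol pattern, with illustrative names rather than the server's own config API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a non-negative index from a config string.
     * Returns 0 on success and stores the value in *out;
     * returns -1 and leaves *out untouched on bad input. */
    static int parse_snmp_index(const char *value, long *out)
    {
        char *endp = NULL;
        long idx;

        if (value == NULL || *value == '\0') {
            *out = 0;               /* unset means "disabled, fall back to the port" */
            return 0;
        }

        errno = 0;
        idx = strtol(value, &endp, 10);
        if (*endp != '\0' || errno == ERANGE || idx < 0) {
            return -1;              /* trailing junk, overflow, or negative value */
        }

        *out = idx;
        return 0;
    }

    int main(void)
    {
        const char *samples[] = { "0", "3", "42x", "-1", "" };
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            long idx;
            if (parse_snmp_index(samples[i], &idx) == 0)
                printf("\"%s\" -> index %ld%s\n", samples[i], idx,
                       idx == 0 ? " (disabled, port is reported)" : "");
            else
                printf("\"%s\" -> rejected\n", samples[i]);
        }
        return 0;
    }
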
e998632b45b49813df128cee17b813bee306f580
389ds/389-ds-base
Ticket 49105 - Sig FPE when ns-slapd has 0 backends. Bug Description: The autotuning system assumed we had 1 or more backends. As a result, when you start a server with no backends, a divide by 0 was encountered Fix Description: Check the backend count before we attempt the division. https://fedorahosted.org/389/ticket/49105 Author: wibrown Review by: nhosoi (Thanks!)
commit e998632b45b49813df128cee17b813bee306f580 Author: William Brown <[email protected]> Date: Mon Jan 30 12:32:20 2017 +1000 Ticket 49105 - Sig FPE when ns-slapd has 0 backends. Bug Description: The autotuning system assumed we had 1 or more backends. As a result, when you start a server with no backends, a divide by 0 was encountered Fix Description: Check the backend count before we attempt the division. https://fedorahosted.org/389/ticket/49105 Author: wibrown Review by: nhosoi (Thanks!) diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c index 56dd2c493..759af8a6e 100644 --- a/ldap/servers/slapd/back-ldbm/start.c +++ b/ldap/servers/slapd/back-ldbm/start.c @@ -150,27 +150,29 @@ ldbm_back_start_autotune(struct ldbminfo *li) { db_pages = (512 * MEGABYTE) / pagesize; } - /* Number of entry cache pages per backend. */ - entry_pages = (zone_pages - db_pages) / backend_count; - /* Now, clamp this value to a 64mb boundary. */ - /* How many pages are in 64mb? */ - clamp_pages = (64 * MEGABYTE) / pagesize; - /* Now divide the entry pages by this, and also mod. If mod != 0, we need - * to add 1 to the diveded number. This should give us: - * 510 * 1024 * 1024 == 510MB - * 534773760 bytes - * 130560 pages at 4096 pages. - * 16384 pages for 64Mb - * 130560 / 16384 = 7 - * 130560 % 16384 = 15872 which is != 0 - * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB. - */ - clamp_div = entry_pages / clamp_pages; - clamp_mod = entry_pages % clamp_pages; - if (clamp_mod != 0) { - /* If we want to clamp down, remove this line. This would change the above from 510mb -> 448mb. */ - clamp_div += 1; - entry_pages = clamp_div * clamp_pages; + if (backend_count > 0 ) { + /* Number of entry cache pages per backend. */ + entry_pages = (zone_pages - db_pages) / backend_count; + /* Now, clamp this value to a 64mb boundary. */ + /* How many pages are in 64mb? */ + clamp_pages = (64 * MEGABYTE) / pagesize; + /* Now divide the entry pages by this, and also mod. If mod != 0, we need + * to add 1 to the diveded number. This should give us: + * 510 * 1024 * 1024 == 510MB + * 534773760 bytes + * 130560 pages at 4096 pages. + * 16384 pages for 64Mb + * 130560 / 16384 = 7 + * 130560 % 16384 = 15872 which is != 0 + * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB. + */ + clamp_div = entry_pages / clamp_pages; + clamp_mod = entry_pages % clamp_pages; + if (clamp_mod != 0) { + /* If we want to clamp down, remove this line. This would change the above from 510mb -> 448mb. */ + clamp_div += 1; + entry_pages = clamp_div * clamp_pages; + } } slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk physical memory\n", pages*(pagesize/1024)); @@ -202,7 +204,9 @@ ldbm_back_start_autotune(struct ldbminfo *li) { /* For each backend */ /* apply the appropriate cache size if 0 */ - li->li_cache_autosize_ec = (unsigned long)entry_pages * pagesize; + if (backend_count > 0 ) { + li->li_cache_autosize_ec = (unsigned long)entry_pages * pagesize; + } for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
0
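
The SIGFPE came from dividing the spare cache pages by a backend count of zero. A hedged sketch of the guarded arithmetic, with inputs chosen so the result matches the 130560-page example in the patch comment (function and variable names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Split the pages left over after the DB cache across backends,
     * rounding the per-backend share up to a 64MB boundary.
     * With zero backends there is nothing to size, so return 0
     * instead of dividing by zero. */
    static uint64_t entry_cache_pages(uint64_t zone_pages, uint64_t db_pages,
                                      uint64_t pagesize, uint64_t backend_count)
    {
        if (backend_count == 0) {
            return 0;               /* no backends: skip the division entirely */
        }

        uint64_t entry_pages = (zone_pages - db_pages) / backend_count;
        uint64_t clamp_pages = (64ULL * 1024 * 1024) / pagesize;  /* pages in 64MB */

        /* Round up to the next multiple of clamp_pages. */
        if (entry_pages % clamp_pages != 0) {
            entry_pages = (entry_pages / clamp_pages + 1) * clamp_pages;
        }
        return entry_pages;
    }

    int main(void)
    {
        /* 130560 entry pages at 4096 bytes round up to 131072 pages (512MB). */
        printf("1 backend : %llu pages\n",
               (unsigned long long)entry_cache_pages(261632, 131072, 4096, 1));
        printf("0 backends: %llu pages\n",
               (unsigned long long)entry_cache_pages(261632, 131072, 4096, 0));
        return 0;
    }
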
23a5d68ceea58b96d5b72c149d8f1c3a9fe8fe6f
389ds/389-ds-base
Issue 5421 - CI - makes replication/acceptance_test.py::test_modify_entry more robust (#5422) Bug description: test_modify_entry relies on replication latency with sleeps Fix description: change sleep into wait_for_replication relates: #5421 Reviewed by: Simon Pichugin
commit 23a5d68ceea58b96d5b72c149d8f1c3a9fe8fe6f Author: tbordaz <[email protected]> Date: Fri Aug 19 09:35:23 2022 +0200 Issue 5421 - CI - makes replication/acceptance_test.py::test_modify_entry more robust (#5422) Bug description: test_modify_entry relies on replication latency with sleeps Fix description: change sleep into wait_for_replication relates: #5421 Reviewed by: Simon Pichugin diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py index c65672e3a..ecfba2e58 100644 --- a/dirsrvtests/tests/suites/replication/acceptance_test.py +++ b/dirsrvtests/tests/suites/replication/acceptance_test.py @@ -132,9 +132,17 @@ def test_modify_entry(topo_m4, create_entry): log.info('Modifying entry {} - add operation'.format(TEST_ENTRY_DN)) + m1 = topo_m4.ms["supplier1"] + m2 = topo_m4.ms["supplier2"] + m3 = topo_m4.ms["supplier3"] + m4 = topo_m4.ms["supplier4"] + repl = ReplicationManager(DEFAULT_SUFFIX) + test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) test_user.add('mail', '{}@redhat.com'.format(TEST_ENTRY_NAME)) - time.sleep(sleep_time) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m1, m3) + repl.wait_for_replication(m1, m4) all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) for u in all_user: @@ -142,7 +150,9 @@ def test_modify_entry(topo_m4, create_entry): log.info('Modifying entry {} - replace operation'.format(TEST_ENTRY_DN)) test_user.replace('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) - time.sleep(sleep_time) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m1, m3) + repl.wait_for_replication(m1, m4) all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) for u in all_user: @@ -150,7 +160,9 @@ def test_modify_entry(topo_m4, create_entry): log.info('Modifying entry {} - delete operation'.format(TEST_ENTRY_DN)) test_user.remove('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) - time.sleep(sleep_time) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m1, m3) + repl.wait_for_replication(m1, m4) all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) for u in all_user:
0
4513cc460a2822f586f4028220d54adb00ce413d
389ds/389-ds-base
Issue 4396 - Minor memory leak in backend (#4558) Bug Description: As multiple suffixes per backend were no longer used, this functionality has been replaced with a single suffix per backend. Legacy code remains that adds multiple suffixes to the dse internal backend, resulting in memory allocations that are lost. Also a minor typo is corrected in backend.c Fix Description: Calls to be_addsuffix on the DSE backend are removed as they are never used. Fixes: https://github.com/389ds/389-ds-base/issues/4396 Reviewed by: mreynolds389, Firstyear, droideck (Thank you)
commit 4513cc460a2822f586f4028220d54adb00ce413d Author: James Chapman <[email protected]> Date: Tue Jan 26 10:29:42 2021 +0000 Issue 4396 - Minor memory leak in backend (#4558) Bug Description: As multiple suffixes per backend were no longer used, this functionality has been replaced with a single suffix per backend. Legacy code remains that adds multiple suffixes to the dse internal backend, resulting in memory allocations that are lost. Also a minor typo is corrected in backend.c Fix Description: Calls to be_addsuffix on the DSE backend are removed as they are never used. Fixes: https://github.com/389ds/389-ds-base/issues/4396 Reviewed by: mreynolds389, Firstyear, droideck (Thank you) diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c index 5c7041eb0..7d3fdd588 100644 --- a/ldap/servers/slapd/backend.c +++ b/ldap/servers/slapd/backend.c @@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in } be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", name, type); - if (NULL == be->be_configdn) { + if (NULL == be->be_monitordn) { slapi_log_err(SLAPI_LOG_ERR, "be_init", "Failed create instance monitor dn for " "plugin %s, instance %s\n", diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c index f4bd21392..8c0fd9ad9 100644 --- a/ldap/servers/slapd/fedse.c +++ b/ldap/servers/slapd/fedse.c @@ -2881,7 +2881,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)), } /* - * Called from config.c to install the internal backends + * Called from main.c to install the internal backends */ int setup_internal_backends(char *configdir) @@ -2900,7 +2900,6 @@ setup_internal_backends(char *configdir) Slapi_DN counters; Slapi_DN snmp; Slapi_DN root; - Slapi_Backend *be; Slapi_DN encryption; Slapi_DN saslmapping; Slapi_DN plugins; @@ -2949,16 +2948,11 @@ setup_internal_backends(char *configdir) dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL); dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL); - be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin); - be_addsuffix(be, &root); - be_addsuffix(be, &monitor); - be_addsuffix(be, &config); + be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin); /* - * Now that the be's are in place, we can - * setup the mapping tree. + * Now that the be's are in place, we can setup the mapping tree. */ - if (mapping_tree_init()) { slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n"); exit(1);
0
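
Besides dropping the unused be_addsuffix() calls, the patch fixes a copy-paste check that tested be_configdn right after assigning be_monitordn. A standalone sketch of the corrected pattern, with a made-up struct in place of the real Slapi_Backend:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-ins for the backend fields involved in the typo. */
    struct backend {
        char *configdn;
        char *monitordn;
    };

    /* Build the monitor DN and verify the pointer that was just assigned.
     * The bug fixed above checked be_configdn after assigning be_monitordn,
     * so an allocation failure of the monitor DN went unnoticed. */
    static int backend_set_monitordn(struct backend *be, const char *name, const char *type)
    {
        char buf[256];

        snprintf(buf, sizeof(buf), "cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", name, type);
        be->monitordn = strdup(buf);
        if (be->monitordn == NULL) {    /* check the field we just set, not its sibling */
            fprintf(stderr, "failed to create monitor dn for %s/%s\n", name, type);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct backend be = { NULL, NULL };
        if (backend_set_monitordn(&be, "userRoot", "ldbm database") == 0) {
            printf("monitor dn: %s\n", be.monitordn);
        }
        free(be.monitordn);
        return 0;
    }
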
88fc5c026f157a814ee26426552a2b9492075626
389ds/389-ds-base
Issue 48185 - Remove referint-logchanges attr from referint's config Description: The referint-logchanges attribute of referint plugin serves no purpose; it does absolutely nothing. Remove it. https://pagure.io/389-ds-base/issue/48185 Author: Ilias95 Review by: mreynolds, wibrown (Thanks Ilias!)
commit 88fc5c026f157a814ee26426552a2b9492075626 Author: Ilias Stamatis <[email protected]> Date: Tue Aug 1 13:53:34 2017 +0300 Issue 48185 - Remove referint-logchanges attr from referint's config Description: The referint-logchanges attribute of referint plugin serves no purpose; it does absolutely nothing. Remove it. https://pagure.io/389-ds-base/issue/48185 Author: Ilias95 Review by: mreynolds, wibrown (Thanks Ilias!) diff --git a/dirsrvtests/tests/suites/dynamic_plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic_plugins/plugin_tests.py index e35b98ba7..293c167ab 100644 --- a/dirsrvtests/tests/suites/dynamic_plugins/plugin_tests.py +++ b/dirsrvtests/tests/suites/dynamic_plugins/plugin_tests.py @@ -1999,7 +1999,6 @@ def test_referint(inst, args=None): 'referint-membership-attr': 'member', 'referint-update-delay': '0', 'referint-logfile': REFERINT_LOGFILE, - 'referint-logchanges': '0' }))) except ldap.LDAPError as e: log.fatal('test_referint: Failed to shared config entry: error ' + e.message['desc']) diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index ea6b5059c..a3b850ef8 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -620,7 +620,6 @@ nsslapd-pluginenabled: off nsslapd-pluginprecedence: 40 referint-update-delay: 0 referint-logfile: %log_dir%/referint -referint-logchanges: 0 referint-membership-attr: member referint-membership-attr: uniquemember referint-membership-attr: owner diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c index 2d3d973c9..f6d1c27a2 100644 --- a/ldap/servers/plugins/referint/referint.c +++ b/ldap/servers/plugins/referint/referint.c @@ -27,7 +27,6 @@ #define REFERINT_PLUGIN_SUBSYSTEM "referint-plugin" /* used for logging */ #define REFERINT_PREOP_DESC "referint preop plugin" #define REFERINT_ATTR_DELAY "referint-update-delay" -#define REFERINT_ATTR_LOGCHANGES "referint-logchanges" #define REFERINT_ATTR_LOGFILE "referint-logfile" #define REFERINT_ATTR_MEMBERSHIP "referint-membership-attr" #define MAX_LINE 2048 @@ -39,7 +38,6 @@ typedef struct referint_config { int delay; char *logfile; - int logchanges; char **attrs; } referint_config; @@ -51,14 +49,13 @@ int referint_postop_del(Slapi_PBlock *pb); int referint_postop_modrdn(Slapi_PBlock *pb); int referint_postop_start(Slapi_PBlock *pb); int referint_postop_close(Slapi_PBlock *pb); -int update_integrity(Slapi_DN *sDN, char *newrDN, Slapi_DN *newsuperior, int logChanges); +int update_integrity(Slapi_DN *sDN, char *newrDN, Slapi_DN *newsuperior); int GetNextLine(char *dest, int size_dest, PRFileDesc *stream); int my_fgetc(PRFileDesc *stream); void referint_thread_func(void *arg); void writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn, char *newrdn, Slapi_DN *newsuperior, Slapi_DN *requestorsdn); int load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply); int referint_get_delay(void); -int referint_get_logchanges(void); char *referint_get_logfile(void); char **referint_get_attrs(void); int referint_postop_modify(Slapi_PBlock *pb); @@ -69,7 +66,7 @@ Slapi_DN *referint_get_config_area(void); void referint_set_plugin_area(Slapi_DN *sdn); Slapi_DN *referint_get_plugin_area(void); int referint_sdn_config_cmp(Slapi_DN *sdn); -void referint_get_config(int *delay, int *logchanges, char **logfile); +void referint_get_config(int *delay, char **logfile); /* global thread control stuff */ static PRLock *referint_mutex = NULL; @@ -273,7 +270,6 @@ referint_postop_init(Slapi_PBlock *pb) 
/* * referint-update-delay: 0 * referint-logfile: /var/log/dirsrv/slapd-localhost/referint - * referint-logchanges: 0 * referint-membership-attr: member * referint-membership-attr: uniquemember * referint-membership-attr: owner @@ -311,9 +307,8 @@ load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply) rc = SLAPI_PLUGIN_FAILURE; goto done; } else { - /* set these for config validation */ + /* set this for config validation */ tmp_config->delay = -2; - tmp_config->logchanges = -1; } if ((value = slapi_entry_attr_get_charptr(config_entry, REFERINT_ATTR_DELAY))) { @@ -333,11 +328,6 @@ load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply) tmp_config->logfile = value; new_config_present = 1; } - if ((value = slapi_entry_attr_get_charptr(config_entry, REFERINT_ATTR_LOGCHANGES))) { - tmp_config->logchanges = atoi(value); - slapi_ch_free_string(&value); - new_config_present = 1; - } if ((attrs = slapi_entry_attr_get_charray(config_entry, REFERINT_ATTR_MEMBERSHIP))) { tmp_config->attrs = attrs; new_config_present = 1; @@ -353,10 +343,6 @@ load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply) slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM, "load_config - Plugin configuration is missing %s\n", REFERINT_ATTR_LOGFILE); rc = SLAPI_PLUGIN_FAILURE; - } else if (tmp_config->logchanges == -1) { - slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM, "load_config - Plugin configuration is missing %s\n", - REFERINT_ATTR_LOGCHANGES); - rc = SLAPI_PLUGIN_FAILURE; } else if (!tmp_config->attrs) { slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM, "load_config - Plugin configuration is missing %s\n", REFERINT_ATTR_MEMBERSHIP); @@ -392,7 +378,6 @@ load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply) tmp_config->delay = atoi(argv[0]); tmp_config->logfile = slapi_ch_strdup(argv[1]); - tmp_config->logchanges = atoi(argv[2]); for (i = 3; argv[i] != NULL; i++) { slapi_ch_array_add(&tmp_config->attrs, slapi_ch_strdup(argv[i])); } @@ -508,18 +493,6 @@ referint_get_delay(void) return delay; } -int -referint_get_logchanges(void) -{ - int log_changes; - - slapi_rwlock_rdlock(config_rwlock); - log_changes = config->logchanges; - slapi_rwlock_unlock(config_rwlock); - - return log_changes; -} - char * referint_get_logfile(void) { @@ -533,15 +506,12 @@ referint_get_logfile(void) } void -referint_get_config(int *delay, int *logchanges, char **logfile) +referint_get_config(int *delay, char **logfile) { slapi_rwlock_rdlock(config_rwlock); if (delay) { *delay = config->delay; } - if (logchanges) { - *logchanges = config->logchanges; - } if (logfile) { *logfile = slapi_ch_strdup(config->logfile); } @@ -617,7 +587,6 @@ referint_postop_del(Slapi_PBlock *pb) Slapi_DN *sdn = NULL; char *logfile = NULL; int delay; - int logChanges = 0; int isrepop = 0; int oprc; int rc = SLAPI_PLUGIN_SUCCESS; @@ -637,7 +606,7 @@ referint_postop_del(Slapi_PBlock *pb) return SLAPI_PLUGIN_SUCCESS; } - referint_get_config(&delay, &logChanges, NULL); + referint_get_config(&delay, NULL); if (delay == -1) { /* integrity updating is off */ @@ -645,7 +614,7 @@ referint_postop_del(Slapi_PBlock *pb) } else if (delay == 0) { /* no delay */ /* call function to update references to entry */ if (referint_sdn_in_entry_scope(sdn)) { - rc = update_integrity(sdn, NULL, NULL, logChanges); + rc = update_integrity(sdn, NULL, NULL); } } else { /* write the entry to integrity log */ @@ -669,7 +638,6 @@ referint_postop_modrdn(Slapi_PBlock *pb) int oprc; int rc = SLAPI_PLUGIN_SUCCESS; int delay; - int 
logChanges = 0; int isrepop = 0; if (slapi_pblock_get(pb, SLAPI_IS_REPLICATED_OPERATION, &isrepop) != 0 || @@ -689,7 +657,7 @@ referint_postop_modrdn(Slapi_PBlock *pb) return SLAPI_PLUGIN_SUCCESS; } - referint_get_config(&delay, &logChanges, NULL); + referint_get_config(&delay, NULL); if (delay == -1) { /* integrity updating is off */ @@ -698,7 +666,7 @@ referint_postop_modrdn(Slapi_PBlock *pb) /* call function to update references to entry */ if (!plugin_EntryScope && !plugin_ExcludeEntryScope) { /* no scope defined, default always process referint */ - rc = update_integrity(sdn, newrdn, newsuperior, logChanges); + rc = update_integrity(sdn, newrdn, newsuperior); } else { const char *newsuperiordn = slapi_sdn_get_dn(newsuperior); if ((newsuperiordn == NULL && referint_sdn_in_entry_scope(sdn)) || @@ -707,10 +675,10 @@ referint_postop_modrdn(Slapi_PBlock *pb) * It is a modrdn inside the scope or into the scope, * process normal modrdn */ - rc = update_integrity(sdn, newrdn, newsuperior, logChanges); + rc = update_integrity(sdn, newrdn, newsuperior); } else if (referint_sdn_in_entry_scope(sdn)) { /* the entry is moved out of scope, treat as delete */ - rc = update_integrity(sdn, NULL, NULL, logChanges); + rc = update_integrity(sdn, NULL, NULL); } } } else { @@ -1100,8 +1068,7 @@ bail: int update_integrity(Slapi_DN *origSDN, char *newrDN, - Slapi_DN *newsuperior, - int logChanges __attribute__((unused))) + Slapi_DN *newsuperior) { Slapi_PBlock *search_result_pb = NULL; Slapi_PBlock *mod_pb = slapi_pblock_new(); @@ -1395,7 +1362,6 @@ referint_thread_func(void *arg __attribute__((unused))) char *iter = NULL; Slapi_DN *sdn = NULL; Slapi_DN *tmpsuperior = NULL; - int logChanges = 0; int delay; int no_changes; @@ -1405,7 +1371,7 @@ referint_thread_func(void *arg __attribute__((unused))) while (1) { /* refresh the config */ slapi_ch_free_string(&logfilename); - referint_get_config(&delay, &logChanges, &logfilename); + referint_get_config(&delay, &logfilename); no_changes = 1; while (no_changes) { @@ -1467,7 +1433,7 @@ referint_thread_func(void *arg __attribute__((unused))) } } - update_integrity(sdn, tmprdn, tmpsuperior, logChanges); + update_integrity(sdn, tmprdn, tmpsuperior); slapi_sdn_free(&sdn); slapi_ch_free_string(&tmprdn);
0
f599eedaeb1785444411b3ee240f73bade8d6df9
389ds/389-ds-base
Ticket 49055 - Fix debugging mode issue Bug description: It is impossible to turn debugging mode on without modifying lib389 code. Fix description: Change the source of DEBUGGING constant from hard coded to the one from environment variable named DEBUGGING. https://fedorahosted.org/389/ticket/49055 Reviewed by: wibrown (Thanks!)
commit f599eedaeb1785444411b3ee240f73bade8d6df9 Author: Simon Pichugin <[email protected]> Date: Mon Jan 2 08:28:27 2017 +0100 Ticket 49055 - Fix debugging mode issue Bug description: It is impossible to turn debugging mode on without modifying lib389 code. Fix description: Change the source of DEBUGGING constant from hard coded to the one from environment variable named DEBUGGING. https://fedorahosted.org/389/ticket/49055 Reviewed by: wibrown (Thanks!) diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index 6cd3d79eb..7880fb6d2 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -6,6 +6,7 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # +import os import sys import time import logging @@ -14,7 +15,7 @@ from lib389 import DirSrv from lib389._constants import * from lib389.properties import * -DEBUGGING = False +DEBUGGING = os.getenv('DEBUGGING', default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else:
0
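
The lib389 change reads the DEBUGGING switch from the environment instead of a hard-coded constant. Roughly the same pattern in C, as a hedged sketch (the variable name is kept, everything else is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Treat an unset or empty DEBUGGING variable as "off";
     * any other value turns verbose logging on. */
    static int debugging_enabled(void)
    {
        const char *val = getenv("DEBUGGING");
        return val != NULL && *val != '\0';
    }

    int main(void)
    {
        if (debugging_enabled()) {
            fprintf(stderr, "debug logging enabled\n");
        } else {
            fprintf(stderr, "debug logging disabled\n");
        }
        return 0;
    }
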
b3aae6b16bb99eec56d2bda84ac8fc9a979b354e
389ds/389-ds-base
Ticket 47815 - add lib389 test Description: Add a lib389 test for ticket 47815 https://fedorahosted.org/389/ticket/47815 Reviewed by: nhosoi(Thanks!)
commit b3aae6b16bb99eec56d2bda84ac8fc9a979b354e Author: Mark Reynolds <[email protected]> Date: Wed Jun 25 16:00:26 2014 -0400 Ticket 47815 - add lib389 test Description: Add a lib389 test for ticket 47815 https://fedorahosted.org/389/ticket/47815 Reviewed by: nhosoi(Thanks!) diff --git a/dirsrvtests/tickets/ticket47815_test.py b/dirsrvtests/tickets/ticket47815_test.py new file mode 100644 index 000000000..1f2ce3378 --- /dev/null +++ b/dirsrvtests/tickets/ticket47815_test.py @@ -0,0 +1,227 @@ +import os +import sys +import time +import ldap +import logging +import socket +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from constants import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + At the beginning, It may exists a standalone instance. + It may also exists a backup for the standalone instance. + + Principle: + If standalone instance exists: + restart it + If backup of standalone exists: + create/rebind to standalone + + restore standalone instance from backup + else: + Cleanup everything + remove instance + remove backup + Create instance + Create backup + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the backups + backup_standalone = standalone.checkBackupFS() + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + if instance_standalone: + # assuming the instance is already stopped, just wait 5 sec max + standalone.stop(timeout=5) + standalone.start(timeout=10) + + if backup_standalone: + # The backup exist, assuming it is correct + # we just re-init the instance with it + if not instance_standalone: + standalone.create() + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # restore standalone instance from backup + standalone.stop(timeout=10) + standalone.restoreFS(backup_standalone) + standalone.start(timeout=10) + + else: + # We should be here only in two conditions + # - This is the first time a test involve standalone instance + # - Something weird happened (instance/backup destroyed) + # so we discard everything and recreate all + + # Remove the backup. So even if we have a specific backup file + # (e.g backup_standalone) we clear backup that an instance may have created + if backup_standalone: + standalone.clearBackupFS() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # Time to create the backups + standalone.stop(timeout=10) + standalone.backupfile = standalone.backupFS() + standalone.start(timeout=10) + + # + # Here we have standalone instance up and running + # Either coming from a backup recovery + # or from a fresh (re)init + # Time to return the topology + return TopologyStandalone(standalone) + + +def test_ticket47815(topology): + """ + Test betxn plugins reject an invalid option, and make sure that the rejected entry + is not in the entry cache. + + Enable memberOf, automember, and retrocl plugins + Add the automember config entry + Add the automember group + Add a user that will be rejected by a betxn plugin - result error 53 + Attempt the same add again, and it should result in another error 53 (not error 68) + """ + result = 0 + result2 = 0 + + log.info('Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache') + + # Enabled the plugins + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # configure automember config entry + log.info('Adding automember config') + try: + topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { + 'objectclass': 'top autoMemberDefinition'.split(), + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'cn=user', + 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com', + 'autoMemberGroupingAttr': 'member:dn', + 'cn': 'group cfg'}))) + except: + log.error('Failed to add automember config') + exit(1) + + topology.standalone.stop(timeout=120) + time.sleep(1) + topology.standalone.start(timeout=120) + time.sleep(3) + + # need to reopen a connection toward the instance + topology.standalone.open() + + # add automember group + log.info('Adding automember group') + try: + topology.standalone.add_s(Entry(('cn=group,dc=example,dc=com', { + 'objectclass': 'top groupOfNames'.split(), + 'cn': 'group'}))) + except: + log.error('Failed to add automember group') + exit(1) + + # add user that should result in an error 53 + log.info('Adding invalid entry') + + try: + topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('Adding invalid entry failed as expected') + result = 53 + except ldap.LDAPError, e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result == 0: + log.error('Add operation unexpectedly succeeded') + assert False + + # Attempt to add user again, should result in error 53 again + try: + topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('2nd add of invalid entry failed as expected') + result2 = 53 + except ldap.LDAPError, e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result2 == 0: + log.error('2nd Add operation unexpectedly succeeded') + assert False + + # If we got here we passed! + log.info('Ticket47815 Test - Passed') + + +def test_ticket47815_final(topology): + topology.standalone.stop(timeout=10) + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
+ To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47815(topo) + +if __name__ == '__main__': + run_isolated()
0
95a7f23262076d90fdc8a9ec76e131e9e4c09bcc
389ds/389-ds-base
Ticket 49241 - add symbolic link location to db2bak.pl output Description: If a symbolic link is used for the script's backup location, then add that info to the output. https://pagure.io/389-ds-base/issue/49241 Reviewed by: firstyear (Thanks!)
Ticket 49241 - add symbolic link location to db2bak.pl output Description: If a symbolic link is used for the script's backup location, then add that info to the output. https://pagure.io/389-ds-base/issue/49241 Reviewed by: firstyear (Thanks!)
commit 95a7f23262076d90fdc8a9ec76e131e9e4c09bcc Author: Mark Reynolds <[email protected]> Date: Wed May 24 12:15:20 2017 -0400 Ticket 49241 - add symblic link location to db2bak.pl output Description: If a symbolic link is used for the script's backup location then add info to the output. https://pagure.io/389-ds-base/issue/49241 Reviewed by: firstyear(Thanks!) diff --git a/ldap/admin/src/scripts/db2bak.pl.in b/ldap/admin/src/scripts/db2bak.pl.in index 335285e4a..352a01e58 100644 --- a/ldap/admin/src/scripts/db2bak.pl.in +++ b/ldap/admin/src/scripts/db2bak.pl.in @@ -105,7 +105,12 @@ if ($archivedir eq "") { } else { $symname = $archivedir; } - print("Back up directory: $archivedir\n"); + if ($symname eq "") { + print("Back up directory: $archivedir\n"); + } else { + print("Back up directory: $archivedir -> $mybakdir/$archivebase\n"); + } + # If an archive dir is specified, create it as a symlink pointing # to the default backup dir not to violate the selinux policy. $archivedir = "${mybakdir}/${archivebase}";
0
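
db2bak.pl now prints where the symlinked backup directory really points. A hedged C sketch of the same reporting idea built on readlink(2); the paths and message format are illustrative, not the script's exact output:

    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Print the backup directory, and if it is a symbolic link,
     * also print the real location it resolves to. */
    static void report_backup_dir(const char *archivedir)
    {
        char target[PATH_MAX];
        ssize_t len = readlink(archivedir, target, sizeof(target) - 1);

        if (len < 0) {
            /* Not a symlink (or not readable): just print the directory. */
            printf("Back up directory: %s\n", archivedir);
            return;
        }
        target[len] = '\0';
        printf("Back up directory: %s -> %s\n", archivedir, target);
    }

    int main(int argc, char **argv)
    {
        report_backup_dir(argc > 1 ? argv[1] : "/var/lib/dirsrv/slapd-localhost/bak");
        return 0;
    }
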
d214765932c9c701ea03e6663b03419b96426337
389ds/389-ds-base
Ticket 49650 - lib389 enable_tls doesn't work on F28 Bug Description: In the lib389 we have the method inst.enable_tls(). It creates certificates and sets up the server for TLS communication. It works on F27 built from master branch and doesn't work on F28. It happens because on F28 openssl fails to verify the certificate. Fix Description: We should create CA with an appropriate flag. It can be done by setting an X.509 V3 Certificate Type Extension in the certificate to 'certSigning' value. https://pagure.io/389-ds-base/issue/49650 Author: mhonek Review by: spichugi, wibrown (Thanks!)
commit d214765932c9c701ea03e6663b03419b96426337 Author: Matúš Honěk <[email protected]> Date: Wed Apr 25 18:50:11 2018 +0200 Ticket 49650 - lib389 enable_tls doesn't work on F28 Bug Description: In the lib389 we have the method inst.enable_tls(). It creates certificates and sets up the server for TLS communication. It works on F27 built from master branch and doesn't work on F28. It happens because on F28 openssl fails to verify the certificate. Fix Description: We should create CA with an appropriate flag. It can be done by setting an X.509 V3 Certificate Type Extension in the certificate to 'certSigning' value. https://pagure.io/389-ds-base/issue/49650 Author: mhonek Review by: spichugi, wibrown (Thanks!) diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py index 7c78d4911..26c8f70ec 100644 --- a/src/lib389/lib389/nss_ssl.py +++ b/src/lib389/lib389/nss_ssl.py @@ -176,6 +176,8 @@ class NssSsl(object): 'CT,,', '-v', '%s' % VALID, + '--keyUsage', + 'certSigning', '-d', self._certdb, '-z',
0
404e278e52b54b3e427ee0e2ba59dfd661381896
389ds/389-ds-base
Issue 4615 - log message when psearch first exceeds max threads per conn Description: When a connection hits max threads per conn for the first time, log a message in the error log. This will help customers diagnose misbehaving clients. Fixes: https://github.com/389ds/389-ds-base/issues/4615 Reviewed by: progier389 (Thanks!)
commit 404e278e52b54b3e427ee0e2ba59dfd661381896 Author: Mark Reynolds <[email protected]> Date: Fri Feb 12 13:05:32 2021 -0500 Issue 4615 - log message when psearch first exceeds max threads per conn Desciption: When a connection hits max threads per conn for the first time log a message in the error. This will help customers diagnosis misbehaving clients. Fixes: https://github.com/389ds/389-ds-base/issues/4615 Reviewed by: progier389(Thanks!) diff --git a/dirsrvtests/tests/suites/psearch/psearch_test.py b/dirsrvtests/tests/suites/psearch/psearch_test.py index 585fe0f35..f1b1a9094 100644 --- a/dirsrvtests/tests/suites/psearch/psearch_test.py +++ b/dirsrvtests/tests/suites/psearch/psearch_test.py @@ -1,16 +1,17 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2020 Red Hat, Inc. +# Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # +import ldap +import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st from lib389.idm.group import Groups -import ldap from ldap.controls.psearch import PersistentSearchControl,EntryChangeNotificationControl pytestmark = pytest.mark.tier1 diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h index f52c9e648..1aaee0dd8 100644 --- a/ldap/servers/plugins/sync/sync.h +++ b/ldap/servers/plugins/sync/sync.h @@ -111,7 +111,7 @@ typedef struct OPERATION_PL_CTX OPERATION_PL_CTX_T * get_thread_primary_op(void); void set_thread_primary_op(OPERATION_PL_CTX_T *op); -const op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb); +op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb); void sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident); void sync_register_allow_openldap_compat(PRBool allow); diff --git a/ldap/servers/plugins/sync/sync_init.c b/ldap/servers/plugins/sync/sync_init.c index 332eb0ba2..752aaecab 100644 --- a/ldap/servers/plugins/sync/sync_init.c +++ b/ldap/servers/plugins/sync/sync_init.c @@ -278,7 +278,7 @@ set_thread_primary_op(OPERATION_PL_CTX_T *op) static int sync_persist_extension_type; /* initialized in sync_persist_register_operation_extension */ static int sync_persist_extension_handle; /* initialized in sync_persist_register_operation_extension */ -const op_ext_ident_t * +op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb) { Slapi_Operation *op; @@ -289,7 +289,7 @@ sync_persist_get_operation_extension(Slapi_PBlock *pb) sync_persist_extension_handle); slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_get_operation_extension operation (op=0x%lx) -> %d\n", (ulong) op, ident ? ident->idx_pl : -1); - return (const op_ext_ident_t *) ident; + return (op_ext_ident_t *) ident; } @@ -328,4 +328,4 @@ sync_persist_register_operation_extension(void) sync_persist_operation_extension_destructor, &sync_persist_extension_type, &sync_persist_extension_handle); -} \ No newline at end of file +} diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 1883fe711..615aa3dbe 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -1,6 +1,6 @@ /** BEGIN COPYRIGHT BLOCK * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2020 Red Hat, Inc. + * Copyright (C) 2021 Red Hat, Inc. * All rights reserved. * * License: GPL (version 3 or any later version). 
@@ -1734,6 +1734,13 @@ connection_threadmain() } else { /* keep count of how many times maxthreads has blocked an operation */ conn->c_maxthreadsblocked++; + if (conn->c_maxthreadsblocked == 1 && connection_has_psearch(conn)) { + slapi_log_err(SLAPI_LOG_NOTICE, "connection_threadmain", + "Connection (conn=%" PRIu64 ") has a running persistent search " + "that has exceeded the maximum allowed threads per connection. " + "New operations will be blocked.\n", + conn->c_connid); + } } pthread_mutex_unlock(&(conn->c_mutex)); } @@ -2313,15 +2320,7 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE * ding all the persistent searches to get them * to notice that their operations have been abandoned. */ - int found_ps = 0; - Operation *o; - - for (o = conn->c_ops; !found_ps && o != NULL; o = o->o_next) { - if (o->o_flags & OP_FLAG_PS) { - found_ps = 1; - } - } - if (found_ps) { + if (connection_has_psearch(conn)) { if (NULL == ps_wakeup_all_fn) { if (get_entry_point(ENTRY_POINT_PS_WAKEUP_ALL, (caddr_t *)(&ps_wakeup_all_fn)) == 0) { @@ -2391,3 +2390,17 @@ connection_call_io_layer_callbacks(Connection *c) return rv; } + +int32_t +connection_has_psearch(Connection *c) +{ + Operation *o; + + for (o = c->c_ops; o != NULL; o = o->o_next) { + if (o->o_flags & OP_FLAG_PS) { + return 1; + } + } + + return 0; +} diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index eddc0cd56..15d5be892 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1,6 +1,6 @@ /** BEGIN COPYRIGHT BLOCK * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2020 Red Hat, Inc. + * Copyright (C) 2021 Red Hat, Inc. * All rights reserved. * * License: GPL (version 3 or any later version). @@ -1459,6 +1459,13 @@ setup_pr_read_pds(Connection_Table *ct) } else { if (c->c_threadnumber >= c->c_max_threads_per_conn) { c->c_maxthreadsblocked++; + if (c->c_maxthreadsblocked == 1 && connection_has_psearch(c)) { + slapi_log_err(SLAPI_LOG_NOTICE, "connection_threadmain", + "Connection (conn=%" PRIu64 ") has a running persistent search " + "that has exceeded the maximum allowed threads per connection. " + "New operations will be blocked.\n", + c->c_connid); + } } c->c_fdi = SLAPD_INVALID_SOCKET_INDEX; } diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index e7f354969..5e0228211 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -1489,6 +1489,7 @@ int connection_release_nolock_ext(Connection *conn, int release_only); int connection_is_free(Connection *conn, int user_lock); int connection_is_active_nolock(Connection *conn); ber_slen_t openldap_read_function(Sockbuf_IO_Desc *sbiod, void *buf, ber_len_t len); +int32_t connection_has_psearch(Connection *c); /* * saslbind.c
0
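
The message is emitted only when c_maxthreadsblocked first reaches 1, so a misbehaving client cannot flood the error log. A minimal sketch of that log-once counter pattern (struct and field names are stand-ins for the real Connection):

    #include <stdint.h>
    #include <stdio.h>

    struct conn {
        uint64_t connid;
        uint64_t maxthreadsblocked;   /* how many ops this connection has had blocked */
        int      has_psearch;         /* connection is running a persistent search */
    };

    /* Record that an operation was blocked by the per-connection thread limit.
     * Only the first occurrence on a persistent-search connection is logged. */
    static void note_blocked_op(struct conn *c)
    {
        c->maxthreadsblocked++;
        if (c->maxthreadsblocked == 1 && c->has_psearch) {
            fprintf(stderr,
                    "NOTICE: conn=%llu has a persistent search that exceeded the "
                    "max threads per connection; new operations will be blocked\n",
                    (unsigned long long)c->connid);
        }
    }

    int main(void)
    {
        struct conn c = { 42, 0, 1 };
        note_blocked_op(&c);   /* logged once */
        note_blocked_op(&c);   /* silent     */
        note_blocked_op(&c);   /* silent     */
        return 0;
    }
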
d48891ad98082bce4358fa0b143b85ec6ff06ac6
389ds/389-ds-base
Resolves: bug 252263 Bug Description: enabling chain-on-update causes replica to act as a master Reviewed by: nkinder (Thanks!) Fix Description: This fix is only minor - we must use sym_load() to look up plugins, including the entry distribution plugin we use for chain on update. But I don't believe this was causing the problem. Chain on update does not work if you BIND as directory manager. You must bind as a regular user. It may be difficult to change this. We need to do more testing to see if, in general, proxy BIND and operations work with directory manager. The chaining backend cannot use directory manager as the proxy user. Platforms tested: RHEL5 Flag Day: no Doc impact: Yes, we need to make sure we document exactly how chain on update is to be used.
commit d48891ad98082bce4358fa0b143b85ec6ff06ac6 Author: Rich Megginson <[email protected]> Date: Thu Aug 23 20:52:13 2007 +0000 Resolves: bug 252263 Bug Description: enabling chain-on-update causes replica to act as a master Reviewed by: nkinder (Thanks!) Fix Description: This fix is only minor - we must use sym_load() to look up plugins, including the entry distribution plugin we use for chain on update. But I don't believe this was causing the problem. Chain on update does not work if you BIND as directory manager. You must bind as a regular user. It may be difficult to change this. We need to do more testing to see if, in general, proxy BIND and operations work with directory manager. The chaining backend cannot use directory manager as the proxy user. Platforms tested: RHEL5 Flag Day: no Doc impact: Yes, we need to make sure we document exactly how chain on update is to be used. diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c index 3947dd600..f2d4abddf 100644 --- a/ldap/servers/slapd/mapping_tree.c +++ b/ldap/servers/slapd/mapping_tree.c @@ -792,18 +792,8 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep ) if (plugin_lib && plugin_funct) { - PRLibrary *lib = PR_LoadLibrary(plugin_lib); - if (lib) - { - plugin = (mtn_distrib_fct) PR_FindSymbol(lib, plugin_funct); - } - else - { - LDAPDebug(LDAP_DEBUG_ANY, "ERROR: can't load plugin lib %s. " - SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n", - plugin_lib, PR_GetError(), slapd_pr_strerror(PR_GetError())); - } - + plugin = (mtn_distrib_fct)sym_load(plugin_lib, plugin_funct, + "Entry Distribution", 1); if (plugin == NULL) { LDAPDebug(LDAP_DEBUG_ANY, @@ -1315,9 +1305,7 @@ int mapping_tree_entry_modify_callback(Slapi_PBlock *pb, Slapi_Entry* entryBefor { if (plugin_lib && plugin_fct) { - PRLibrary *lib = PR_LoadLibrary(plugin_lib); - if (lib) - plugin = (mtn_distrib_fct) PR_FindSymbol(lib, plugin_fct); + plugin = (mtn_distrib_fct) sym_load(plugin_lib, plugin_fct, "Entry Distribution", 1); if (plugin == NULL) {
0
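
The fix routes plugin lookup through sym_load() so loading errors are reported in one place instead of at each PR_LoadLibrary()/PR_FindSymbol() call site. A hedged sketch of that idea using plain dlopen()/dlsym() rather than NSPR (the helper name and messages are invented; link with -ldl on older glibc):

    #include <dlfcn.h>
    #include <stdio.h>

    /* Load `symbol` from `libpath`, logging a descriptive error on failure.
     * Centralizing this in one helper keeps the error reporting consistent
     * wherever plugins are resolved. */
    static void *load_plugin_symbol(const char *libpath, const char *symbol,
                                    const char *what)
    {
        void *handle = dlopen(libpath, RTLD_NOW | RTLD_GLOBAL);
        if (handle == NULL) {
            fprintf(stderr, "ERROR: cannot load %s library %s: %s\n",
                    what, libpath, dlerror());
            return NULL;
        }

        void *fn = dlsym(handle, symbol);
        if (fn == NULL) {
            fprintf(stderr, "ERROR: cannot find %s symbol %s in %s: %s\n",
                    what, symbol, libpath, dlerror());
        }
        return fn;   /* the handle is kept open for the life of the plugin */
    }

    int main(void)
    {
        /* Example: resolve cos(3) out of the math library. */
        void *fn = load_plugin_symbol("libm.so.6", "cos", "Entry Distribution");
        printf("symbol resolved: %s\n", fn != NULL ? "yes" : "no");
        return 0;
    }
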
d2dfda95c543f106443f898436151b00c68e4270
389ds/389-ds-base
Ticket 47969 - COS memory leak when rebuilding the cache Bug Description: When the COS cache is released, not all of the schema objectclasses are freed. So every time we rebuild the COS cache we leak memory. Fix Description: After we free the schema attributes, the very first attribute still needs to be freed. It is not freed initially because of the duplicate checking logic, so it is now done after the loop. https://fedorahosted.org/389/ticket/47969 Reviewed by: nhosoi(Thanks!)
commit d2dfda95c543f106443f898436151b00c68e4270 Author: Mark Reynolds <[email protected]> Date: Tue Dec 2 13:38:06 2014 -0500 Ticket 47969 - COS memory leak when rebuilding the cache Bug Description: When the COS cache is released, not all of the schema objectclasses are freed. So every time we rebuild the COS cache we leak memory. Fix Description: After we free the schema attributes, the very first attribute still needs to be freed. It is not freed initially because of the duplicate checking logic, so it is now done after the loop. https://fedorahosted.org/389/ticket/47969 Reviewed by: nhosoi(Thanks!) diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c index a7993c810..fd163f9e2 100644 --- a/ldap/servers/plugins/cos/cos_cache.c +++ b/ldap/servers/plugins/cos/cos_cache.c @@ -1864,6 +1864,8 @@ static void cos_cache_del_schema(cosCache *pCache) } } } + /* Finally, remove the first attribute's objectclass list */ + cos_cache_del_attrval_list(&(pCache->ppAttrIndex[0]->pObjectclasses)); LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_del_schema\n",0,0,0); }
0
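
The leak: the loop that releases each attribute's objectclass list starts at the second entry because of the duplicate check, so the first entry's list was never freed. A simplified, hedged model of that pattern, assuming duplicate attribute types are always adjacent (the real cache code is more involved):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for the per-attribute schema data in the COS cache:
     * each attribute owns (or shares with a neighbouring duplicate) a
     * NULL-terminated list of objectclass names. */
    struct attr {
        const char *name;
        char      **objectclasses;
    };

    static char **make_list(const char *a, const char *b)   /* error checks omitted */
    {
        char **l = calloc(3, sizeof(char *));
        l[0] = strdup(a);
        l[1] = b ? strdup(b) : NULL;
        return l;
    }

    static void free_list(char ***listp)
    {
        if (*listp == NULL)
            return;
        for (char **v = *listp; *v; v++)
            free(*v);
        free(*listp);
        *listp = NULL;
    }

    /* Free the cache.  Duplicate attribute types share one list and are
     * assumed adjacent, so each entry is compared with the one before it:
     * the first entry of a run frees the list, later entries just drop
     * their reference.  Because the loop starts at index 1, entry 0's list
     * was never freed -- that is the leak the commit fixes -- so it is
     * released explicitly after the loop. */
    static void free_attr_cache(struct attr *attrs, size_t count)
    {
        for (size_t i = 1; i < count; i++) {
            if (strcmp(attrs[i].name, attrs[i - 1].name) != 0)
                free_list(&attrs[i].objectclasses);   /* first entry of a new run */
            else
                attrs[i].objectclasses = NULL;        /* shared; the run's owner frees it */
        }
        if (count > 0)
            free_list(&attrs[0].objectclasses);       /* the entry the loop never reaches */
    }

    int main(void)
    {
        struct attr attrs[3];
        attrs[0] = (struct attr){ "cn",   make_list("top", "person") };
        attrs[1] = (struct attr){ "mail", make_list("inetOrgPerson", NULL) };
        attrs[2] = (struct attr){ "mail", attrs[1].objectclasses };  /* duplicate type, shared list */
        free_attr_cache(attrs, 3);
        puts("cache freed without leaking entry 0");
        return 0;
    }
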
162a56a1258fcd5f02c81d799db59488132b6352
389ds/389-ds-base
Issue 5487 - Fix various issues with logconv.pl Description: Latency stats were inconsistent depending on whether "-V" was used. This was due to a missing usage check and an uninitialized latency array. Also fixed the CSV format. relates: https://github.com/389ds/389-ds-base/issues/5487 Reviewed by: spichugi (Thanks!)
commit 162a56a1258fcd5f02c81d799db59488132b6352 Author: Mark Reynolds <[email protected]> Date: Thu Oct 13 09:24:36 2022 -0400 Issue 5487 - Fix various issues with logconv.pl Description: Latency stats were inconsistent depending if "-V" was used. This was due to a missing usage check and uninitialized latency array Fixed CSV format relates: https://github.com/389ds/389-ds-base/issues/5487 Reviewed by: spichugi(Thanks!) diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index 301be80f1..59ed66256 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -2,7 +2,7 @@ # # BEGIN COPYRIGHT BLOCK # Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2021 Red Hat, Inc. +# Copyright (C) 2022 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). @@ -63,6 +63,13 @@ my $xi = 0; my $bindReportDN; my $usage = ""; my @latency; +$latency[0] = 0; +$latency[1] = 0; +$latency[2] = 0; +$latency[3] = 0; +$latency[4] = 0; +$latency[5] = 0; +$latency[6] = 0; # key is conn number - val is IP address my %openConnection; my @errorCode; @@ -1701,11 +1708,11 @@ if ($usage =~ /j/i || $verb eq "yes"){ print "\n----- Recommendations -----\n"; my $recCount = "1"; if ($unindexedSrchCountNotesA > 0){ - print "\n $recCount. You have unindexed searches, this can be caused from a search on an unindexed attribute, or your returned results exceeded the allidsthreshold. Unindexed searches are not recommended. To refuse unindexed searches, switch \'nsslapd-require-index\' to \'on\' under your database entry (e.g. cn=UserRoot,cn=ldbm database,cn=plugins,cn=config).\n"; + print "\n $recCount. You have unindexed searches, this can be caused from a search on an unindexed attribute, or your returned results exceeded the nsslapd-idlistscanlimit. Unindexed searches are very resource intensive and should be prevented or corrected. To refuse unindexed searches, switch \'nsslapd-require-index\' to \'on\' under your database entry (e.g. cn=UserRoot,cn=ldbm database,cn=plugins,cn=config).\n"; $recCount++; } if ($unindexedSrchCountNotesU > 0){ - print "\n $recCount. You have unindexed components, this can be caused from a search on an unindexed attribute, or your returned results exceeded the allidsthreshold. Unindexed components are not recommended. To refuse unindexed searches, switch \'nsslapd-require-index\' to \'on\' under your database entry (e.g. cn=UserRoot,cn=ldbm database,cn=plugins,cn=config).\n"; + print "\n $recCount. You have unindexed components, this can be caused from a search on an unindexed attribute, or your returned results exceeded the nsslapd-idlistscanlimit. Unindexed components are not recommended. To refuse unindexed searches, switch \'nsslapd-require-index\' to \'on\' under your database entry (e.g. cn=UserRoot,cn=ldbm database,cn=plugins,cn=config).\n"; $recCount++; } if (defined($conncount->{"T1"}) and $conncount->{"T1"} > 0){ @@ -1742,8 +1749,8 @@ if ($usage =~ /j/i || $verb eq "yes"){ print "\n $recCount. Your average etime is $etimeAvg, you may want to investigate this performance problem.\n"; $recCount++; } - if (sprintf("%.2f", $wtimeAvg) > 0){ - print "\n $recCount. Your average wtime is $wtimeAvg, you may want to investigate this performance problem.\n"; + if (sprintf("%.1f", $wtimeAvg) > 0.5){ + print "\n $recCount. 
Your average wtime is $wtimeAvg, you may need to increase the number of worker threads (nsslapd-threadnumber).\n"; $recCount++; } if (sprintf("%.1f", $optimeAvg) > 0){ @@ -2228,7 +2235,7 @@ sub parseLineNormal if ($simConnection > $maxsimConnection) { $maxsimConnection = $simConnection; } - if ($verb eq "yes" || $usage =~ /p/ || $reportStats){ + if ($verb eq "yes" || $usage =~ /p/ || $usage =~ /y/ || $reportStats){ ($connID) = $_ =~ /conn=(\d*)\s/; $openConnection{$connID} = $ip; if ($reportStats or ($verb eq "yes") || ($usage =~ /y/)) { @@ -2845,7 +2852,6 @@ print_stats_block $stats->{'unbind'}, $stats->{'notesA'}, $stats->{'notesU'}, - $stats->{'notesF'}, $stats->{'etime'}), "\n" ); } else {
0
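
The Perl fix zero-fills the latency buckets so the summary is the same whether or not -V is given. The equivalent habit in C is a braced initializer on the accumulator array; the bucket mapping below is illustrative, not logconv.pl's real one:

    #include <stdio.h>

    #define LATENCY_BUCKETS 7

    int main(void)
    {
        /* Zero-initialize every bucket up front, so a report printed before
         * (or without) any samples never reads indeterminate values. */
        long latency[LATENCY_BUCKETS] = {0};
        long etimes[] = { 0, 1, 1, 3, 16, 2 };
        size_t nsamples = sizeof(etimes) / sizeof(etimes[0]);

        for (size_t i = 0; i < nsamples; i++) {
            size_t bucket = (size_t)etimes[i];
            if (bucket >= LATENCY_BUCKETS)
                bucket = LATENCY_BUCKETS - 1;   /* clamp long etimes into the last bucket */
            latency[bucket]++;
        }

        for (size_t b = 0; b < LATENCY_BUCKETS; b++)
            printf("etime bucket %zu: %ld\n", b, latency[b]);
        return 0;
    }
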
09f55c6069bc6a708edd4bd5fe60d8906377b1cd
389ds/389-ds-base
Bug(s) fixed: 179723 Bug Description: crash after successful pwdchange via ldappasswd Reviewed by: Pete, Nathan (Thanks!) Fix Description: The passwd_extop code does an internal operation to change the password. Some of this code is only intended to be called for external operations, where you have a conn structure. The one place in particular which caused this bug is in update_pw_info, where it is only triggered if you must change the password or password expiration is in effect. The fix is simply to check that pb_conn is not NULL. Platforms tested: Fedora Core 4 Flag Day: no Doc impact: no
commit 09f55c6069bc6a708edd4bd5fe60d8906377b1cd Author: Rich Megginson <[email protected]> Date: Mon Mar 6 20:02:06 2006 +0000 Bug(s) fixed: 179723 Bug Description: crash after succesful pwdchange via ldappasswd Reviewed by: Pete, Nathan (Thanks!) Fix Description: The passwd_extop code does an internal operation to change the password. Some of this code is only intended to be called for external operations where you have a conn structure. The one place in particular which caused this bug is in update_pw_info, where it is only triggered if you must change the password or password expiration is in effect. The fix is to just check to see if the pb_conn is not null. Platforms tested: Fedora Core 4 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c index f7dae57c0..2ee477a0a 100644 --- a/ldap/servers/slapd/passwd_extop.c +++ b/ldap/servers/slapd/passwd_extop.c @@ -135,7 +135,6 @@ passwd_modify_getEntry( const char *dn, Slapi_Entry **e2 ) { static int passwd_apply_mods(const char *dn, Slapi_Mods *mods) { Slapi_PBlock pb; - Slapi_Operation *operation= NULL; int ret=0; LDAPDebug( LDAP_DEBUG_TRACE, "=> passwd_apply_mods\n", 0, 0, 0 ); @@ -150,9 +149,6 @@ static int passwd_apply_mods(const char *dn, Slapi_Mods *mods) pw_get_componentID(), /* PluginID */ 0); /* Flags */ - /* Plugin operations are INTERNAL by default, bypass it to enforce ACL checks */ - slapi_pblock_get (&pb, SLAPI_OPERATION, &operation); - ret =slapi_modify_internal_pb (&pb); slapi_pblock_get(&pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index c7d6896d2..7665a2617 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -647,8 +647,10 @@ update_pw_info ( Slapi_PBlock *pb , char *old_pw) { pw_apply_mods(dn, &smods); slapi_mods_done(&smods); - /* reset c_needpw to 0 */ - pb->pb_conn->c_needpw = 0; + if (pb->pb_conn) { /* no conn for internal op */ + /* reset c_needpw to 0 */ + pb->pb_conn->c_needpw = 0; + } return 0; }
0
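
pb_conn is NULL for internal operations, so the unguarded c_needpw reset crashed when the password change came from the extended-op plugin. A minimal sketch of the guard, with stand-in structs instead of the real pblock layout:

    #include <stdio.h>

    /* Illustrative stand-ins: internal operations carry no client connection. */
    struct connection {
        int needpw;                 /* "must change password" flag for this client */
    };

    struct pblock {
        struct connection *conn;    /* NULL when the operation is internal */
    };

    /* Clear the "must change password" state after a successful change.
     * Internal operations have no connection, so guard the dereference
     * instead of crashing. */
    static void clear_needpw(struct pblock *pb)
    {
        if (pb->conn != NULL) {     /* no conn for internal op */
            pb->conn->needpw = 0;
        }
    }

    int main(void)
    {
        struct connection client = { 1 };
        struct pblock external = { &client };
        struct pblock internal = { NULL };

        clear_needpw(&external);
        clear_needpw(&internal);    /* previously this dereferenced NULL */
        printf("client needpw after change: %d\n", client.needpw);
        return 0;
    }
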
f67e638bada2b2377081a19dc2546622eaa6b055
389ds/389-ds-base
Ticket 47368 - fix memory leaks Fixed some potential memory leaks when adding/deleting repl agreements. https://fedorahosted.org/389/ticket/47368 Reviewed by: richm(Thanks!)
commit f67e638bada2b2377081a19dc2546622eaa6b055 Author: Mark Reynolds <[email protected]> Date: Thu Dec 5 12:28:23 2013 -0500 Ticket 47368 - fix memory leaks Fixed some potential memory leaks when adding/deleting repl agreements. https://fedorahosted.org/389/ticket/47368 Reviewed by: richm(Thanks!) diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c index 55a74c876..a1f5a8b0b 100644 --- a/ldap/servers/plugins/replication/repl5_agmt.c +++ b/ldap/servers/plugins/replication/repl5_agmt.c @@ -599,6 +599,8 @@ agmt_delete(void **rap) repl_session_plugin_call_destroy_agmt_cb(ra); } + slapi_sdn_free((Slapi_DN **)&ra->dn); + slapi_rdn_free((Slapi_RDN **)&ra->rdn); slapi_ch_free_string(&ra->hostname); slapi_ch_free_string(&ra->binddn); slapi_ch_array_free(ra->frac_attrs); @@ -2812,7 +2814,8 @@ add_agmt_maxcsns(Slapi_Entry *e, Replica *r) } /* - * Create a smod of all the agmt maxcsns to add to the tombstone entry + * Create a smod of all the agmt maxcsns to add to the tombstone entry. + * Regardless if there is an error, smod always needs to be freed by the caller. */ int agmt_maxcsn_to_smod (Replica *r, Slapi_Mod *smod) @@ -2821,13 +2824,14 @@ agmt_maxcsn_to_smod (Replica *r, Slapi_Mod *smod) Repl_Agmt *agmt; int rc = 1; + slapi_mod_init (smod, replica_get_agmt_count(r) + 1); + slapi_mod_set_type (smod, type_agmtMaxCSN); + slapi_mod_set_operation (smod, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES); + agmt_obj = agmtlist_get_first_agreement_for_replica (r); if(agmt_obj == NULL){ /* no agreements */ return rc; } - slapi_mod_init (smod, replica_get_agmt_count(r) + 1); - slapi_mod_set_type (smod, type_agmtMaxCSN); - slapi_mod_set_operation (smod, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES); while (agmt_obj){ struct berval val; @@ -2929,6 +2933,7 @@ agmt_set_maxcsn(Repl_Agmt *ra) slapi_rdn_get_value_by_ref(slapi_rdn_get_rdn(ra->rdn)), ra->hostname, ra->port); if(strstr(maxcsns[i], buf) || strstr(maxcsns[i], unavail_buf)){ + slapi_ch_free_string(&ra->maxcsn); ra->maxcsn = slapi_ch_strdup(maxcsns[i]); ra->consumerRID = agmt_maxcsn_get_rid(maxcsns[i]); ra->tmpConsumerRID = 1; @@ -2993,7 +2998,6 @@ agmt_remove_maxcsn(Repl_Agmt *ra) slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "agmt_set_maxcsn: Out of memory\n"); goto done; } - ra->maxcsn = NULL; repl_obj = prot_get_replica_object(ra->protocol); if(repl_obj){ @@ -3003,6 +3007,7 @@ agmt_remove_maxcsn(Repl_Agmt *ra) slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "agmt_set_maxcsn: Failed to get repl object.\n"); goto done; } + slapi_ch_free_string(&ra->maxcsn); attrs[0] = (char*)type_agmtMaxCSN; attrs[1] = NULL; diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c index 26935ca34..fc277fffe 100644 --- a/ldap/servers/plugins/replication/repl5_replica.c +++ b/ldap/servers/plugins/replication/repl5_replica.c @@ -2584,7 +2584,6 @@ replica_write_ruv (Replica *r) Slapi_Mod smod_last_modified; LDAPMod *mods [4]; Slapi_PBlock *pb; - int free_rmod = 0; PR_ASSERT(r); @@ -2607,7 +2606,6 @@ replica_write_ruv (Replica *r) mods [1] = (LDAPMod *)slapi_mod_get_ldapmod_byref(&smod_last_modified); if(agmt_maxcsn_to_smod(r,&rmod) == LDAP_SUCCESS){ mods [2] = (LDAPMod *)slapi_mod_get_ldapmod_byref(&rmod); - free_rmod = 1; } else { mods [2] = NULL; } @@ -2654,7 +2652,7 @@ replica_write_ruv (Replica *r) PR_Unlock(r->repl_lock); slapi_mod_done (&smod); - if(free_rmod) slapi_mod_done (&rmod); + slapi_mod_done (&rmod); slapi_mod_done (&smod_last_modified); slapi_pblock_destroy 
(pb);
0
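
One of the leaks came from overwriting the agreement's stored maxcsn without releasing the previous value. A small sketch of the free-before-replace setter pattern (struct and function names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct agmt {
        char *maxcsn;   /* heap string owned by the agreement */
    };

    /* Replace the stored maxCSN, releasing whatever was there before.
     * Overwriting the pointer directly would leak the previous value
     * every time the agreement is refreshed. */
    static void agmt_store_maxcsn(struct agmt *ra, const char *newval)
    {
        free(ra->maxcsn);                       /* free(NULL) is a no-op */
        ra->maxcsn = newval ? strdup(newval) : NULL;
    }

    int main(void)
    {
        struct agmt ra = { NULL };
        agmt_store_maxcsn(&ra, "host.example.com:389;5f1e2d3c000000010000;rid=1");
        agmt_store_maxcsn(&ra, "host.example.com:389;5f1e2d4a000000010000;rid=1");  /* old value freed first */
        printf("maxcsn: %s\n", ra.maxcsn);
        agmt_store_maxcsn(&ra, NULL);           /* tear down */
        return 0;
    }
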
2bfbe594a2c5593fd1ae695c118be94939651264
389ds/389-ds-base
Bug 614242 - C99/ANSI C++ related compile errors on HP-UX https://bugzilla.redhat.com/show_bug.cgi?id=614242 Resolves: bug 614242 Bug Description: C99/ANSI C++ related compile errors on HP-UX Reviewed by: rmeggins Branch: HEAD Fix Description: In lib/ldaputil/vtable.c, a global static struct is declared and initialized to {}. I see gcc allows this, and aCC does also but only with -AA ("newly supported ANSI C++ Standard features"). In ldap/servers/slapd/tools/ldclt/ldclt.c, an array size is set using a variable, which I think is legal in C99. Since the size is not computed, and this is the only place C99 is taken advantage of, this can be changed to a #define; if more of the code relied on C99, then I would instead add the -AC99 compiler flag for HP-UX. Platforms tested: HP-UX Flag Day: no Doc impact: no Patch submitted by: Ulf Weltman <[email protected]>
commit 2bfbe594a2c5593fd1ae695c118be94939651264 Author: Rich Megginson <[email protected]> Date: Wed Jul 14 15:19:01 2010 -0600 Bug 614242 - C99/ANSI C++ related compile errors on HP-UX https://bugzilla.redhat.com/show_bug.cgi?id=614242 Resolves: bug 614242 Bug Description: C99/ANSI C++ related compile errors on HP-UX Reviewed by: rmeggins Branch: HEAD Fix Description: In lib/ldaputil/vtable.c, a global static struct is declared and initialized to {}. I see gcc allows this, and aCC does also but only with -AA ("newly supported ANSI C++ Standard features"). In ldap/servers/slapd/tools/ldclt/ldclt.c, an array size is set using a variable, which I think is legal in C99. Since the size is not computed, and this is the only place C99 is taken advantage of, this can be changed to a then I would instead add the -AC99 compiler flag for HP-UX. Platforms tested: HP-UX Flag Day: no Doc impact: no Patch submitted by: Ulf Weltman <[email protected]> diff --git a/ldap/servers/slapd/tools/ldclt/ldclt.c b/ldap/servers/slapd/tools/ldclt/ldclt.c index 53115ad68..cabdea84c 100644 --- a/ldap/servers/slapd/tools/ldclt/ldclt.c +++ b/ldap/servers/slapd/tools/ldclt/ldclt.c @@ -1259,6 +1259,7 @@ parseFilter ( RETURN : -1 if error, 0 else. DESCRIPTION : *****************************************************************************/ +#define BUFFERSIZE 1024 /* buffer size for buffer */ int basicInit (void) { @@ -1270,8 +1271,7 @@ basicInit (void) int oflags;/* open() flags */ /*JLS 05-04-01*/ struct stat file_st ; /* file status checker for attreplacefile option */ FILE *attrF; /* file pointer for attreplacefile option */ - int buffersize=1024; /* buffer size for buffer */ - char buffer[buffersize]; /* buffer used to read attreplacefile content */ + char buffer[BUFFERSIZE]; /* buffer used to read attreplacefile content */ /* * Misc inits @@ -1560,17 +1560,17 @@ basicInit (void) /* start to read file content */ mctx.attrplFileContent = (char *)malloc(mctx.attrplFileSize + 1); i=0; - while ( fread(buffer, buffersize , 1, attrF) ) + while ( fread(buffer, BUFFERSIZE , 1, attrF) ) { - memcpy(mctx.attrplFileContent+i, buffer , buffersize ); - memset(buffer ,'\0', buffersize ); - i = i + buffersize; + memcpy(mctx.attrplFileContent+i, buffer , BUFFERSIZE ); + memset(buffer ,'\0', BUFFERSIZE ); + i = i + BUFFERSIZE; } /* copy remainding content into mctx.attrplFileContent */ if (i<mctx.attrplFileSize) { memcpy(mctx.attrplFileContent+i, buffer , (mctx.attrplFileSize - 1 - i)); - memset(buffer ,'\0', buffersize ); /* clear the buffer */ + memset(buffer ,'\0', BUFFERSIZE ); /* clear the buffer */ } mctx.attrplFileContent[mctx.attrplFileSize]='\0'; // append the close bit diff --git a/lib/ldaputil/vtable.c b/lib/ldaputil/vtable.c index 18ac0a4d8..7886006f7 100644 --- a/lib/ldaputil/vtable.c +++ b/lib/ldaputil/vtable.c @@ -43,7 +43,7 @@ #include "ldaputili.h" #include <ldap.h> -static LDAPUVTable_t ldapu_VTable = {}; +static LDAPUVTable_t ldapu_VTable = {0}; /* Replace ldapu_VTable. Subsequently, ldaputil will call the functions in 'from' (not the LDAP API) to access the directory.
0
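As an illustration of the two portability problems described in the commit message above, here is a minimal, self-contained C sketch (hypothetical type and buffer names, not the real 389-ds-base code) showing the portable forms the fix adopts: an empty-brace initializer is a GNU/C++ extension, so {0} is used instead, and the C99 variable-length array is replaced by a compile-time constant, mirroring the #define BUFFERSIZE change in the diff.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the real vtable type in the patch. */
typedef struct { void *init; void *bind; } VTable_t;

/* Empty braces "{}" are a GNU/C++ extension; "{0}" is portable C. */
static VTable_t vtable = {0};

/* The C99 variable-length array is replaced by a compile-time size. */
#define BUFFERSIZE 1024

int main(void)
{
    char buffer[BUFFERSIZE];            /* fixed size: valid in C89 and C99 */

    memset(buffer, '\0', BUFFERSIZE);
    printf("vtable.init=%p, sizeof(buffer)=%zu\n", vtable.init, sizeof(buffer));
    return 0;
}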
bf9ef718cfd48c26eaf11662f522451d866e7681
389ds/389-ds-base
Ticket #48190 - idm/ipa 389-ds-base entry cache converges to 500 KB in dblayer_is_cachesize_sane Description: This issue was introduced by the fix for Ticket 47499, commit 1e035d1111f6abcb87e760a2b9e41fa9e05a7ebd. The function dblayer_is_cachesize_sane was originally implemented for the db cache, to check whether the given db cache size is larger than the available memory. The function resets the size to the available memory size if it is larger. In addition, to allow for the extra metadata the db cache needs, it multiplies the size by 0.8 every time the server starts. That reduction is not needed even for the db cache; the code is old and we no longer have to save memory there. Thus, this patch removes the size-reduction code. https://fedorahosted.org/389/ticket/48190 Reviewed by [email protected] (Thank you, Mark!!)
commit bf9ef718cfd48c26eaf11662f522451d866e7681 Author: Noriko Hosoi <[email protected]> Date: Sun May 31 17:08:29 2015 -0700 Ticket #48190 - idm/ipa 389-ds-base entry cache converges to 500 KB in dblayer_is_cachesize_sane Description: This issue was introduced by the fix for Ticket 47499 commit 1e035d1111f6abcb87e760a2b9e41fa9e05a7ebd. The function dblayer_is_cachesize_sane was originally implemented for db cache to check if the given db cache size is larger than the available memory or not. The function resets the size to the available memory size if it is larger. Also, considering the extra metadata size needed for the db cache, it multiplies by 0.8 every time it starts the server. It is not needed even for the db cache. The code is old and we don't have to save the memory there. Thus, this patch removes the resetting code. https://fedorahosted.org/389/ticket/48190 Reviewed by [email protected] (Thank you, Mark!!) diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 21a560e48..ac315bb32 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -1116,13 +1116,6 @@ int dblayer_is_cachesize_sane(size_t *cachesize) if (!issane) { *cachesize = (size_t)((pages - procpages) * pagesize); } - /* We now compensate for DB's own compensation for metadata size - * They increase the actual cache size by 25%, but only for sizes - * less than 500Meg. - */ - if (*cachesize < 500*MEGABYTE) { - *cachesize = (size_t)((double)*cachesize * (double)0.8); - } return issane; } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c index 9b93f9a27..f75ca9759 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c @@ -121,11 +121,12 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in ldbm_instance *inst = (ldbm_instance *) arg; int retval = LDAP_SUCCESS; size_t val = (size_t) value; + size_t chkval = val; /* Do whatever we can to make sure the data is ok. */ if (apply) { - if (!dblayer_is_cachesize_sane(&val)){ + if (!dblayer_is_cachesize_sane(&chkval)){ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: cachememsize value is too large."); LDAPDebug( LDAP_DEBUG_ANY,"Error: cachememsize value is too large.\n", @@ -152,11 +153,12 @@ ldbm_instance_config_dncachememsize_set(void *arg, void *value, char *errorbuf, ldbm_instance *inst = (ldbm_instance *) arg; int retval = LDAP_SUCCESS; size_t val = (size_t)value; + size_t chkval = val; /* Do whatever we can to make sure the data is ok. */ if (apply) { - if (!dblayer_is_cachesize_sane(&val)){ + if (!dblayer_is_cachesize_sane(&chkval)){ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: dncachememsize value is too large."); LDAPDebug( LDAP_DEBUG_ANY,"Error: dncachememsize value is too large.\n",
0
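The following is a minimal C sketch, under simplified assumptions, of the sanity-check behaviour described above; the function and parameter names are illustrative and not the real dblayer API. The block quoted in the comment corresponds to the 0.8 reduction this patch removes, and the scratch copy in main mirrors the chkval change that keeps the configured value from being rewritten on every restart.

#include <stdio.h>
#include <stddef.h>

#define MEGABYTE (1024 * 1024)

/*
 * Simplified sketch of the sanity check described above, not the real
 * dblayer_is_cachesize_sane(); pages, procpages and pagesize stand in for
 * values the server would read from the OS.
 */
static int cachesize_is_sane(size_t *cachesize, size_t pages, size_t procpages,
                             size_t pagesize)
{
    size_t available = (pages - procpages) * pagesize;
    int issane = (*cachesize <= available);

    if (!issane) {
        /* Clamp an over-large request to the memory actually available. */
        *cachesize = available;
    }

    /*
     * The removed block additionally did:
     *
     *     if (*cachesize < 500 * MEGABYTE)
     *         *cachesize = (size_t)((double)*cachesize * 0.8);
     *
     * Because the caller stored the adjusted value back into the entry-cache
     * configuration, every restart shrank the cache by 20% until it
     * converged; that is the behaviour this patch removes.
     */
    return issane;
}

int main(void)
{
    size_t requested = 400 * (size_t)MEGABYTE;
    size_t check = requested;   /* scratch copy, like chkval in the patch */

    int sane = cachesize_is_sane(&check, 100000, 1000, 4096);
    printf("requested=%zu sane=%d checked=%zu\n", requested, sane, check);
    return 0;
}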
c3b4c0c2ab30fd219e35f1ab4d8a05dd065f685c
389ds/389-ds-base
Ticket #48294 - CI test: added test cases for ticket 48294 Description: Linked Attributes plug-in - won't update links after MODRDN operation https://fedorahosted.org/389/ticket/48294
commit c3b4c0c2ab30fd219e35f1ab4d8a05dd065f685c Author: Noriko Hosoi <[email protected]> Date: Fri Dec 18 17:13:33 2015 -0800 Ticket #48294 - CI test: added test cases for ticket 48294 Description: Linked Attributes plug-in - won't update links after MODRDN operation https://fedorahosted.org/389/ticket/48294 diff --git a/dirsrvtests/tickets/ticket48294_test.py b/dirsrvtests/tickets/ticket48294_test.py new file mode 100644 index 000000000..109a67efe --- /dev/null +++ b/dirsrvtests/tickets/ticket48294_test.py @@ -0,0 +1,290 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import shutil +from lib389 import DirSrv, Entry, tools +from lib389 import DirSrvTools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' +MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +LINKTYPE = 'directReport' +MANAGEDTYPE = 'manager' + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("###############################################") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("###############################################") + +def check_attr_val(topology, dn, attr, expected): + try: + centry = topology.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') + if centry: + val = centry[0].getValue(attr) + if val.lower() == expected.lower(): + log.info('Value of %s is %s' % (attr, expected)) + else: + log.info('Value of %s is not %s, but %s' % (attr, expected, val)) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.message['desc']) + assert False + + +def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology is not None + assert entry_dn is not None + assert new_rdn is not None + + topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + try: + if new_superior: + topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + except ldap.NO_SUCH_ATTRIBUTE: + topology.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds") + attempt = 0 + if new_superior: + dn = "%s,%s" % (new_rdn, new_superior) + base = new_superior + else: + base = ','.join(entry_dn.split(",")[1:]) + dn = "%s, %s" % (new_rdn, base) + myfilter = entry_dn.split(',')[0] + + while attempt < 10: + try: + ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + break + except ldap.NO_SUCH_OBJECT: + topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") + attempt += 1 + time.sleep(1) + if attempt == 10: + ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + + +def _48294_init(topology): + """ + Set up Linked Attribute + """ + _header(topology, 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation') + + log.info('Enable Dynamic plugins, and the linked Attrs plugin') + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError as e: + ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + assert False + + try: + topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + ldap.fatal('Failed to enable linked attributes plugin!' 
+ e.message['desc']) + assert False + + log.info('Add the plugin config entry') + try: + topology.standalone.add_s(Entry((MANAGER_LINK, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': LINKTYPE, + 'managedType': MANAGEDTYPE + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + assert False + + log.info('Add 2 entries: manager1 and employee1') + try: + topology.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager1'}))) + except ldap.LDAPError as e: + log.fatal('Add manager1 failed: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'employee1'}))) + except ldap.LDAPError as e: + log.fatal('Add employee1 failed: error ' + e.message['desc']) + assert False + + log.info('Add linktype to manager1') + topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE)]) + + log.info('Check managed attribute') + check_attr_val(topology, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE) + + log.info('PASSED') + + +def _48294_run_0(topology): + """ + Rename employee1 to employee2 and adjust the value of directReport by replace + """ + _header(topology, 'Case 0 - Rename employee1 and adjust the link type value by replace') + + log.info('Rename employee1 to employee2') + _modrdn_entry(topology, entry_dn='uid=employee1,%s' % OU_PEOPLE, new_rdn='uid=employee2') + + log.info('Modify the value of directReport to uid=employee2') + try: + topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_REPLACE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)]) + except ldap.LDAPError as e: + log.fatal('Failed to replace uid=employee1 with employee2: ' + e.message['desc']) + assert False + + log.info('Check managed attribute') + check_attr_val(topology, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE) + + log.info('PASSED') + + +def _48294_run_1(topology): + """ + Rename employee2 to employee3 and adjust the value of directReport by delete and add + """ + _header(topology, 'Case 1 - Rename employee2 and adjust the link type value by delete and add') + + log.info('Rename employee2 to employee3') + _modrdn_entry(topology, entry_dn='uid=employee2,%s' % OU_PEOPLE, new_rdn='uid=employee3') + + log.info('Modify the value of directReport to uid=employee3') + try: + topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_DELETE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)]) + except ldap.LDAPError as e: + log.fatal('Failed to delete employee2: ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, 'uid=employee3,%s' % OU_PEOPLE)]) + except ldap.LDAPError as e: + log.fatal('Failed to add employee3: ' + e.message['desc']) + assert False + + log.info('Check managed attribute') + check_attr_val(topology, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE) + + log.info('PASSED') + + +def _48294_run_2(topology): + """ + Rename manager1 to manager2 and make sure the managed attribute value is updated + """ + _header(topology, 'Case 2 - Rename manager1 to manager2 and make sure the managed attribute value is updated') + + log.info('Rename manager1 to manager2') + _modrdn_entry(topology, 
entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2') + + log.info('Check managed attribute') + check_attr_val(topology, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager2,%s' % OU_PEOPLE) + + log.info('PASSED') + + +def _48294_final(topology): + topology.standalone.delete() + log.info('All PASSED') + + +def test_ticket48294(topology): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + _48294_init(topology) + + _48294_run_0(topology) + _48294_run_1(topology) + _48294_run_2(topology) + + _48294_final(topology) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE)
0
22e54fac8d7ee3995b3a27a9109cc4a7b36dfa61
389ds/389-ds-base
Bump version to 1.4.0.1
commit 22e54fac8d7ee3995b3a27a9109cc4a7b36dfa61 Author: Mark Reynolds <[email protected]> Date: Mon Oct 9 10:10:40 2017 -0400 Bump version to 1.4.0.1 diff --git a/VERSION.sh b/VERSION.sh index 32357f3fd..66e1016a5 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=4 -VERSION_MAINT=0.0 +VERSION_MAINT=0.1 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=$(date -u +%Y%m%d)
0
da14d3c8a14b015d17f8b8bbcb16f9aa7d6ccd86
389ds/389-ds-base
Bug 676053 - export task followed by import task causes cache assertion https://bugzilla.redhat.com/show_bug.cgi?id=676053 Description: There were 3 places where an entry was not released by CACHE_RETURN (i.e., its refcnt was not decremented). If an entry has a positive refcnt in the entry cache, it won't be freed even if the entry is never accessed again. 1. When a search request with a VLV and/or SORT control failed. 2. In compare_entries_sv (sort.c), when the second entry was not found, the first entry was not released. 3. vlv_trim_candidates_byvalue (vlv.c) retrieves entries, to perform a binary search over the candidate list, and puts them into the cache; they were not released. This patch adds CACHE_RETURN calls for the above cases.
commit da14d3c8a14b015d17f8b8bbcb16f9aa7d6ccd86 Author: Noriko Hosoi <[email protected]> Date: Wed Feb 16 17:30:51 2011 -0800 Bug 676053 - export task followed by import task causes cache assertion https://bugzilla.redhat.com/show_bug.cgi?id=676053 Description: There were 3 places where an entry was not released by CACHE_RETURN (== not decrementing refcnt). If an entry has positive refcnt in the entry cache, it won't be freed even if the entry never be accessed again. 1. When a search request with VLV and/or SORT control failed. 2. When comparing entries in compare_entries_sv (sort.c), and the second entry was not found, the first entry was not released. 3. vlv_trim_candidates_byvalue (vlv.c) retrieves entries for performing binary search over the candidate list and put them into the cache. They were not released. This patch adds CACHE_RETURN call for the above cases. diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index 8e39c5136..f21cd8732 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -286,7 +286,6 @@ dump_hash(Hashtable *ht) char *p; int ids_size = 80; - LDAPDebug0Args(LDAP_DEBUG_ANY, "entry cache:\n"); p = ep_ids; for (i = 0; i < ht->size; i++) { int len; @@ -301,8 +300,8 @@ dump_hash(Hashtable *ht) LDAPDebug1Arg(LDAP_DEBUG_ANY, "%s\n", ep_ids); p = ep_ids; ids_size = 80; } - PR_snprintf(p, ids_size, "%s", ep_id); - p += len; ids_size -= len + 1; + PR_snprintf(p, ids_size, "%s:", ep_id); + p += len + 1; ids_size -= len + 1; } while (e = HASH_NEXT(ht, e)); } if (p != ep_ids) { @@ -614,6 +613,7 @@ static void entrycache_clear_int(struct cache *cache) "entrycache_clear_int: there are still %ld entries " "in the entry cache.\n", cache->c_curentries); #ifdef LDAP_CACHE_DEBUG + LDAPDebug0Args(LDAP_DEBUG_ANY, "ID(s) in entry cache:\n"); dump_hash(cache->c_idtable); #endif } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c index 5565578b6..88af80f34 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_search.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c @@ -106,9 +106,24 @@ berval_done(struct berval *val) /* * We call this function as we exit ldbm_back_search */ -int ldbm_back_search_cleanup(Slapi_PBlock *pb, struct ldbminfo *li, sort_spec_thing *sort_control, int ldap_result, char* ldap_result_description, int function_result, Slapi_DN *sdn, struct vlv_request *vlv_request_control) +static int +ldbm_back_search_cleanup(Slapi_PBlock *pb, + struct ldbminfo *li, + sort_spec_thing *sort_control, + int ldap_result, + char* ldap_result_description, + int function_result, + Slapi_DN *sdn, + struct vlv_request *vlv_request_control, + struct backentry *e) { int estimate = 0; /* estimated search result count */ + backend *be; + ldbm_instance *inst; + + slapi_pblock_get( pb, SLAPI_BACKEND, &be ); + inst = (ldbm_instance *) be->be_instance_info; + CACHE_RETURN(&inst->inst_cache, &e); /* NULL e is handled correctly */ if(sort_control!=NULL) { @@ -220,7 +235,10 @@ ldbm_back_search( Slapi_PBlock *pb ) if(r!=0) { /* Badly formed SORT control */ - return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_PROTOCOL_ERROR, "Sort Control", SLAPI_FAIL_GENERAL, &basesdn, NULL); + return ldbm_back_search_cleanup(pb, li, sort_control, + LDAP_PROTOCOL_ERROR, "Sort Control", + SLAPI_FAIL_GENERAL, &basesdn, + NULL, NULL); } /* set this operation includes the server side sorting */ operation->o_flags |= OP_FLAG_SERVER_SIDE_SORTING; @@ -236,7 +254,10 @@ ldbm_back_search( 
Slapi_PBlock *pb ) if(r!=LDAP_SUCCESS) { /* Badly formed VLV control */ - return ldbm_back_search_cleanup(pb, li, sort_control, r, "VLV Control", SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control); + return ldbm_back_search_cleanup(pb, li, sort_control, + r, "VLV Control", + SLAPI_FAIL_GENERAL, &basesdn, + &vlv_request_control, NULL); } { /* Access Control Check to see if the client is allowed to use the VLV Control. */ @@ -255,7 +276,10 @@ ldbm_back_search( Slapi_PBlock *pb ) if(r!=LDAP_SUCCESS) { /* Client isn't allowed to do this. */ - return ldbm_back_search_cleanup(pb, li, sort_control, r, "VLV Control", SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control); + return ldbm_back_search_cleanup(pb, li, sort_control, + r, "VLV Control", + SLAPI_FAIL_GENERAL, &basesdn, + &vlv_request_control, NULL); } } /* @@ -267,7 +291,10 @@ ldbm_back_search( Slapi_PBlock *pb ) else { /* Can't have a VLV control without a SORT control */ - return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_SORT_CONTROL_MISSING, "VLV Control", SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control); + return ldbm_back_search_cleanup(pb, li, sort_control, + LDAP_SORT_CONTROL_MISSING, "VLV Control", + SLAPI_FAIL_GENERAL, &basesdn, + &vlv_request_control, NULL); } } } @@ -320,13 +347,15 @@ ldbm_back_search( Slapi_PBlock *pb ) { return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_UNWILLING_TO_PERFORM, ctrlstr, - SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control); + SLAPI_FAIL_GENERAL, &basesdn, + &vlv_request_control, NULL); } else { return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_VIRTUAL_LIST_VIEW_ERROR, ctrlstr, - SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control); + SLAPI_FAIL_GENERAL, &basesdn, + &vlv_request_control, NULL); } } else @@ -341,7 +370,8 @@ ldbm_back_search( Slapi_PBlock *pb ) sort_make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL); return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_UNAVAILABLE_CRITICAL_EXTENSION, ctrlstr, - SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control); + SLAPI_FAIL_GENERAL, &basesdn, + &vlv_request_control, NULL); } else /* vlv and sorting are not critical, so ignore the control */ { @@ -374,7 +404,8 @@ ldbm_back_search( Slapi_PBlock *pb ) if ( ( e = find_entry( pb, be, addr, NULL )) == NULL ) { /* error or referral sent by find_entry */ - return ldbm_back_search_cleanup(pb, li, sort_control, -1, NULL, 1, &basesdn, &vlv_request_control); + return ldbm_back_search_cleanup(pb, li, sort_control, + -1, NULL, 1, &basesdn, &vlv_request_control, NULL); } } @@ -407,12 +438,13 @@ ldbm_back_search( Slapi_PBlock *pb ) return ldbm_back_search_cleanup(pb, li, sort_control, vlv_rc, "VLV Control", SLAPI_FAIL_GENERAL, &basesdn, - &vlv_request_control); + &vlv_request_control, e); case VLV_BLD_LIST_FAILED: return ldbm_back_search_cleanup(pb, li, sort_control, vlv_response_control.result, NULL, SLAPI_FAIL_GENERAL, - &basesdn, &vlv_request_control); + &basesdn, &vlv_request_control, + e); case LDAP_SUCCESS: /* Log to the access log the particulars of this sort request */ @@ -434,7 +466,7 @@ ldbm_back_search( Slapi_PBlock *pb ) "Sort Response Control", SLAPI_FAIL_GENERAL, &basesdn, - &vlv_request_control); + &vlv_request_control, e); } } } @@ -447,7 +479,7 @@ ldbm_back_search( Slapi_PBlock *pb ) /* Error result sent by build_candidate_list */ return ldbm_back_search_cleanup(pb, li, sort_control, -1, NULL, rc, &basesdn, - &vlv_request_control); + &vlv_request_control, e); } /* * If we're sorting then we must check what administrative @@ -491,7 +523,7 @@ 
ldbm_back_search( Slapi_PBlock *pb ) { return ldbm_back_search_cleanup(pb, li, sort_control, r, NULL, -1, &basesdn, - &vlv_request_control); + &vlv_request_control, e); } } /* @@ -509,7 +541,7 @@ ldbm_back_search( Slapi_PBlock *pb ) return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_PROTOCOL_ERROR, "Sort Response Control", -1, - &basesdn, &vlv_request_control); + &basesdn, &vlv_request_control, e); } } else @@ -551,7 +583,7 @@ ldbm_back_search( Slapi_PBlock *pb ) LDAP_PROTOCOL_ERROR, "Sort Control", -1, &basesdn, - &vlv_request_control); + &vlv_request_control, e); case LDAP_UNWILLING_TO_PERFORM: /* Too hard */ case LDAP_OPERATIONS_ERROR: /* Operation error */ case LDAP_TIMELIMIT_EXCEEDED: /* Timeout */ @@ -593,7 +625,7 @@ ldbm_back_search( Slapi_PBlock *pb ) return ldbm_back_search_cleanup(pb, li, sort_control, (abandoned?-1:LDAP_PROTOCOL_ERROR), "Sort Response Control", -1, - &basesdn, &vlv_request_control); + &basesdn, &vlv_request_control, e); } } } @@ -619,7 +651,7 @@ ldbm_back_search( Slapi_PBlock *pb ) return ldbm_back_search_cleanup(pb, li, sort_control, vlv_response_control.result, NULL, -1, &basesdn, - &vlv_request_control); + &vlv_request_control, e); } } else @@ -638,7 +670,7 @@ ldbm_back_search( Slapi_PBlock *pb ) return ldbm_back_search_cleanup(pb, li, sort_control, (abandoned?-1:LDAP_PROTOCOL_ERROR), "VLV Response Control", -1, - &basesdn, &vlv_request_control); + &basesdn, &vlv_request_control, e); } /* Log the VLV operation */ vlv_print_access_log(pb,&vlv_request_control,&vlv_response_control); @@ -702,7 +734,7 @@ ldbm_back_search( Slapi_PBlock *pb ) /* tmp_err == -1: no error */ return ldbm_back_search_cleanup(pb, li, sort_control, tmp_err, tmp_desc, (tmp_err == -1 ? 0 : -1), &basesdn, - &vlv_request_control); + &vlv_request_control, NULL); /* end Fix for bugid #394184 */ } diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c index 7a2a9c972..2c9595d3c 100644 --- a/ldap/servers/slapd/back-ldbm/sort.c +++ b/ldap/servers/slapd/back-ldbm/sort.c @@ -632,6 +632,7 @@ static int compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s,baggage_carrier * if (0 != err ) { LDAPDebug(LDAP_DEBUG_TRACE,"compare_entries db err %d\n",err,0,0); } + CACHE_RETURN(&inst->inst_cache,&a); return 0; } /* OK, now we have the entries, so we work our way down the attribute list comparing as we go */ diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c index c68ce6429..ea08fd605 100644 --- a/ldap/servers/slapd/back-ldbm/vlv.c +++ b/ldap/servers/slapd/back-ldbm/vlv.c @@ -1634,10 +1634,10 @@ retry: { match= sort_attr_compare((struct berval**)typedown_value, entry_value, compare_fn); } - if (needFree) { - ber_bvecfree((struct berval**)entry_value); - entry_value = NULL; - } + if (needFree) { + ber_bvecfree((struct berval**)entry_value); + entry_value = NULL; + } } else { @@ -1690,6 +1690,8 @@ retry: LDAPDebug( LDAP_DEBUG_TRACE, "<= vlv_trim_candidates_byvalue: Found. Index %lu\n",si, 0, 0 ); } } + CACHE_RETURN(&(((ldbm_instance *)be->be_instance_info)->inst_cache), + &e); } } while (!found); ber_bvecfree((struct berval**)typedown_value);
0
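The refcount contract behind CACHE_RETURN can be sketched as follows; the helper names and struct layout are hypothetical, not the real back-ldbm cache API. The point is that an entry handed out by the cache carries an extra reference, and every exit path must return it exactly once; otherwise the entry can never be evicted or freed, which is the leak fixed in the three places listed above.

#include <stdio.h>

/* Illustrative stand-in for the real backentry/cache types. */
struct backentry {
    int refcnt;
    const char *dn;
};

static struct backentry *cache_find(struct backentry *e)
{
    e->refcnt++;                /* the cache hands out a referenced entry */
    return e;
}

static void cache_return(struct backentry **e)
{
    if (e && *e) {              /* a NULL entry is handled, as in the patch */
        (*e)->refcnt--;
        *e = NULL;
    }
}

int main(void)
{
    struct backentry entry = {0, "uid=test,dc=example,dc=com"};
    struct backentry *e = cache_find(&entry);

    /* ... an error path, e.g. a malformed SORT/VLV control ... */

    /*
     * The bug: returning early without cache_return(&e) leaves refcnt > 0
     * forever, so the entry can never be evicted or freed. The fix passes
     * the entry into the common cleanup routine, which returns it exactly
     * once on every exit path.
     */
    cache_return(&e);

    printf("refcnt after cleanup: %d\n", entry.refcnt);
    return 0;
}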
35f8ea348b5f4c08f8e09130db59abddd849d870
389ds/389-ds-base
Bug(s) fixed: 171066 Bug Description: Get rid of nsperl; use perldap with system perl Reviewed by: Noriko, Rob, Nathan (Thanks!) Branch: HEAD Fix Description: All perl scripts are made executable by using the #!/usr/bin/env perl *nix trick. This means that the correct version of perl (e.g. 5.6.1 or later) must be in the user's PATH. This version is shipped with the OS or is otherwise available on all platforms. On HP/ux, it is available as a depot which is installed in /opt/perl. For CGI perl scripts, the PATH can be set in the admserv.conf, so we may have to do that for HP/ux. To make perldap work, some ugly hacks are involved. Each perl script that uses perldap has a BEGIN section that figures out where it is in the server root, sets a server root variable, and sets LD_LIBRARY_PATH and SHLIB_PATH to point to serverroot/shared/lib. Perldap will be installed under serverroot/lib/perl. This directory will have 3 subdirectories: arch - containing the binary files; auto - containing autoloaded perl modules; and Mozilla - containing the base perldap .pm files. The BEGIN section also sets the perl INC path to find those modules. The diff gets rid of nsperl plus a lot of old, crufty perl-building code that we no longer use; those are the removed files. The admin server code also gets rid of the perl.c wrapper. Noriko pointed out that this does not take care of upgrade install, so I added several more files and diffs to take care of that case. Basically, go through the tasks in o=netscaperoot and replace perl?scriptname with just scriptname. Also, go through all of the template-generated scripts and replace the shebang line with #!/usr/bin/env perl, and make sure they are chmod +x. I also found a few more places that referenced nsperl and removed them. Platforms tested: RHEL4 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none
commit 35f8ea348b5f4c08f8e09130db59abddd849d870 Author: Rich Megginson <[email protected]> Date: Fri Oct 21 19:21:19 2005 +0000 Bug(s) fixed: 171066 Bug Description: Get rid of nsperl; use perldap with system perl Reviewed by: Noriko, Rob, Nathan (Thanks!) Branch: HEAD Fix Description: All perl scripts are made executable by using the #!/usr/bin/env perl *nix trick. This means that the correct version of perl must be in the user's PATH e.g. 5.6.1 or later. This version is either shipped with the OS or available on all platforms. On HP/ux, it is available as a depot which is installed in /opt/perl. For CGI perl scripts, the PATH can be set in the admserv.conf, so we may have to do that for HP/ux. To make perldap work, some ugly hacks are involved. Each perl script that uses perldap has a BEGIN section that figures out where it is in the server root, sets a server root variable, and sets LD_LIBRARY_PATH and SHLIB_PATH to point to serverroot/shared/lib. Perldap will be installed under serverroot/lib/perl. This directory will have 3 subdirectories: arch - containing the binary files; auto - containing autoloaded perl modules; and Mozilla - containing the base perldap .pm files. The BEGIN section also sets the perl INC path to find those modules. The directory gets rid of nsperl plus a lot of old crufty perl building code that we do not use anymore. Those are the removed files. The admin server code also gets rid of the perl.c wrapper. Noriko pointed out that this does not take care of upgrade install, so I added several more files and diffs to take care of that case. Basically, go through the tasks in o=netscaperoot and replace perl?scriptname with just scriptname. Also, go through all of the template generated scripts and replace the shebang line with #!/usr/bin/env perl, and make sure they are chmod +x. I also found a few more places that referenced nsperl and removed them. Platforms tested: RHEL4 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none diff --git a/buildpaths.mk b/buildpaths.mk index d512d3552..83ee4069e 100644 --- a/buildpaths.mk +++ b/buildpaths.mk @@ -135,3 +135,5 @@ LDAPCONSOLE_SOURCE_ROOT = $(BUILD_ROOT)/../directoryconsole # JAF - activation.jar - http://java.sun.com/products/javabeans/glasgow/jaf.html # JWSDP - jaxrpc-api.jar,jaxrpc.jar,saaj.jar - http://java.sun.com/webservices/downloads/webservicespack.html # Crimson - crimson.jar - http://xml.apache.org/crimson/ + +PERLDAP_SOURCE_ROOT = $(MOZILLA_SOURCE_ROOT) diff --git a/components.mk b/components.mk index 78d9985b0..fb258f75f 100644 --- a/components.mk +++ b/components.mk @@ -741,3 +741,15 @@ else -@echo "MAVEN is not required except on Windows." 
endif #WINNT +########### PerLDAP ############# +ifdef PERLDAP_SOURCE_ROOT + PERLDAP_BUILT_DIR = $(PERLDAP_SOURCE_ROOT)/directory/perldap/blib + PERLDAP_ARCHLIB_DIR = $(PERLDAP_BUILT_DIR)/arch + PERLDAP_LIB_DIR = $(PERLDAP_BUILT_DIR)/lib/Mozilla + PERLDAP_AUTOLIB_DIR = $(PERLDAP_BUILT_DIR)/lib/auto +# under the serverroot/lib directory, we should have a perl directory which contains arch/, auto/, and Mozilla/ + PACKAGE_SRC_DEST += $(PERLDAP_ARCHLIB_DIR) lib/perl + PACKAGE_SRC_DEST += $(PERLDAP_LIB_DIR) lib/perl + PACKAGE_SRC_DEST += $(PERLDAP_AUTOLIB_DIR) lib/perl +# else we're using the pre-built zip file - see ldap/cm/Makefile +endif diff --git a/httpd/src/Makefile b/httpd/src/Makefile index 3064968d9..fc18bf036 100644 --- a/httpd/src/Makefile +++ b/httpd/src/Makefile @@ -147,8 +147,6 @@ ifeq ($(ARCH), NCR) #OBJS+=$(HTTPD_DIR)/httpd-lib/nspr20/uxwrap.o endif -DAEMONLIB= - ifeq ($(ARCH), WINNT) # Don't define DEPLIBS for NT because standard macros like LINK_EXE # and LINK_DLL automatically pick up DEPLIBS which we don't always wan't. @@ -168,7 +166,6 @@ else ifndef DEPLIBS DEPLIBS = $(addsuffix .$(LIB_SUFFIX), \ $(addprefix $(OBJDIR)/lib/lib,$(HTTPD_LIBS)) \ - $(DAEMONLIB) \ $(OBJDIR)/lib/libaccess \ $(OBJDIR)/lib/libbase \ $(OBJDIR)/lib/libsi18n \ @@ -179,7 +176,6 @@ DEPLIBS += $(SECURITY_DEP) \ DEPLINK = $(addsuffix .$(LIB_SUFFIX), \ $(addprefix $(OBJDIR)/lib/lib,$(HTTPD_LIBS)) \ - $(DAEMONLIB) \ $(OBJDIR)/lib/libaccess \ $(OBJDIR)/lib/libbase \ $(OBJDIR)/lib/libsi18n \ diff --git a/ldap/admin/src/AddPerlHeader.pl b/ldap/admin/src/AddPerlHeader.pl deleted file mode 100644 index 1e64c401b..000000000 --- a/ldap/admin/src/AddPerlHeader.pl +++ /dev/null @@ -1,92 +0,0 @@ -# -# BEGIN COPYRIGHT BLOCK -# This Program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This Program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. -# -# In addition, as a special exception, Red Hat, Inc. gives You the additional -# right to link the code of this Program with code not covered under the GNU -# General Public License ("Non-GPL Code") and to distribute linked combinations -# including the two, subject to the limitations in this paragraph. Non-GPL Code -# permitted under this exception must only link to the code of this Program -# through those well defined interfaces identified in the file named EXCEPTION -# found in the source code files (the "Approved Interfaces"). The files of -# Non-GPL Code may instantiate templates or use macros or inline functions from -# the Approved Interfaces without causing the resulting work to be covered by -# the GNU General Public License. Only Red Hat, Inc. may make changes or -# additions to the list of Approved Interfaces. You must obey the GNU General -# Public License in all respects for all of the Program code and other code used -# in conjunction with the Program except the Non-GPL Code covered by this -# exception. If you modify this file, you may extend this exception to your -# version of the file, but you are not obligated to do so. 
If you do not wish to -# provide this exception without modification, you must delete this exception -# statement from your version and license this file solely under the GPL without -# exception. -# -# -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. -# END COPYRIGHT BLOCK -# -# used to add the perl preamble to all perl scripts used by the directory -# server run time; the problem is that the INC paths are compiled into -# the executable and cannot be overridden at run time, so we have to make -# sure we fix them - -# the first argument is the source path for the INC path -# the second argument is the token to replace with the server root -# at install time -# the third argument is the source perl script -# the fourth argument is the destination perl script -# - -($sourceLibPath, $serverRootToken, $sourceScript, $destScript) = @ARGV; -open SRC, $sourceScript or die "Error: could not open $sourceScript: $!"; -open DEST, ">$destScript" or die "Error: could not write $destScript: $!"; - -$isNT = -d '\\'; - -print DEST<<EOF1; -#!perl -# This preamble must be at the beginning of every perl script so that we can -# find packages and dynamically load code at run time; SERVER_ROOT must -# be replaced with the absolute path to the server root at installation -# time; it is assumed that the perl library will be installed as -# server root/install/perl -BEGIN { -EOF1 - -# if on NT, just assume we are using activeState, which looks like this -# server root/install/lib -# /bin -# /site/lib -# there is no arch subdir on NT -if ($isNT) { - print DEST "\t\@INC = qw( $serverRootToken/install/lib $serverRootToken/install/site/lib . );\n"; - print DEST "}\n"; -} else { - print DEST<<EOF2; - \$BUILD_PERL_PATH = \"$sourceLibPath\"; - \$RUN_PERL_PATH = \"$serverRootToken/install\"; - # make sure we use the unix path conventions - grep { s#\$BUILD_PERL_PATH#\$RUN_PERL_PATH#g } \@INC; -} -EOF2 -} - -# copy the rest of the file -while (<SRC>) { - print DEST; -} - -close DEST; -close SRC; diff --git a/ldap/admin/src/Base.def b/ldap/admin/src/Base.def deleted file mode 100644 index 0dd8f263e..000000000 --- a/ldap/admin/src/Base.def +++ /dev/null @@ -1,45 +0,0 @@ -; BEGIN COPYRIGHT BLOCK -; This Program is free software; you can redistribute it and/or modify it under -; the terms of the GNU General Public License as published by the Free Software -; Foundation; version 2 of the License. -; -; This Program is distributed in the hope that it will be useful, but WITHOUT -; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -; FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License along with -; this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -; Place, Suite 330, Boston, MA 02111-1307 USA. -; -; In addition, as a special exception, Red Hat, Inc. gives You the additional -; right to link the code of this Program with code not covered under the GNU -; General Public License ("Non-GPL Code") and to distribute linked combinations -; including the two, subject to the limitations in this paragraph. Non-GPL Code -; permitted under this exception must only link to the code of this Program -; through those well defined interfaces identified in the file named EXCEPTION -; found in the source code files (the "Approved Interfaces"). 
The files of -; Non-GPL Code may instantiate templates or use macros or inline functions from -; the Approved Interfaces without causing the resulting work to be covered by -; the GNU General Public License. Only Red Hat, Inc. may make changes or -; additions to the list of Approved Interfaces. You must obey the GNU General -; Public License in all respects for all of the Program code and other code used -; in conjunction with the Program except the Non-GPL Code covered by this -; exception. If you modify this file, you may extend this exception to your -; version of the file, but you are not obligated to do so. If you do not wish to -; provide this exception without modification, you must delete this exception -; statement from your version and license this file solely under the GPL without -; exception. -; -; -; Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -; Copyright (C) 2005 Red Hat, Inc. -; All rights reserved. -; END COPYRIGHT BLOCK -; -DESCRIPTION 'Setup SDK Dynamic perl module' -CODE SHARED READ EXECUTE -DATA SHARED READ WRITE -EXPORTS - XS_NSSetupSDK__Base_toLocal @2 - XS_NSSetupSDK__Base_toUTF8 @3 - boot_NSSetupSDK__Base @4 diff --git a/ldap/admin/src/Base.pm b/ldap/admin/src/Base.pm deleted file mode 100644 index 0edf682df..000000000 --- a/ldap/admin/src/Base.pm +++ /dev/null @@ -1,95 +0,0 @@ -# -# BEGIN COPYRIGHT BLOCK -# This Program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This Program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. -# -# In addition, as a special exception, Red Hat, Inc. gives You the additional -# right to link the code of this Program with code not covered under the GNU -# General Public License ("Non-GPL Code") and to distribute linked combinations -# including the two, subject to the limitations in this paragraph. Non-GPL Code -# permitted under this exception must only link to the code of this Program -# through those well defined interfaces identified in the file named EXCEPTION -# found in the source code files (the "Approved Interfaces"). The files of -# Non-GPL Code may instantiate templates or use macros or inline functions from -# the Approved Interfaces without causing the resulting work to be covered by -# the GNU General Public License. Only Red Hat, Inc. may make changes or -# additions to the list of Approved Interfaces. You must obey the GNU General -# Public License in all respects for all of the Program code and other code used -# in conjunction with the Program except the Non-GPL Code covered by this -# exception. If you modify this file, you may extend this exception to your -# version of the file, but you are not obligated to do so. If you do not wish to -# provide this exception without modification, you must delete this exception -# statement from your version and license this file solely under the GPL without -# exception. -# -# -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. 
-# END COPYRIGHT BLOCK -# - -package NSSetupSDK::Base; - -use POSIX; - -use strict; -use vars qw($VERSION @ISA @EXPORT @EXPORT_OK $AUTOLOAD); - -require Exporter; -require DynaLoader; -require AutoLoader; - -@ISA = qw(Exporter DynaLoader); -# Items to export into callers namespace by default. Note: do not export -# names by default without a very good reason. Use EXPORT_OK instead. -# Do not simply export all your public functions/methods/constants. - -# Items to export into callers namespace by default. Note: do not export -# names by default without a very good reason. Use EXPORT_OK instead. -# Do not simply export all your public functions/methods/constants. -@EXPORT = qw( - toLocal toUTF8 -); -$VERSION = '1.00'; - -bootstrap NSSetupSDK::Base $VERSION; - -# Autoload methods go after =cut, and are processed by the autosplit program. - -1; -__END__ -# Below is the stub of documentation for your module. You better edit it! - -=head1 NAME - -NSSetupSDK::Base - Perl extension for directory server administrative utility functions - -=head1 SYNOPSIS - - use NSSetupSDK::Base; - -=head1 DESCRIPTION - -The NSSetupSDK::Base module is used by directory server administration scripts, such as -those used for installation/uninstallation, instance creation/removal, CGIs, -etc. - -=head1 AUTHOR - -Richard Megginson [email protected] - -=head1 SEE ALSO - -perl(1). - -=cut diff --git a/ldap/admin/src/CreateInstall.pl b/ldap/admin/src/CreateInstall.pl deleted file mode 100644 index de15fdac6..000000000 --- a/ldap/admin/src/CreateInstall.pl +++ /dev/null @@ -1,66 +0,0 @@ -#!perl -# -# BEGIN COPYRIGHT BLOCK -# This Program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This Program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. -# -# In addition, as a special exception, Red Hat, Inc. gives You the additional -# right to link the code of this Program with code not covered under the GNU -# General Public License ("Non-GPL Code") and to distribute linked combinations -# including the two, subject to the limitations in this paragraph. Non-GPL Code -# permitted under this exception must only link to the code of this Program -# through those well defined interfaces identified in the file named EXCEPTION -# found in the source code files (the "Approved Interfaces"). The files of -# Non-GPL Code may instantiate templates or use macros or inline functions from -# the Approved Interfaces without causing the resulting work to be covered by -# the GNU General Public License. Only Red Hat, Inc. may make changes or -# additions to the list of Approved Interfaces. You must obey the GNU General -# Public License in all respects for all of the Program code and other code used -# in conjunction with the Program except the Non-GPL Code covered by this -# exception. If you modify this file, you may extend this exception to your -# version of the file, but you are not obligated to do so. 
If you do not wish to -# provide this exception without modification, you must delete this exception -# statement from your version and license this file solely under the GPL without -# exception. -# -# -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. -# END COPYRIGHT BLOCK -# -# figure out what the server root assuming the path to this script is -# server root/bin/slapd/admin/bin - -($serverRoot = $0) =~ s@[\\/]?bin[\\/]slapd[\\/]admin[\\/]bin.*$@@g; - -# run the post install program -$isNT = -d '\\'; -$quote = $isNT ? "\"" : ""; -# make sure the arguments are correctly quoted on NT -@fixargs = map { /^[\"].*[\"]$/ ? $_ : $quote . $_ . $quote } @ARGV; -if (! $serverRoot) { - $serverRoot = "."; -} -chdir "$serverRoot/bin/slapd/admin/bin"; - -# note: exec on NT doesn't work the same way as exec on Unix. On Unix, exec replaces the calling -# process with the called process. The parent, if waiting for the calling process, will happily -# wait for it's replacement. On NT however, the parent thinks the calling process has gone, and -# it doesn't know about the called process, so it stops waiting. So, we have to do a system() -# on NT to force the calling process to wait for the called process. On Unix, we can do the -# faster and more memory efficient exec() call. -if ($isNT) { - system {'./ds_create'} './ds_create', @fixargs; -} else { - exec {'./ds_create'} './ds_create', @fixargs; -} diff --git a/ldap/admin/src/DSAdmin.def b/ldap/admin/src/DSAdmin.def deleted file mode 100644 index 06b766b85..000000000 --- a/ldap/admin/src/DSAdmin.def +++ /dev/null @@ -1,43 +0,0 @@ -; BEGIN COPYRIGHT BLOCK -; This Program is free software; you can redistribute it and/or modify it under -; the terms of the GNU General Public License as published by the Free Software -; Foundation; version 2 of the License. -; -; This Program is distributed in the hope that it will be useful, but WITHOUT -; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -; FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License along with -; this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -; Place, Suite 330, Boston, MA 02111-1307 USA. -; -; In addition, as a special exception, Red Hat, Inc. gives You the additional -; right to link the code of this Program with code not covered under the GNU -; General Public License ("Non-GPL Code") and to distribute linked combinations -; including the two, subject to the limitations in this paragraph. Non-GPL Code -; permitted under this exception must only link to the code of this Program -; through those well defined interfaces identified in the file named EXCEPTION -; found in the source code files (the "Approved Interfaces"). The files of -; Non-GPL Code may instantiate templates or use macros or inline functions from -; the Approved Interfaces without causing the resulting work to be covered by -; the GNU General Public License. Only Red Hat, Inc. may make changes or -; additions to the list of Approved Interfaces. You must obey the GNU General -; Public License in all respects for all of the Program code and other code used -; in conjunction with the Program except the Non-GPL Code covered by this -; exception. If you modify this file, you may extend this exception to your -; version of the file, but you are not obligated to do so. 
If you do not wish to -; provide this exception without modification, you must delete this exception -; statement from your version and license this file solely under the GPL without -; exception. -; -; -; Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -; Copyright (C) 2005 Red Hat, Inc. -; All rights reserved. -; END COPYRIGHT BLOCK -; -LIBRARY "DSAdmin" -EXPORTS - boot_DSAdmin - _boot_DSAdmin = boot_DSAdmin - diff --git a/ldap/admin/src/DSAdmin.mk b/ldap/admin/src/DSAdmin.mk deleted file mode 100644 index 0a1db8f2d..000000000 --- a/ldap/admin/src/DSAdmin.mk +++ /dev/null @@ -1,163 +0,0 @@ -# -# BEGIN COPYRIGHT BLOCK -# This Program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This Program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. -# -# In addition, as a special exception, Red Hat, Inc. gives You the additional -# right to link the code of this Program with code not covered under the GNU -# General Public License ("Non-GPL Code") and to distribute linked combinations -# including the two, subject to the limitations in this paragraph. Non-GPL Code -# permitted under this exception must only link to the code of this Program -# through those well defined interfaces identified in the file named EXCEPTION -# found in the source code files (the "Approved Interfaces"). The files of -# Non-GPL Code may instantiate templates or use macros or inline functions from -# the Approved Interfaces without causing the resulting work to be covered by -# the GNU General Public License. Only Red Hat, Inc. may make changes or -# additions to the list of Approved Interfaces. You must obey the GNU General -# Public License in all respects for all of the Program code and other code used -# in conjunction with the Program except the Non-GPL Code covered by this -# exception. If you modify this file, you may extend this exception to your -# version of the file, but you are not obligated to do so. If you do not wish to -# provide this exception without modification, you must delete this exception -# statement from your version and license this file solely under the GPL without -# exception. -# -# -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. -# END COPYRIGHT BLOCK -# -# Makefile for the DSAdmin dynamic loaded perl module - -LDAP_SRC = ../.. -BUILD_ROOT = ../../.. 
- -NOSTDCLEAN=true # don't let nsconfig.mk define target clean -NOSTDSTRIP=true # don't let nsconfig.mk define target strip -NSPR20=true # probably should be defined somewhere else (not sure where) - -include $(BUILD_ROOT)/nsconfig.mk -include $(LDAP_SRC)/nsldap.mk - -BINDIR=$(LDAP_ADMIN_BIN_RELDIR) -OBJDEST=$(LDAP_ADMOBJDIR) - -INCLUDES += -I$(LDAP_SRC)/admin/include - -DS_SERVER_DEFS = -DNS_DS - -INFO = $(OBJDIR)/$(DIR) - -ifneq ($(ARCH), WINNT) -EXTRALDFLAGS += $(SSLLIBFLAG) -endif - -EXTRA_LIBS += $(LIBPERL_A) $(SETUPUTIL_S_LINK) $(LDAP_ADMLIB) \ - $(LDAPLINK) $(DEPLINK) $(ADMINUTIL_LINK) \ - $(NSPRLINK) $(NLSLINK) \ - $(NLSLINK_CONV_STATIC) - -# the security libs are statically linked into libds_admin.so; osf doesn't like -# to link against them again, it thinks they are multiply defined -ifneq ($(ARCH), OSF1) -EXTRA_LIBS += $(SECURITYLINK) $(DBMLINK) -else -#DLL_LDFLAGS=-shared -all -error_unresolved -taso -#EXTRA_LIBS += -lcxx -lcxxstd -lcurses -lc -endif - -ifeq ($(ARCH), WINNT) -PLATFORM_INCLUDE = -I$(BUILD_ROOT)/include/nt -SUBSYSTEM=console -EXTRA_LIBS+=comctl32.lib $(LDAP_SDK_LIBLDAP_DLL) $(LDAP_LIBUTIL) -EXTRA_LIBS_DEP+=$(LDAP_LIBUTIL_DEP) -DEF_FILE:=DSAdmin.def -LINK_DLL += /NODEFAULTLIB:msvcrtd.lib -endif - -ifeq ($(ARCH), AIX) -EXTRA_LIBS += $(DLL_EXTRA_LIBS) -lbsd -LD=ld -endif - -# absolutely do not try to build perl-stuff with warnings-as-errors. -# (duh.) -ifeq ($(ARCH), Linux) -CFLAGS := $(subst -Werror,,$(CFLAGS)) -endif - -DSADMIN_OBJS = DSAdmin.o -DSADMIN_BASENAME = DSAdmin$(DLL_PRESUFFIX).$(DLL_SUFFIX) - -OBJS= $(addprefix $(OBJDEST)/, $(DSADMIN_OBJS)) -DSADMIN_SO = $(addprefix $(BINDIR)/, $(DSADMIN_BASENAME)) - -EXTRA_LIBS_DEP = $(SETUPUTIL_DEP) - -# for Solaris, our most common unix build platform, we check for undefined -# symbols at link time so we don't catch them at run time. To do this, we -# set the -z defs flag. We also have to add explicitly link with the C and -# C++ runtime libraries (e.g., -lc) because, even though ld and CC link -# with them implicitly, -z defs will throw errors if we do not link with -# them explicitly. 
-ifeq ($(ARCH), SOLARIS) -LINK_DLL += -z defs -# removed -lcx from the following line -EXTRA_LIBS += -lCstd -lCrun -lm -lw -lc -# with the Forte 6 and later compilers, we must use CC to link -LD=CC -endif - -all: $(OBJDEST) $(BINDIR) $(DSADMIN_SO) - -dummy: - -@echo PERL_EXE = $(PERL_EXE) - -@echo PERL_EXENT = $(PERL_EXENT) - -@echo PERL_BASEDIR = $(PERL_BASEDIR) - -@echo PERL_ROOT = $(PERL_ROOT) - -@echo IS_ACTIVESTATE = $(IS_ACTIVESTATE) - -@echo PERL_CONFIG = $(PERL_CONFIG) - -@echo PERL_ROOT = $(PERL_ROOT) - -@echo PERL = $(PERL) - -@echo PERL_LIB = $(PERL_LIB) - -@echo PERL_ARCHLIB = $(PERL_ARCHLIB) - -@echo EXTRA_LIBS_DEP = $(EXTRA_LIBS_DEP) - abort - -ifeq ($(ARCH), WINNT) -$(DSADMIN_SO): $(OBJS) $(EXTRA_LIBS_DEP) $(DEF_FILE) - $(LINK_DLL) /DEF:$(DEF_FILE) -# linking this file causes a .exp and a .lib file to be generated which don't seem -# to be required while running, so I get rid of them - $(RM) $(subst .dll,.exp,$@) $(subst .dll,.lib,$@) -else -$(DSADMIN_SO): $(OBJS) - $(LINK_DLL) $(EXTRA_LIBS) -endif - -$(OBJDEST)/DSAdmin.o: $(OBJDEST)/DSAdmin.c -ifeq ($(ARCH), WINNT) - $(CC) -c $(CFLAGS) $(PERL_CFLAGS) $(MCC_INCLUDE) $(SETUPSDK_INCLUDE) $(PERL_INC) $< $(OFFLAG)$@ -else - $(CXX) $(EXCEPTIONS) -c $(CFLAGS) $(PERL_CFLAGS) $(MCC_INCLUDE) $(SETUPSDK_INCLUDE) $(PERL_INC) $< $(OFFLAG)$@ -endif - -$(OBJDEST)/DSAdmin.c: DSAdmin.xs - $(PERL) -w -I$(PERL_ARCHLIB) -I$(PERL_LIB) $(XSUBPP) $(XSPROTOARG) $(XSUBPPARGS) $< > $@ - -#MYCMD := "Mksymlists('NAME' => 'DSAdmin', 'DLBASE' => 'DSAdmin');" -#$(DEF_FILE): DSAdmin.xs -# $(PERL) -w "-I$(PERL_ARCHLIB)" "-I$(PERL_LIB)" -MExtUtils::Mksymlists -e "$(MYCMD)" - -clean: - -$(RM) $(OBJDEST)/DSAdmin.c $(OBJS) $(DSADMIN_SO) diff --git a/ldap/admin/src/DSAdmin.pm b/ldap/admin/src/DSAdmin.pm deleted file mode 100644 index a2c40d6a0..000000000 --- a/ldap/admin/src/DSAdmin.pm +++ /dev/null @@ -1,262 +0,0 @@ -# -# BEGIN COPYRIGHT BLOCK -# This Program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This Program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. -# -# In addition, as a special exception, Red Hat, Inc. gives You the additional -# right to link the code of this Program with code not covered under the GNU -# General Public License ("Non-GPL Code") and to distribute linked combinations -# including the two, subject to the limitations in this paragraph. Non-GPL Code -# permitted under this exception must only link to the code of this Program -# through those well defined interfaces identified in the file named EXCEPTION -# found in the source code files (the "Approved Interfaces"). The files of -# Non-GPL Code may instantiate templates or use macros or inline functions from -# the Approved Interfaces without causing the resulting work to be covered by -# the GNU General Public License. Only Red Hat, Inc. may make changes or -# additions to the list of Approved Interfaces. 
You must obey the GNU General -# Public License in all respects for all of the Program code and other code used -# in conjunction with the Program except the Non-GPL Code covered by this -# exception. If you modify this file, you may extend this exception to your -# version of the file, but you are not obligated to do so. If you do not wish to -# provide this exception without modification, you must delete this exception -# statement from your version and license this file solely under the GPL without -# exception. -# -# -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. -# END COPYRIGHT BLOCK -# - -package DSAdmin; - -use POSIX; - -use strict; -use vars qw($VERSION @ISA @EXPORT @EXPORT_OK $AUTOLOAD -$isNT $PATHSEP $quote $script_suffix $exe_suffix $os -$dll_suffix $argumentative @args $first $rest $errs $pos -); - -require Exporter; -require DynaLoader; -require AutoLoader; - -@ISA = qw(Exporter DynaLoader); -# Items to export into callers namespace by default. Note: do not export -# names by default without a very good reason. Use EXPORT_OK instead. -# Do not simply export all your public functions/methods/constants. - -# Items to export into callers namespace by default. Note: do not export -# names by default without a very good reason. Use EXPORT_OK instead. -# Do not simply export all your public functions/methods/constants. -@EXPORT = qw( - normalizeDN toLocal toUTF8 -); -$VERSION = '1.00'; - -bootstrap DSAdmin $VERSION; - -BEGIN { - require 'uname.lib'; - $isNT = -d '\\'; -# @INC = ( '.', '../../../admin/admin/bin' ); -# grep { s@/@\\@g } @INC if $isNT; - $PATHSEP = $isNT ? '\\' : '/'; - # NT needs quotes around some things unix doesn't - $quote = $isNT ? "\"" : ""; - - $script_suffix = $isNT ? ".bat" : ""; - $exe_suffix = $isNT ? ".exe" : ""; - if ($isNT) { - $os = "WINNT"; - } else { - $os = &uname("-s"); - } - - # dll suffix for shared libraries in old instance; note that the dll suffix - # may have changed for the new instance e.g. AIX now uses .so - if ( $os eq "AIX" ) { - $dll_suffix = "_shr.a"; - } - elsif ( $os eq "HP-UX" ) { - $arch = &uname("-p"); - if ( $arch eq "ia64" ) { - $dll_suffix = ".so"; - } else { - $dll_suffix = ".sl"; - } - } - elsif ( $os eq "WINNT" ) { - $dll_suffix = ".dll"; - } - else { - $dll_suffix = ".so"; - } -} - -sub getCwd { - my $command = $isNT ? "cd" : "/bin/pwd"; - open(PWDCMD, "$command 2>&1 |") or - die "Error: could not execute $command: $!"; - # without the following sleep, reading from the pipe will - # return nothing; I guess it gives the pwd command time - # to get some data to read . . . - sleep(1); - my $curdir; - while (<PWDCMD>) { - if (!$curdir) { - chomp($curdir = $_); - } - } - my $code = close(PWDCMD); -# if ($code || $?) { -# print "$command returned code=$code status=$? 
dir=$curdir\n"; -# } -# print "getCwd curdir=\[$curdir\]\n"; - return $curdir; -} - -# this is used to run the system() call, capture exit and signal codes, -# and die() upon badness; the first argument is a directory to change -# dir to, if any, and the rest are passed to system() -sub mySystem { - my $rc = &mySystemNoDie(@_); - my ($dir, @args) = @_; - if ($rc == 0) { -# success - } elsif ($rc == 0xff00) { - die "Error executing @args: error code $rc: $!"; - } elsif ($rc > 0x80) { - $rc >>= 8; - die "Error executing @args: error code $rc: $!"; - } else { - if ($rc & 0x80) { - $rc &= ~0x80; - } - die "Error executing @args: received signal $rc: $!"; - } - - # usually won't get return value - return $rc; -} - -# This version does not die but just returns the error code -sub mySystemNoDie { - my ($dir, @args) = @_; - if ($dir && ($dir ne "")) { - chdir($dir) or die "Could not change directory to $dir: $!"; - } - my $cmd = $args[0]; - # the system {$cmd} avoids some NT shell quoting problems if the $cmd - # needs to be quoted e.g. contains spaces; the map puts double quotes - # around the arguments on NT which are stripped by the command - # interpreter cmd.exe; but don't quote things which are already quoted - my @fixargs = map { /^[\"].*[\"]$/ ? $_ : $quote . $_ . $quote } @args; - my $rc = 0; - if ($cmd =~ /[.](bat|cmd)$/) { - # we have to pass batch files directly to the NT command interpreter - $cmd = $ENV{COMSPEC}; - if (!$cmd) { - $cmd = 'c:\winnt\system32\cmd.exe'; - } -# print "system $cmd /c \"@fixargs\"\n"; - $rc = 0xffff & system {$cmd} '/c', "\"@fixargs\""; - } else { - print "system $cmd @fixargs\n"; - $rc = 0xffff & system {$cmd} @fixargs; - } - return $rc; -} - -sub getTempFileName { - my $tmp = tmpnam(); - while (-f $tmp) { - $tmp = tmpnam(); - } - - return $tmp; -} - -sub getopts { - local($argumentative) = @_; - local(@args,$_,$first,$rest); - local($errs) = 0; - local($[) = 0; - - @args = split( / */, $argumentative ); - while(@ARGV && ($_ = $ARGV[0]) =~ /^-(.)(.*)/) { - ($first,$rest) = ($1,$2); - $pos = index($argumentative,$first); - if($pos >= $[) { - if($args[$pos+1] eq ':') { - shift(@ARGV); - if($rest eq '') { - ++$errs unless @ARGV; - $rest = shift(@ARGV); - } - eval "\$main::opt_$first = \$rest;"; - } - else { - eval "\$main::opt_$first = 1"; - if($rest eq '') { - shift(@ARGV); - } - else { - $ARGV[0] = "-$rest"; - } - } - } - else { - print STDERR "Unknown option: $first\n"; - ++$errs; - if($rest ne '') { - $ARGV[0] = "-$rest"; - } - else { - shift(@ARGV); - } - } - } - $errs == 0; -} - -# Autoload methods go after =cut, and are processed by the autosplit program. - -1; -__END__ -# Below is the stub of documentation for your module. You better edit it! - -=head1 NAME - -DSAdmin - Perl extension for directory server administrative utility functions - -=head1 SYNOPSIS - - use DSAdmin; - -=head1 DESCRIPTION - -The DSAdmin module is used by directory server administration scripts, such as -those used for installation/uninstallation, instance creation/removal, CGIs, -etc. - -=head1 AUTHOR - -Richard Megginson [email protected] - -=head1 SEE ALSO - -perl(1). 
- -=cut diff --git a/ldap/admin/src/DSAdmin.xs b/ldap/admin/src/DSAdmin.xs deleted file mode 100644 index f25909a67..000000000 --- a/ldap/admin/src/DSAdmin.xs +++ /dev/null @@ -1,108 +0,0 @@ -/** BEGIN COPYRIGHT BLOCK - * This Program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free Software - * Foundation; version 2 of the License. - * - * This Program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along with - * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA. - * - * In addition, as a special exception, Red Hat, Inc. gives You the additional - * right to link the code of this Program with code not covered under the GNU - * General Public License ("Non-GPL Code") and to distribute linked combinations - * including the two, subject to the limitations in this paragraph. Non-GPL Code - * permitted under this exception must only link to the code of this Program - * through those well defined interfaces identified in the file named EXCEPTION - * found in the source code files (the "Approved Interfaces"). The files of - * Non-GPL Code may instantiate templates or use macros or inline functions from - * the Approved Interfaces without causing the resulting work to be covered by - * the GNU General Public License. Only Red Hat, Inc. may make changes or - * additions to the list of Approved Interfaces. You must obey the GNU General - * Public License in all respects for all of the Program code and other code used - * in conjunction with the Program except the Non-GPL Code covered by this - * exception. If you modify this file, you may extend this exception to your - * version of the file, but you are not obligated to do so. If you do not wish to - * provide this exception without modification, you must delete this exception - * statement from your version and license this file solely under the GPL without - * exception. - * - * - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. - * All rights reserved. - * END COPYRIGHT BLOCK **/ -/* - This file contains the definitions of C functions callable from perl. - The perl interface for these functions is found in DSAdmin.pm. 
-*/ - -#include "dsalib.h" - -#include "nsutils.h" -#include "utf8.h" - -/* these are the perl include files needed */ -#ifdef __cplusplus -extern "C" { -#endif -#include "EXTERN.h" -/* The next two lines are hacks because someone build perl with gcc which -has this feature call __attribute__ which is not present with sun cc */ -#define HASATTRIBUTE -#define __attribute__(_attr_) - -#ifdef HPUX11 /* conflict with perl 'struct magic' and hpux 'struct magic' */ -#define magic p_magic -#define MAGIC p_MAGIC -#endif /* HPUX */ - -#include "perl.h" -#include "XSUB.h" -#ifdef __cplusplus -} -#endif - - -MODULE = DSAdmin PACKAGE = DSAdmin - -PROTOTYPES: DISABLE - -SV * -normalizeDN(dn) - char* dn - PREINIT: - char* temp_dn; - CODE: - /* duplicate the DN since dn_normalize_convert modifies the argument */ - temp_dn = (char *)malloc(strlen(dn) + 1); - strcpy(temp_dn, dn); - ST(0) = sv_newmortal(); - /* dn_normalize_convert returns its argument */ - sv_setpv( ST(0), dn_normalize_convert(temp_dn) ); - free(temp_dn); - -SV * -toLocal(s) - char* s - PREINIT: - char* temp_s; - CODE: - temp_s = UTF8ToLocal(s); - ST(0) = sv_newmortal(); - sv_setpv( ST(0), temp_s ); - nsSetupFree(temp_s); - -SV * -toUTF8(s) - char* s - PREINIT: - char* temp_s; - CODE: - temp_s = localToUTF8(s); - ST(0) = sv_newmortal(); - sv_setpv( ST(0), temp_s ); - nsSetupFree(temp_s); diff --git a/ldap/admin/src/Inf.pm b/ldap/admin/src/Inf.pm deleted file mode 100644 index 4eee2ade2..000000000 --- a/ldap/admin/src/Inf.pm +++ /dev/null @@ -1,275 +0,0 @@ -#!perl -# -# BEGIN COPYRIGHT BLOCK -# This Program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This Program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. -# -# In addition, as a special exception, Red Hat, Inc. gives You the additional -# right to link the code of this Program with code not covered under the GNU -# General Public License ("Non-GPL Code") and to distribute linked combinations -# including the two, subject to the limitations in this paragraph. Non-GPL Code -# permitted under this exception must only link to the code of this Program -# through those well defined interfaces identified in the file named EXCEPTION -# found in the source code files (the "Approved Interfaces"). The files of -# Non-GPL Code may instantiate templates or use macros or inline functions from -# the Approved Interfaces without causing the resulting work to be covered by -# the GNU General Public License. Only Red Hat, Inc. may make changes or -# additions to the list of Approved Interfaces. You must obey the GNU General -# Public License in all respects for all of the Program code and other code used -# in conjunction with the Program except the Non-GPL Code covered by this -# exception. If you modify this file, you may extend this exception to your -# version of the file, but you are not obligated to do so. 
If you do not wish to -# provide this exception without modification, you must delete this exception -# statement from your version and license this file solely under the GPL without -# exception. -# -# -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. -# END COPYRIGHT BLOCK -# - -package NSSetupSDK::Inf; - -use NSSetupSDK::Base; - -%otherEnc = ('local' => "utf8", utf8 => "local"); -# mapping of encoding to the subroutine which converts from that encoding -%convertEnc = ('local' => \&toUTF8, utf8 => \&toLocal); - -############################################################################# -# Creator, the argument (optional) is the INF file to read -# -sub new { - my ($class, $filename) = @_; - my $self = {}; - - bless $self, $class; - - if ($filename) { - $self->read($filename); - } - - return $self; -} - -############################################################################# -# Read in and initialize ourself with the given INF file. The file will be -# encoded in either local or utf8 encoding. The type of encoding is given -# in the $enc argument. We first read in the values in the given encoding, -# then convert the file to the other encoding, then read in and initialize -# the values for the other encoding -# -sub read { - my ($self, $filename, $enc) = @_; - my $inf = {}; # the contents of the inf file - my @sectionList = (); # a list of section names, in order - my $sectionName = "General"; # default section name - - open INF, $filename or return undef; - - push @sectionList, $sectionName; - while (<INF>) { - next if /^\s*#/; # skip comments - next if /^\s*$/; # skip blank lines - if (/^\s*\[([^\]]+)\]/) { - $sectionName = $1; # new section - if ($sectionName cmp "General") { - # General is already in the list - push @sectionList, $sectionName; - } - } else { - chop; - ($name, $value) = split(/\s*=\s*/, $_, 2); -# print "name=$name value=$value\n"; - $inf->{$sectionName}{$enc}{$name} = $value; - $inf->{$sectionName}{$otherEnc{$enc}}{$name} = - &{$convertEnc{$enc}}($value); - } - } - close INF; - - $self->{inf} = $inf; - $self->{sections} = [ @sectionList ]; - -# foreach $section (keys %inf) { -# print '[', $section, ']', "\n"; -# foreach $name (keys %{ $inf{$section} }) { -# print "local $name=$inf{$section}{local}{$name}\n"; -# print "UTF8 $name=$inf{$section}{utf8}{$name}\n"; -# } -# } - - return 1; -} - -sub readLocal { - my $self = shift; - return $self->read(@_, 'local'); -} - -sub readUTF8 { - my $self = shift; - return $self->read(@_, 'utf8'); -} - -############################################################################# -# Init from a hash; used to create a subsection as another inf -# -sub init { - my ($self, $hashRef) = @_; - my $inf = {}; - $inf->{General} = $hashRef; - $self->{inf} = $inf; - $self->{sections} = [ "General" ]; - - return 1; -} - -############################################################################# -# return the number of sections -# -sub numSections { - my $self = shift; - return scalar(@{$self->{sections}}); -} - -############################################################################# -# return the section corresponding to the given name or number -# -sub getSection { - my ($self, $section) = @_; - if ($section =~ /\d+/) { # section is a number - $section = $self->{sections}->[$section]; - } - - my $newSec = new Inf; - $newSec->init($self->{inf}->{$section}); - return $newSec; -} - 
-############################################################################# -# return the value of the given name in local encoding -# -sub getLocal { - my ($self, $name) = @_; - return getFromSection($self, "General", $name, "local"); -} - -############################################################################# -# return the value of the given name in UTF8 encoding -# -sub getUTF8 { - my ($self, $name) = @_; - return getFromSection($self, "General", $name, "utf8"); -} - -############################################################################# -# return the value of the given name in UTF8 encoding -# -sub get { - my ($self, $name) = @_; - return getFromSection($self, "General", $name, "utf8"); -} - -############################################################################# -# return the value of the given name in the given section -# -sub getFromSection { - my ($self, $section, $name, $enc) = @_; -# print "self inf = ", %{ $self->{inf} }, "\n"; -# print "self inf section = ", %{ $self->{inf}->{$section} }, "\n"; - return $self->{inf}->{$section}{$enc}{$name}; -} - -############################################################################# -# set the value -# -sub setInSection { - my ($self, $section, $name, $value, $enc) = @_; - if (!$enc) { - $enc = 'utf8'; - } - $self->{inf}->{$section}{$enc}{$name} = $value; - $self->{inf}->{$section}{$otherEnc{$enc}}{$name} = - &{$convertEnc{$enc}}($value); -} - -############################################################################# -# set the value; value is locally encoded -# -sub setLocal { - my ($self, $name, $value) = @_; - setInSection($self, "General", $name, $value, "local"); -} - -############################################################################# -# set the value; value is UTF-8 encoded -# -sub setUTF8 { - my ($self, $name, $value) = @_; - setInSection($self, "General", $name, $value, "utf8"); -} - -############################################################################# -# set the value, assume UTF-8 encoding -# -sub set { - my ($self, $name, $value) = @_; - setInSection($self, "General", $name, $value, "utf8"); -} - -sub write { - my ($self, $ref, $enc) = @_; - my $needClose = undef; - if (!$enc) { - $enc = "local"; # write file in local encoding by default - } - if (!$ref) { - # no filehandle given - $ref = \*STDOUT; - } elsif (!ref($ref)) { # not a ref, assume scalar filename - # filename - open(OUTPUT, ">$ref") or die "Error: could not write file $ref: $!"; - $ref = \*OUTPUT; - $needClose = 1; # be sure to close - } elsif (ref($ref) eq 'SCALAR') { - # filename - open(OUTPUT, ">$$ref") or die "Error: could not write file $$ref: $!"; - $ref = \*OUTPUT; - $needClose = 1; # be sure to close - } # else already a file handle ref - foreach $secName (@{ $self->{sections} }) { - print $ref "[", $secName, "]\n"; - foreach $name (keys %{ $self->{inf}->{$secName}{$enc} }) { - $value = $self->{inf}->{$secName}{$enc}{$name}; - print $ref $name, "=", $value, "\n"; - } - print $ref "\n"; - } - if ($needClose) { - close $ref; - } -} - -sub writeLocal { - my ($self, $ref) = @_; - $self->write($ref, 'local'); -} - -sub writeUTF8 { - my ($self, $ref) = @_; - $self->write($ref, 'utf8'); -} - -1; # the mandatory TRUE return from the package diff --git a/ldap/admin/src/Makefile b/ldap/admin/src/Makefile index 5210982ca..e5e727937 100644 --- a/ldap/admin/src/Makefile +++ b/ldap/admin/src/Makefile @@ -348,10 +348,12 @@ installPerlFiles: $(BINDIR) $(PERL_SCRIPTS_DEST) $(BINDIR)/%: % -@$(RM) $@ $(CP) $< $@ + chmod +x $@ 
$(LDAP_SERVER_RELDIR)/%: % $(LDAP_SERVER_RELDIR) -@$(RM) $@ $(CP) $< $@ + chmod +x $@ $(INST_INCLUDES): install_keywords.h -@$(RM) $@ @@ -360,3 +362,4 @@ $(INST_INCLUDES): install_keywords.h $(SCRIPTSDIR)/template-%: scripts/template-% $(SCRIPTSDIR) -@$(RM) $@ $(CP) $< $@ + chmod +x $@ diff --git a/ldap/admin/src/configure_instance.cpp b/ldap/admin/src/configure_instance.cpp index 50cced266..5beb9ff2e 100644 --- a/ldap/admin/src/configure_instance.cpp +++ b/ldap/admin/src/configure_instance.cpp @@ -1964,5 +1964,60 @@ reconfigure_instance(int argc, char *argv[]) } while (ldapent.next() == OKAY); + // we no longer use nsperl - any CGIs which we used to invoke via perl?perlscript + // are now invoked directly by making the perl script executable - we need to + // search for all nsexecref: perl?perlscript and replace them with + // nsexecref: perlscript + filter = NSString("(nsexecref=perl*)"); + scope = LDAP_SCOPE_SUBTREE; + baseDN = name_netscaperootDN; + + ldapent.clear(); + le = ldapent.retrieve(filter, scope, baseDN); + if (le != OKAY) + { + if (le == NOT_FOUND) { + dsLogMessage(SETUP_LOG_INFO, "Slapd", + "No old nsperl references found"); + } else { + dsLogMessage(SETUP_LOG_FATAL, "Slapd", + "ERROR: Could not find old nsperl references\n" + "URL %s user id %s DN %s (%d:%s)", + installInfo->get(SLAPD_KEY_K_LDAP_URL), + installInfo->get(SLAPD_KEY_SERVER_ADMIN_ID), + (const char *)baseDN, + le.errorCode(), le.msg()); + return le.errorCode(); + } + } else { + do + { + LdapEntry repEntry(ldapent.ldap()); + repEntry.retrieve(ldapent.entryDN()); + char *val = repEntry.getAttribute("nsexecref"); + const char *ptr = 0; + if (val && *val && (ptr = strstr(val, "perl?"))) { + ptr = strchr(ptr, '?'); + ptr++; + NSString newscript = NSString(ptr); + repEntry.setAttribute("nsexecref", newscript); + } + + le = repEntry.replace(repEntry.entryDN()); + if (le != OKAY) + { + dsLogMessage(SETUP_LOG_FATAL, "Slapd", + "ERROR: Could not fix old nsperl reference\n" + "URL %s user id %s DN %s (%d:%s)" , + installInfo->get(SLAPD_KEY_K_LDAP_URL), + installInfo->get(SLAPD_KEY_SERVER_ADMIN_ID), + (const char *)repEntry.entryDN(), + le.errorCode(), le.msg()); + return le.errorCode(); + } + } + while (ldapent.next() == OKAY); + } + return 0; } diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c index 11ff03e8a..08136ccf5 100644 --- a/ldap/admin/src/create_instance.c +++ b/ldap/admin/src/create_instance.c @@ -765,9 +765,13 @@ char *gen_perl_script_auto(char *s_root, char *cs_path, char *name, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, name); PR_snprintf(fn, sizeof(fn), "%s%c%s", cs_path, FILE_PATHSEP, name); +#ifdef USE_NSPERL PR_snprintf(myperl, sizeof(myperl), "!%s%cbin%cslapd%cadmin%cbin%cperl", s_root, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP); +#else + strcpy(myperl, "!/usr/bin/env perl"); +#endif table[0][0] = "DS-ROOT"; table[0][1] = s_root; @@ -812,9 +816,13 @@ char *gen_perl_script_auto_for_migration(char *s_root, char *cs_path, char *name FILE_PATHSEP, name); PR_snprintf(fn, sizeof(fn), "%s%cbin%cslapd%cadmin%cbin%c%s", s_root, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, name); +#ifdef USE_NSPERL PR_snprintf(myperl, sizeof(myperl), "!%s%cbin%cslapd%cadmin%cbin%cperl", s_root, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP); +#else + strcpy(myperl, "!/usr/bin/env perl"); +#endif table[0][0] = "DS-ROOT"; table[0][1] = s_root; diff --git a/ldap/admin/src/ds_newinst.pl 
b/ldap/admin/src/ds_newinst.pl index 401d4db08..81dde5dd5 100644 --- a/ldap/admin/src/ds_newinst.pl +++ b/ldap/admin/src/ds_newinst.pl @@ -1,3 +1,4 @@ +#!/usr/bin/env perl # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software diff --git a/ldap/admin/src/ds_viewlog.pl b/ldap/admin/src/ds_viewlog.pl index 358d7d7b8..2581cdded 100644 --- a/ldap/admin/src/ds_viewlog.pl +++ b/ldap/admin/src/ds_viewlog.pl @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/getConfigInfo b/ldap/admin/src/getConfigInfo index f3e4b3561..981bcec9d 100644 --- a/ldap/admin/src/getConfigInfo +++ b/ldap/admin/src/getConfigInfo @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/import2info b/ldap/admin/src/import2info index 9c65d9149..ad225e68a 100755 --- a/ldap/admin/src/import2info +++ b/ldap/admin/src/import2info @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/makemccvlvindexes b/ldap/admin/src/makemccvlvindexes index fb63e47d3..03717b000 100644 --- a/ldap/admin/src/makemccvlvindexes +++ b/ldap/admin/src/makemccvlvindexes @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/makevlvindex b/ldap/admin/src/makevlvindex index 322ecc24d..592bd2dc4 100644 --- a/ldap/admin/src/makevlvindex +++ b/ldap/admin/src/makevlvindex @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/makevlvsearch b/ldap/admin/src/makevlvsearch index 0423767d3..45ee5a1fd 100644 --- a/ldap/admin/src/makevlvsearch +++ b/ldap/admin/src/makevlvsearch @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/migrateInstance b/ldap/admin/src/migrateInstance index 14cd5a547..95cab4b17 100644 --- a/ldap/admin/src/migrateInstance +++ b/ldap/admin/src/migrateInstance @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/migrateLocalDB b/ldap/admin/src/migrateLocalDB index 522532adc..90e9a97ff 100644 --- a/ldap/admin/src/migrateLocalDB +++ b/ldap/admin/src/migrateLocalDB @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/migratePwdFile b/ldap/admin/src/migratePwdFile index 7677012ef..5b76381a3 100644 --- a/ldap/admin/src/migratePwdFile +++ b/ldap/admin/src/migratePwdFile @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/migrateTo4 b/ldap/admin/src/migrateTo4 index 22c17c52e..ca8e3d149 100644 --- a/ldap/admin/src/migrateTo4 +++ b/ldap/admin/src/migrateTo4 @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN 
COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/migratedsgw b/ldap/admin/src/migratedsgw index a15c580d7..9801788cf 100755 --- a/ldap/admin/src/migratedsgw +++ b/ldap/admin/src/migratedsgw @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/ns-newpwpolicy.pl b/ldap/admin/src/ns-newpwpolicy.pl index c91aa822c..7a1a3c5d4 100644 --- a/ldap/admin/src/ns-newpwpolicy.pl +++ b/ldap/admin/src/ns-newpwpolicy.pl @@ -1,4 +1,4 @@ -# perl script +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under @@ -39,6 +39,28 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $savedir = `pwd`; + my $dirname = `dirname $0`; + chdir $dirname; + my $sroot = `pwd`; + $sroot =~ s@/slapd-.*@@; + chomp($sroot); + chdir $savedir; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Add new password policy specific entries ############################################################################# diff --git a/ldap/admin/src/scripts/template-cl-dump.pl b/ldap/admin/src/scripts/template-cl-dump.pl index 009e64164..747a306a4 100755 --- a/ldap/admin/src/scripts/template-cl-dump.pl +++ b/ldap/admin/src/scripts/template-cl-dump.pl @@ -96,6 +96,23 @@ # DSHOME/bin/slapd/admin/scripts # ################################################################################ +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} + $usage="Usage: $0 [-h host] [-p port] [-D bind-dn] [-w bind-password | -P bind-cert] [-r replica-roots] [-o output-file] [-c] [-v]\n\n $0 -i changelog-ldif-file-with-base64encoding [-o output-file] [-c]"; use Getopt::Std; # Parse command line arguments diff --git a/ldap/admin/src/scripts/template-migrate50to51 b/ldap/admin/src/scripts/template-migrate50to51 index 518a978b0..768860e80 100644 --- a/ldap/admin/src/scripts/template-migrate50to51 +++ b/ldap/admin/src/scripts/template-migrate50to51 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. 
/usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a 5.0 directory server to a 5.1 directory server ####################################################################################################### diff --git a/ldap/admin/src/scripts/template-migrate5to6 b/ldap/admin/src/scripts/template-migrate5to6 index 63c5774d2..9a8b25772 100644 --- a/ldap/admin/src/scripts/template-migrate5to6 +++ b/ldap/admin/src/scripts/template-migrate5to6 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a 5.x directory server to a 6.2 directory server ####################################################################################################### diff --git a/ldap/admin/src/scripts/template-migrate5to7 b/ldap/admin/src/scripts/template-migrate5to7 index 622b22166..f2e10e2ab 100644 --- a/ldap/admin/src/scripts/template-migrate5to7 +++ b/ldap/admin/src/scripts/template-migrate5to7 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a 5.x directory server to a 7.0 directory server ####################################################################################################### diff --git a/ldap/admin/src/scripts/template-migrate6to7 b/ldap/admin/src/scripts/template-migrate6to7 index da7996103..284fac9e5 100644 --- a/ldap/admin/src/scripts/template-migrate6to7 +++ b/ldap/admin/src/scripts/template-migrate6to7 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. 
/usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a 6.x directory server to a 7.0 directory server ####################################################################################################### diff --git a/ldap/admin/src/scripts/template-migrateInstance5 b/ldap/admin/src/scripts/template-migrateInstance5 index f7224e8a5..7c3a7004e 100644 --- a/ldap/admin/src/scripts/template-migrateInstance5 +++ b/ldap/admin/src/scripts/template-migrateInstance5 @@ -140,16 +140,6 @@ if (!(-d $serverHome)) { exit(1); } $ENV{"$LIB_PATH"} = "$root${PATHSEP}lib${SEP}".$ENV{"$LIB_PATH"} ; -if ($isSolaris9) { - $ENV{"$LIB_PATH"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.005_03${PATHSEP}lib${PATHSEP}sun4-solaris${PATHSEP}CORE${SEP}".$ENV{"$LIB_PATH"} ; -} - -if ($isNT) { - $ENV{"PERL5LIB"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.005_03${PATHSEP}site${PATHSEP}lib${SEP}".$ENV{"PERL5LIB"} ; -} -else { - $ENV{"PERL5LIB"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.005_03${PATHSEP}lib${PATHSEP}site${SEP}".$ENV{"PERL5LIB"} ; -} # get the version of the DS to migrate ($oldVersion, $oldMinor) = &getVersion($oldDir); diff --git a/ldap/admin/src/scripts/template-migrateInstance6 b/ldap/admin/src/scripts/template-migrateInstance6 index 422e94502..fcca538ec 100644 --- a/ldap/admin/src/scripts/template-migrateInstance6 +++ b/ldap/admin/src/scripts/template-migrateInstance6 @@ -144,16 +144,6 @@ if ($olddatadir && !(-d $olddatadir)) { exit(1); } $ENV{"$LIB_PATH"} = "$root${PATHSEP}lib${SEP}".$ENV{"$LIB_PATH"} ; -if ($isSolaris9) { - $ENV{"$LIB_PATH"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.6.1${PATHSEP}lib${PATHSEP}sun4-solaris${PATHSEP}CORE${SEP}".$ENV{"$LIB_PATH"} ; -} - -if ($isNT) { - $ENV{"PERL5LIB"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.6.1${PATHSEP}site${PATHSEP}lib${SEP}".$ENV{"PERL5LIB"} ; -} -else { - $ENV{"PERL5LIB"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.6.1${PATHSEP}lib${PATHSEP}site${SEP}".$ENV{"PERL5LIB"} ; -} # get the version of the DS to migrate ($oldVersion, $oldMinor) = &getVersion($oldDir, $oldversionstr); diff --git a/ldap/admin/src/scripts/template-migrateInstance7 b/ldap/admin/src/scripts/template-migrateInstance7 index 2493a82e4..acc0e987b 100644 --- a/ldap/admin/src/scripts/template-migrateInstance7 +++ b/ldap/admin/src/scripts/template-migrateInstance7 @@ -144,16 +144,6 @@ if ($olddatadir && !(-d $olddatadir)) { exit(1); } $ENV{"$LIB_PATH"} = "$root${PATHSEP}lib${SEP}".$ENV{"$LIB_PATH"} ; -if ($isSolaris9) { - $ENV{"$LIB_PATH"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.6.1${PATHSEP}lib${PATHSEP}sun4-solaris${PATHSEP}CORE${SEP}".$ENV{"$LIB_PATH"} ; -} - -if ($isNT) { - $ENV{"PERL5LIB"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.6.1${PATHSEP}site${PATHSEP}lib${SEP}".$ENV{"PERL5LIB"} ; -} -else { - $ENV{"PERL5LIB"} = "$root${PATHSEP}lib${PATHSEP}nsPerl5.6.1${PATHSEP}lib${PATHSEP}site${SEP}".$ENV{"PERL5LIB"} ; -} # get the version of the DS to migrate ($oldVersion, $oldMinor) = &getVersion($oldDir, $oldversionstr); diff --git a/ldap/admin/src/scripts/template-migrateTo5 b/ldap/admin/src/scripts/template-migrateTo5 index d98495259..320236ad4 100755 --- a/ldap/admin/src/scripts/template-migrateTo5 +++ 
b/ldap/admin/src/scripts/template-migrateTo5 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a 4.0 directory server to a 5.x directory server ######################################################################################################## diff --git a/ldap/admin/src/scripts/template-migrateTo6 b/ldap/admin/src/scripts/template-migrateTo6 index 2f1c8c950..eeeb9e693 100644 --- a/ldap/admin/src/scripts/template-migrateTo6 +++ b/ldap/admin/src/scripts/template-migrateTo6 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a old directory server to a 6.2 directory server ######################################################################################################## diff --git a/ldap/admin/src/scripts/template-migrateTo7 b/ldap/admin/src/scripts/template-migrateTo7 index 4ee514e8f..4a7b2c378 100644 --- a/ldap/admin/src/scripts/template-migrateTo7 +++ b/ldap/admin/src/scripts/template-migrateTo7 @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. /usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Migrate a old directory server to a 7.0 directory server ######################################################################################################## diff --git a/ldap/admin/src/scripts/template-ns-newpwpolicy.pl b/ldap/admin/src/scripts/template-ns-newpwpolicy.pl index 55388f337..a15e595d7 100755 --- a/ldap/admin/src/scripts/template-ns-newpwpolicy.pl +++ b/ldap/admin/src/scripts/template-ns-newpwpolicy.pl @@ -39,6 +39,22 @@ # END COPYRIGHT BLOCK # +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. 
/usr/lib /usr/lib/perl5) +BEGIN { + my $sroot = "{{DS-ROOT}}"; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} # Add new password policy specific entries ############################################################################# diff --git a/ldap/admin/src/scripts/template-repl-monitor-cgi.pl b/ldap/admin/src/scripts/template-repl-monitor-cgi.pl index ad88c1e61..c9cbd92f6 100755 --- a/ldap/admin/src/scripts/template-repl-monitor-cgi.pl +++ b/ldap/admin/src/scripts/template-repl-monitor-cgi.pl @@ -56,8 +56,8 @@ if ($cgiVars{'admurl'}) { $params .= " -u \"$admurl\""; } $siteroot = $cgiVars{'siteroot'}; -$perl = "$siteroot/bin/slapd/admin/bin/perl"; -$ENV{'LD_LIBRARY_PATH'} = "$siteroot/lib:$siteroot/lib/nsPerl5.005_03/lib"; +$ENV{'LD_LIBRARY_PATH'} = "$siteroot/shared/lib"; +$ENV{'SHLIB_PATH'} = "$siteroot/shared/lib"; # Save user-specified parameters as cookies in monreplication.properties. # Sync up with the property file so that monreplication2 is interval, and @@ -65,8 +65,8 @@ $ENV{'LD_LIBRARY_PATH'} = "$siteroot/lib:$siteroot/lib/nsPerl5.005_03/lib"; $propertyfile = "$siteroot/bin/admin/admin/bin/property/monreplication.properties"; $edit1 = "s#monreplication2=.*#monreplication2=$cgiVars{'refreshinterval'}#;"; $edit2 = "s#^monreplication3=.*#monreplication3=$cgiVars{'configfile'}#;"; -system("$perl -p -i.bak -e \"$edit1\" -e \"$edit2\" $propertyfile"); +system("perl -p -i.bak -e \"$edit1\" -e \"$edit2\" $propertyfile"); # Now the real work $replmon = "$siteroot/bin/slapd/admin/scripts/template-repl-monitor.pl"; -system("$perl $replmon $params"); +system("perl -I$siteroot/lib/perl/arch -I$siteroot/lib/perl $replmon $params"); diff --git a/ldap/admin/src/scripts/template-repl-monitor.pl b/ldap/admin/src/scripts/template-repl-monitor.pl index 918c92712..60a7b7bdf 100755 --- a/ldap/admin/src/scripts/template-repl-monitor.pl +++ b/ldap/admin/src/scripts/template-repl-monitor.pl @@ -146,16 +146,36 @@ # <DSHOME>/bin/slapd/admin/bin/perl repl-monitor.pl # # 3. Set environment variable PERL5LIB to your Perl lib dirs where -# Mozilla::LDAP module can be located. +# Mozilla::LDAP module can be located. This should be under serverroot/lib/perl +# e.g. PERL5LIB="serverroot/lib/perl/arch:serverroot/lib/perl" # -# 4. Invoke the script as follows if <MYPERLDIR>/lib/site contains -# Mozilla/LDAP: -# <MYPERLDIR>/bin/perl -I <MYPERLDIR>/lib/site repl-monitor.pl +# 4. Set LD_LIBRARY_PATH (or SHLIB_PATH) to point to the location of our +# bundled shared libraries e.g. LD_LIBRARY_PATH="serverroot/shared/lib" # -# If you get error "Can't load ...", try to set environment variable -# for library path to <DSHOME>/lib:<DSHOME>/lib/nsPerl5.005_03/lib +# 5. Invoke the script as follows if <MYPERLDIR> (serverroot/lib/perl) contains +# Mozilla/LDAP: +# <MYPERLDIR>/bin/perl -I <MYPERLDIR>/arch -I <MYPERLDIR> repl-monitor.pl # ############################################################################# +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. 
/usr/lib /usr/lib/perl5) +# this script is always invoked by repl-monitor-cgi.pl, which sets all of these +# If using this script standalone, be sure to set the shared lib path and +# the path to the perldap modules. +# BEGIN { +# my $sroot = "{{DS-ROOT}}"; +# push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; +# if ($ENV{LD_LIBRARY_PATH}) { +# $ENV{LD_LIBRARY_PATH} .= ":"; +# } +# $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; +# # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms +# if ($ENV{SHLIB_PATH}) { +# $ENV{SHLIB_PATH} .= ":"; +# } +# $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +# } $usage = "\nusage: $0 -f configuration-file [-h host] [-p port] [-r] [-u refresh-url] [-t refresh-interval]\n\nor : $0 -v\n"; use Getopt::Std; # parse command line arguments diff --git a/ldap/admin/src/scripts/template-verify-db.pl b/ldap/admin/src/scripts/template-verify-db.pl index 4004f5ae3..a95ff21ed 100644 --- a/ldap/admin/src/scripts/template-verify-db.pl +++ b/ldap/admin/src/scripts/template-verify-db.pl @@ -95,6 +95,13 @@ sub getLastLogfile return \$logfile; } +$isWin = -d '\\'; +if ($isWin) { + $NULL = "nul"; +} else { + $NULL = "/dev/null"; +} + print("*****************************************************************\n"); print("verify-db: This tool should only be run if recovery start fails\n" . "and the server is down. If you run this tool while the server is\n" . @@ -109,7 +116,7 @@ for (my $i = 0; $i < @$dbdirs; $i++) { # run ../bin/slapd/server/db_printlog -h <dbdir> for each <dbdir> print "Verify log files in $$dbdirs[$i] ... "; - open(PRINTLOG, "..{{SEP}}bin{{SEP}}slapd{{SEP}}server{{SEP}}db_printlog -h $$dbdirs[$i] 2>&1 1> nul |"); + open(PRINTLOG, "..{{SEP}}bin{{SEP}}slapd{{SEP}}server{{SEP}}db_printlog -h $$dbdirs[$i] 2>&1 1> $NULL |"); sleep 1; my $haserr = 0; while ($l = <PRINTLOG>) @@ -147,7 +154,7 @@ for (my $i = 0; $i < @$dbdirs; $i++) { my $thisdb = $$dbdirs[$i] . "{{SEP}}" . $db; print "Verify $thisdb ... "; - open(DBVERIFY, "..{{SEP}}bin{{SEP}}slapd{{SEP}}server{{SEP}}db_verify $thisdb 2>&1 1> nul |"); + open(DBVERIFY, "..{{SEP}}bin{{SEP}}slapd{{SEP}}server{{SEP}}db_verify $thisdb 2>&1 1> $NULL |"); sleep 1; my $haserr = 0; while ($l = <DBVERIFY>) @@ -188,7 +195,7 @@ for (my $i = 0; $i < @$dbdirs; $i++) { my $thisdb = $$instdirs[$j] . "{{SEP}}" . $db; print "Verify $thisdb ... 
"; - open(DBVERIFY, "..{{SEP}}bin{{SEP}}slapd{{SEP}}server{{SEP}}db_verify $thisdb 2>&1 1> null |"); + open(DBVERIFY, "..{{SEP}}bin{{SEP}}slapd{{SEP}}server{{SEP}}db_verify $thisdb 2>&1 1> $NULL |"); sleep 1; my $haserr = 0; while ($l = <DBVERIFY>) diff --git a/ldap/admin/src/updatedsgw b/ldap/admin/src/updatedsgw index a0318d320..e7b055ba8 100755 --- a/ldap/admin/src/updatedsgw +++ b/ldap/admin/src/updatedsgw @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/admin/src/upgradeServer b/ldap/admin/src/upgradeServer index 7a462189d..afc26606b 100755 --- a/ldap/admin/src/upgradeServer +++ b/ldap/admin/src/upgradeServer @@ -1,4 +1,4 @@ -#!perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under @@ -266,7 +266,7 @@ sub get_changelog_dir { last; # not found, just abort } if ($inClEntry && /^nsslapd-changelogdir:\s*/i) { - $clDir = $'; + $clDir = $'; #' chomp($clDir); last; } @@ -407,6 +407,48 @@ sub getChangelogVersion { return $version; } +sub instantiate_new_package_scripts { + + my @newtemplates = ( + "$sroot/bin/slapd/admin/scripts/template-cl-dump.pl", + "$sroot/bin/slapd/admin/scripts/template-migrate5to7", + "$sroot/bin/slapd/admin/scripts/template-migrate6to7", + "$sroot/bin/slapd/admin/scripts/template-migrateInstance7", + "$sroot/bin/slapd/admin/scripts/template-migrateTo4", + "$sroot/bin/slapd/admin/scripts/template-migrateTo7", + "$sroot/bin/slapd/admin/scripts/template-repl-monitor-cgi.pl", + ); + + foreach $src ( @newtemplates ) { + $dest = "$sroot/bin/slapd/admin/bin/$1" if $src =~ /.*template-(.*)$/; + # repl mon cgi script goes in bin/admin/admin/bin + if ($src =~ /repl-monitor-cgi.pl/) { + $dest =~ s@/bin/slapd/admin/bin/@/bin/admin/admin/bin/@; + } + unless ( open ( template, $src )) { + print "Can't open $src: $!\n"; + next; + } + unless ( open ( target, ">$dest" )) { + print "Can't open $dest: $!\n"; + close template; + next; + } + while ( <template> ) { + s#{{PERL-EXEC}}#!/usr/bin/env perl#g; + s#{{DS-ROOT}}#$sroot#g; + s#{{SEP}}#${PS}#g; + print target; + } + close template; + close target; + + chmod 0755, $dest; + } + + return 0; +} + # # Some scripts generated by create_instance may not # get generated during in-place upgrade. 
This function @@ -418,8 +460,18 @@ sub getChangelogVersion { # sub instantiate_new_scripts { - @newtemplates = ( - "$sroot/bin/slapd/admin/scripts/template-db2index.pl" + my @newtemplates = ( + "$sroot/bin/slapd/admin/scripts/template-verify-db.pl", + "$sroot/bin/slapd/admin/scripts/template-db2index.pl", + "$sroot/bin/slapd/admin/scripts/template-bak2db.pl", + "$sroot/bin/slapd/admin/scripts/template-db2bak.pl", + "$sroot/bin/slapd/admin/scripts/template-db2ldif.pl", + "$sroot/bin/slapd/admin/scripts/template-dsml-activate.pl", + "$sroot/bin/slapd/admin/scripts/template-ldif2db.pl", + "$sroot/bin/slapd/admin/scripts/template-ns-accountstatus.pl", + "$sroot/bin/slapd/admin/scripts/template-ns-activate.pl", + "$sroot/bin/slapd/admin/scripts/template-ns-inactivate.pl", + "$sroot/bin/slapd/admin/scripts/template-ns-newpwpolicy.pl" ); $host = localhost; @@ -444,7 +496,7 @@ sub instantiate_new_scripts { next; } while ( <template> ) { - s#{{PERL-EXEC}}#!$sroot/bin/slapd/admin/bin/perl#g; + s#{{PERL-EXEC}}#!/usr/bin/env perl#g; s#{{DS-ROOT}}#$sroot#g; s#{{SEP}}#${PS}#g; s#{{ROOT-DN}}#$rootdn#g; @@ -460,20 +512,28 @@ sub instantiate_new_scripts { } # copy schema is safe even if same version -copy_schema_files; +copy_schema_files if ($prefix); # modify only if necessary -modify_dse_ldif; +modify_dse_ldif if ($prefix); # fix changelog is safe even if same version - no op -my $clDir = get_changelog_dir; -if ($clDir && -d $clDir) { - my $oldclversion = getChangelogVersion($clDir); - my $clversion = "2.0"; # with DS 6.1 - - if ($oldclversion < $clversion) { - fix_changelog($clDir, $clversion); - } +if ($prefix) { + my $clDir = get_changelog_dir; + if ($clDir && -d $clDir) { + my $oldclversion = getChangelogVersion($clDir); + my $clversion = "2.0"; # with DS 6.1 + + if ($oldclversion < $clversion) { + fix_changelog($clDir, $clversion); + } + } } -instantiate_new_scripts (); +# fix instance specific scripts +if ($prefix) { + instantiate_new_scripts (); +} else { +# fix non instance specific (package) scripts + instantiate_new_package_scripts (); +} diff --git a/ldap/clients/orgchart/myorg.pl b/ldap/clients/orgchart/myorg.pl index 6a0561f2b..e00fd5ba0 100755 --- a/ldap/clients/orgchart/myorg.pl +++ b/ldap/clients/orgchart/myorg.pl @@ -1,4 +1,4 @@ -#!../../../bin/slapd/admin/bin/perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under diff --git a/ldap/clients/orgchart/org.pl b/ldap/clients/orgchart/org.pl index 44e649bd5..af1928e13 100755 --- a/ldap/clients/orgchart/org.pl +++ b/ldap/clients/orgchart/org.pl @@ -1,4 +1,4 @@ -#!../../../bin/slapd/admin/bin/perl +#!/usr/bin/env perl # # BEGIN COPYRIGHT BLOCK # This Program is free software; you can redistribute it and/or modify it under @@ -41,6 +41,29 @@ # #set ts=4 +# enable the use of our bundled perldap with our bundled ldapsdk libraries +# all of this nonsense can be omitted if the mozldapsdk and perldap are +# installed in the operating system locations (e.g. 
/usr/lib /usr/lib/perl5) +BEGIN { + my $savedir = `pwd`; + my $dirname = `dirname $0`; + chdir $dirname; + my $sroot = `pwd`; + $sroot =~ s@/clients/orgchart/bin*@@; + chomp($sroot); + chdir $savedir; + push @INC, "$sroot/lib/perl/arch", "$sroot/lib/perl"; + if ($ENV{LD_LIBRARY_PATH}) { + $ENV{LD_LIBRARY_PATH} .= ":"; + } + $ENV{LD_LIBRARY_PATH} .= "$sroot/shared/lib"; + # this is only needed for HP/ux PA-RISC, but it doesn't hurt other platforms + if ($ENV{SHLIB_PATH}) { + $ENV{SHLIB_PATH} .= ":"; + } + $ENV{SHLIB_PATH} .= "$sroot/shared/lib"; +} + # ------------ # # Notes for anybody reading the code below: diff --git a/ldap/cm/Makefile b/ldap/cm/Makefile index 70bb9f55d..6764e876c 100644 --- a/ldap/cm/Makefile +++ b/ldap/cm/Makefile @@ -185,6 +185,10 @@ DOTEXE = .exe PACKAGE_STAGE_DIR=$(OBJDIR)/package endif +ifdef INTERNAL_BUILD + PERLDAP := perldap +endif + # these are files and directories in the import adminsrv directory which we don't # make a local copy of, we just import directly into the tar file or create a # symlink to @@ -565,29 +569,6 @@ ifdef INTERNAL_BUILD $(PERL) -w fixPerlDAPInf.pl $(dir $@)/perldap.inf endif -# this is the rule to pull nsPerl -ifndef NSPERL_PULL_METHOD -NSPERL_PULL_METHOD = FTP -endif - -$(INSTDIR)/nsperl/$(NSPERL_ZIP_FILE): -ifdef INTERNAL_BUILD - $(RM) $@ - $(FTP_PULL) -method $(NSPERL_PULL_METHOD) \ - -objdir $(dir $@) \ - -componentdir $(NSPERL_COMPONENT_DIR) \ - -files $(notdir $@),nsperl.inf - @if [ ! -f $@ ] ; \ - then echo "Error: could not get component NSPERL file $@" ; \ - exit 1 ; \ - fi - $(PERL) -w fixNSPerlInf.pl $(dir $@)/nsperl.inf nsperl561 -# one more hack to nsperl - we must remove LDIF.pm because it -# conflicts with the one in perldap - bug 600138 -# SITEHACK is defined in nsperl.mk -# $(ZIP) -d $(dir $@)/$(NSPERL_ZIP_FILE) lib/nsPerl5.6.1/$(SITEHACK)/Mozilla/LDAP/LDIF.pm -endif - $(INSTDIR)/slapd: $(MKDIR) -p $@ @@ -597,7 +578,6 @@ ifneq ($(ARCH), WINNT) # ---THE UNIX PACKAGE--- packageDirectory: $(INSTDIR)/slapd \ - $(INSTDIR)/nsperl/$(NSPERL_ZIP_FILE) \ $(INSTDIR)/perldap/$(PERLDAP_ZIP_FILE) \ $(ADMSERV_DEP) @@ -712,15 +692,15 @@ ifndef NO_INSTALLER_TAR_FILES # build the combined packages tar file; use h flag to follow symlinks ifdef BUILD_SHIP ifndef BUILD_PATCH - cd $(INSTDIR); $(TAR) cvfh - setup.inf setup slapd nsperl \ - perldap dsktune $(ADMIN_IMPORTS) | gzip -f > $(BUILD_SHIP)/$(FTPNAMEGZ) + cd $(INSTDIR); $(TAR) cvfh - setup.inf setup slapd \ + $(PERLDAP) dsktune $(ADMIN_IMPORTS) | gzip -f > $(BUILD_SHIP)/$(FTPNAMEGZ) endif ifeq ($(DEBUG), optimize) # $(REMSH) "/u/svbld/bin/preRtm $(BUILD_SHIP) $(FTPNAMEGZ) svbld" endif else - cd $(INSTDIR); $(TAR) cvfh - setup.inf setup slapd nsperl \ - perldap dsktune $(ADMIN_IMPORTS) | gzip -f > ../all$(NS_BUILD_FLAVOR).tar.gz + cd $(INSTDIR); $(TAR) cvfh - setup.inf setup slapd \ + $(PERLDAP) dsktune $(ADMIN_IMPORTS) | gzip -f > ../all$(NS_BUILD_FLAVOR).tar.gz endif # BUILD_SHIP #cp $(INSTDIR).tar.gz $(BUILD_SHIP) #cp $(INSTDIR)/all$(NS_BUILD_FLAVOR).tar.gz $(BUILD_SHIP) @@ -891,8 +871,7 @@ ifeq ($(ARCH), WINNT) SLAPD_DIR=slapd -_perl: $(INSTDIR)/nsperl/$(NSPERL_ZIP_FILE) \ - $(INSTDIR)/perldap/$(PERLDAP_ZIP_FILE) +_perl: $(INSTDIR)/perldap/$(PERLDAP_ZIP_FILE) # ------------------------- all below this line is packageDirectory -------------------------- diff --git a/ldap/cm/fixSetupInf.pl b/ldap/cm/fixSetupInf.pl index 50ee87f83..ca9b990bc 100644 --- a/ldap/cm/fixSetupInf.pl +++ b/ldap/cm/fixSetupInf.pl @@ -65,10 +65,6 @@ while ( <FILE> ) { $_ .= ", slapd"; $addedSlapd = 1; } - if (! 
/nsperl/) { - $_ .= ", nsperl"; - $addedNSperl = 1; - } if (! /perldap/) { $_ .= ", perldap"; $addedPerLDAP = 1; @@ -99,11 +95,6 @@ if ($addedSlapd) { print OUT "ComponentInfoFile = slapd/slapd.inf\n"; } -if ($addedNSperl) { - print OUT "\n[nsperl]\n"; - print OUT "ComponentInfoFile = nsperl/nsperl.inf\n"; -} - if ($addedPerLDAP) { print OUT "\n[perldap]\n"; print OUT "ComponentInfoFile = perldap/perldap.inf\n"; diff --git a/ldap/cm/newinst/ns-update b/ldap/cm/newinst/ns-update index 24676d6a1..ba283995b 100755 --- a/ldap/cm/newinst/ns-update +++ b/ldap/cm/newinst/ns-update @@ -44,7 +44,7 @@ # dependencies sroot=`echo $0 | sed s#/bin/slapd/admin/bin/.\*##g` -PERL=$sroot/bin/slapd/admin/bin/perl +PERL=perl # just use perl from PATH start_server() { @@ -60,18 +60,6 @@ start_server() cd $cwd } -install_nsperl() -{ - # the current version of nsPerl to use is defined in the slapd.inf - nsperlinst=`grep '^NSPerlPostInstall' setup/slapd/slapd.inf | cut -f2 -d= 2> /dev/null` - if [ "$nsperlinst" ]; then - # run the nsperl installer - $nsperlinst > setup/nsperl/install.log - # use nsperl as our local copy of perl - cp `dirname $nsperlinst`/nsperl $PERL - fi -} - wrap_security_tools() { cwd=`pwd` @@ -148,8 +136,6 @@ if [ "$iDSISolaris" = "" ]; then fi done fi - - install_nsperl fi if [ $reconfig ] ; then @@ -171,6 +157,8 @@ if [ $reconfig ] ; then start_server $sroot $dir echo "" done + # fix any non-instance specific files - omit server instance argument + $PERL $sroot/bin/slapd/admin/bin/upgradeServer $sroot fi wrap_security_tools $sroot diff --git a/ldap/cm/newinst/setup.sh b/ldap/cm/newinst/setup.sh index b5c9b6a3d..640d416b6 100755 --- a/ldap/cm/newinst/setup.sh +++ b/ldap/cm/newinst/setup.sh @@ -39,11 +39,5 @@ setupdir=`dirname $0` cd ${setupdir} -# Configure nsPerl -if [ ! 
-f "./tools/perl" ]; then - ./tools/nsPerl5.6.1/install > /dev/null - ln -s ./nsPerl5.6.1/nsperl ./tools/perl -fi - # Kick off setup script ./setup.pl $* diff --git a/ldap/cm/newinst/slapd.inf b/ldap/cm/newinst/slapd.inf index c3c826e3a..863c8ab7b 100644 --- a/ldap/cm/newinst/slapd.inf +++ b/ldap/cm/newinst/slapd.inf @@ -52,7 +52,7 @@ Expires= %%%PUMPKIN_HOUR%%% Security= %%%SECURITY%%% Vendor= Fedora Project Description= %%%SERVER_NAME%%% -Dependencies= base/4.5, svrcore/4.5, nsperl561/1.13, perldap14/1.01 +Dependencies= base/4.5, svrcore/4.5 ProductName=Directory Server IsDirLite=%%%IS_DIR_LITE%%% SourcePath=slapd @@ -65,7 +65,6 @@ PostUninstall= Checked=True Mandatory=False IsLdap=True -NSPerlPostInstall=lib/nsPerl5.6.1/install [slapd-client] Name= Fedora Directory Server Console diff --git a/ldap/cm/newinstnt/slapd.inf b/ldap/cm/newinstnt/slapd.inf index b3df80aee..fc2917a3c 100644 --- a/ldap/cm/newinstnt/slapd.inf +++ b/ldap/cm/newinstnt/slapd.inf @@ -55,7 +55,7 @@ Security= %%%SECURITY%%% Vendor=Fedora Project Description=Fedora Directory Server DefaultAcceptLanguage=en -Dependencies=admin/4.5,nsperl561/1.10,perldap14/1.01 +Dependencies=admin/4.5 Revision= Checked=TRUE IsLdap=TRUE @@ -75,7 +75,6 @@ ReadGlobalCache=DSINST_ReadGlobalCache PostInstall=DSINST_PostInstall PreUninstall=DSINST_PreUnInstall PostUninstall=DSINST_PostUnInstall -NSPerlPostInstall=lib\nsPerl5.6.1\install.bat [slapd-client] NickName=slapd-client diff --git a/ldap/ldif/commonTasks.ldif b/ldap/ldif/commonTasks.ldif index 94ef71d77..d0027e1a8 100644 --- a/ldap/ldif/commonTasks.ldif +++ b/ldap/ldif/commonTasks.ldif @@ -56,7 +56,7 @@ dn: cn=Migrate, cn=Operation, cn=Tasks objectclass: top objectclass: nstask objectclass: nsAdminObject -nsexecref: perl?migrateInstance +nsexecref: migrateInstance dn: cn=Create, cn=Operation, cn=Tasks objectclass: top @@ -68,10 +68,10 @@ dn: cn=GetConfigInfo, cn=Operation, cn=Tasks objectclass: top objectclass: nstask objectclass: nsAdminObject -nsexecref: perl?getConfigInfo +nsexecref: getConfigInfo dn: cn=MigrateLocalDB, cn=Operation, cn=Tasks objectclass: top objectclass: nstask objectclass: nsAdminObject -nsexecref: perl?migrateLocalDB +nsexecref: migrateLocalDB diff --git a/ldap/ldif/tasks.ldif b/ldap/ldif/tasks.ldif index 7bd017918..5c3d87402 100644 --- a/ldap/ldif/tasks.ldif +++ b/ldap/ldif/tasks.ldif @@ -133,7 +133,7 @@ dn: cn=ViewLog, cn=operation, cn=Tasks objectclass: top objectclass: nstask objectclass: nsAdminObject -nsexecref: perl?ds_viewlog.pl +nsexecref: ds_viewlog.pl dn: cn=ListBackups, cn=operation, cn=Tasks objectclass: top diff --git a/nsperl.mk b/nsperl.mk index adbbf3e81..b7b060014 100755 --- a/nsperl.mk +++ b/nsperl.mk @@ -39,8 +39,6 @@ # NSPERL_RELDATE := 20020626 -NSPERL_VERSION := nsPerl5.6.1 -NSPERL_COMPONENT_DIR = $(COMPONENTS_DIR)/nsPerl/$(NSPERL_RELDATE)/$(NSOBJDIR_NAME_32) # default; will be redefined below for specific platform #PERL=$(NSPERL_COMPONENT_DIR)/lib/$(NSPERL_VERSION)/nsperl PERL=/share/builds/sbstools/nsPerl/$(NSPERL_RELDATE)/$(NSOBJDIR_NAME_32)/nsperl @@ -55,66 +53,3 @@ endif ifdef USE_PERL_FROM_PATH PERL = $(shell perl -e 'print "$$\n"') endif - -NSPERL_ZIP_FILE = nsperl561.zip - -# This makefile sets up the environment so that we can build and link -# perl xsubs. 
It assumes that you have a perl base directory that has -# a bin and lib subdir and that which perl yields base dir/bin/perl[.exe] -# also, this is only really necessary for NT, since this usually just -# works in the NFS world of unix -# for unix, we derive the paths from the Config information -ifdef USE_OLD_NTPERL -PERL_EXE = $(shell $(PERL) -e '($$foo = $$) =~ s@\\@/@g ; print "$$foo\n"') -PERL_EXENT = $(subst \,/,$(PERL_EXE)) -PERL_BASEDIR = $(dir $(PERL_EXENT)) -PERL_ROOT = $(subst /bin/,,$(PERL_BASEDIR)) -IS_ACTIVESTATE = $(shell $(PERL) -v | grep -i activestate) -else -PERL_CONFIG = $(shell $(PERL) -e 'use Config; foreach $$item (qw(installprivlib installarchlib installsitelib installsitearch prefixexp)) { ($$foo = $$Config{$$item}) =~ s@\\@/@g ; print "$$foo "; } print "\\\n"') -PERL_LIB = $(word 1, $(PERL_CONFIG)) -PERL_ARCHLIB = $(word 2, $(PERL_CONFIG)) -SITELIB = $(word 3, $(PERL_CONFIG)) -SITEARCH = $(word 4, $(PERL_CONFIG)) -PERL_ROOT = $(word 5, $(PERL_CONFIG)) -endif - -ifdef USE_OLD_NTPERL -PERL_LIB = $(PERL_ROOT)/lib -PERL_ARCHLIB = $(PERL_LIB) -PERL_SITE = site -SITELIB = $(PERL_ROOT)/$(PERL_SITE)/lib -SITEARCH = $(SITELIB) -endif - -INSTALLSITEARCH = $(SITEARCH) -INSTALLSITELIB = $(SITELIB) -SITEARCHEXP = $(SITEARCH) -SITELIBEXP = $(SITELIB) -XSUBPPDIR = $(PERL_LIB)/ExtUtils -XSUBPP = $(XSUBPPDIR)/xsubpp -XSPROTOARG = -XSUBPPDEPS = $(XSUBPPDIR)/typemap -XSUBPPARGS = -typemap $(XSUBPPDIR)/typemap -PERL_INC = -I$(PERL_ARCHLIB)/CORE - -SITEHACK = $(subst $(PERL_ROOT)/,,$(SITELIB)) -ARCHHACK = $(subst $(PERL_ROOT)/,,$(PERL_ARCHLIB)) - -ifeq ($(ARCH), WINNT) -ifdef IS_ACTIVESTATE -# C compilation/linking does not work for activestate; force C++ -PERL_CFLAGS = -TP -D_CONSOLE -DNO_STRICT -DPERL_OBJECT -ifeq ($(DEBUG), full) -PERL_CFLAGS += -DNDEBUG -endif -LIBPERL_A = /LIBPATH:$(PERL_ARCHLIB)/CORE perlCAPI.lib perlcore.lib PerlCRT.lib -else -LIBPERL_A = /LIBPATH:$(PERL_ARCHLIB)/CORE perl56.lib -endif -else -ifeq ($(DEBUG), full) -PERL_CFLAGS = -UDEBUG -endif -LIBPERL_A = -L$(PERL_ARCHLIB)/CORE -lperl -endif
0
c3f6ff604cf0e0458a50889f15c50e59297e1ddf
389ds/389-ds-base
513172 Simple Paged Results does not respect nsslapd-sizelimit SPR returns one page in one operation. Let the search_result_set keep the current sizelimit and make the sizelimit work beyond operations.
commit c3f6ff604cf0e0458a50889f15c50e59297e1ddf Author: Noriko Hosoi <[email protected]> Date: Wed Jul 22 16:03:02 2009 -0700 513172 Simple Paged Results does not respect nsslapd-sizelimit SPR returns one page in one operation. Let the search_result_set keep the current sizelimit and make the sizelimit work beyond operations. diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index 3036bf7c2..733bfe38a 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -715,7 +715,8 @@ typedef struct _back_search_result_set int sr_lookthroughlimit; /* how many can we examine? */ int sr_virtuallistview; /* is this a VLV Search */ Slapi_Entry* sr_vlventry; /* a special VLV Entry for when the ACL check fails */ - int sr_flags; /* Magic flags, defined below */ + int sr_flags; /* Magic flags, defined below */ + int sr_current_sizelimit; /* Current sizelimit */ } back_search_result_set; #define SR_FLAG_CAN_SKIP_FILTER_TEST 1 /* If set in sr_flags, means that we can safely skip the filter test */ diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c index 1c859c734..ead99388c 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_search.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c @@ -1122,6 +1122,18 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension ) slapi_pblock_get( pb, SLAPI_SEARCH_REFERRALS, &urls ); slapi_pblock_get( pb, SLAPI_SEARCH_RESULT_SET, &sr ); slapi_pblock_get( pb, SLAPI_TARGET_UNIQUEID, &target_uniqueid ); + + if (sr->sr_current_sizelimit >= 0) { + /* + * sr_current_sizelimit contains the current sizelimit. + * In case of paged results, getting one page is one operation, + * while the results on each page are from same back_search_result_set. + * To maintain sizelimit beyond operations, back_search_result_set + * holds the current sizelimit value. + * (The current sizelimit is valid inside an operation, as well.) + */ + slimit = sr->sr_current_sizelimit; + } inst = (ldbm_instance *) be->be_instance_info; @@ -1354,6 +1366,7 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension ) goto bail; } slapi_pblock_set( pb, SLAPI_SEARCH_SIZELIMIT, &slimit ); + sr->sr_current_sizelimit = slimit; } if ( (filter_test != 0) && sr->sr_virtuallistview) { @@ -1412,15 +1425,12 @@ bail: static back_search_result_set* new_search_result_set(IDList *idl, int vlv, int lookthroughlimit) { - back_search_result_set *p= (back_search_result_set *)slapi_ch_malloc( sizeof( back_search_result_set )); + back_search_result_set *p = (back_search_result_set *)slapi_ch_calloc( 1, sizeof( back_search_result_set )); p->sr_candidates = idl; p->sr_current = idl_iterator_init(idl); - p->sr_entry = NULL; - p->sr_lookthroughcount = 0; p->sr_lookthroughlimit = lookthroughlimit; - p->sr_virtuallistview= vlv; - p->sr_vlventry = NULL; - p->sr_flags = 0; + p->sr_virtuallistview = vlv; + p->sr_current_sizelimit = -1; return p; }
0
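The mechanism described in the record above can be shown with a minimal, self-contained C sketch; it is not the actual back-ldbm code, and the names here (result_set, effective_sizelimit, consume_one_entry) are invented for illustration. The point is that the result set outlives a single paged-results operation, so the remaining sizelimit can be stored in it after each returned entry and resumed at the start of the next page instead of being reset per operation.

#include <stdio.h>

/* Toy model only: stands in for back_search_result_set. */
struct result_set {
    int current_sizelimit;   /* -1 means "no value carried over yet" */
};

/* Start of one paged-results operation (one page). */
static int effective_sizelimit(const struct result_set *sr, int op_sizelimit)
{
    return (sr->current_sizelimit >= 0) ? sr->current_sizelimit : op_sizelimit;
}

/* After each entry returned within the page, persist the decremented limit. */
static void consume_one_entry(struct result_set *sr, int *slimit)
{
    if (*slimit > 0)
        (*slimit)--;
    sr->current_sizelimit = *slimit;
}

int main(void)
{
    struct result_set sr = { -1 };
    int page;

    for (page = 1; page <= 2; page++) {
        int slimit = effective_sizelimit(&sr, 5 /* nsslapd-sizelimit */);
        int i;
        for (i = 0; i < 3 && slimit != 0; i++)
            consume_one_entry(&sr, &slimit);
        printf("after page %d: remaining sizelimit = %d\n", page, slimit);
    }
    return 0;
}

This mirrors the patch's sr_current_sizelimit, which is initialized to -1 in new_search_result_set and updated alongside SLAPI_SEARCH_SIZELIMIT after each entry.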
0228e570b6c30a52b365a7a8ef67e027908e78db
389ds/389-ds-base
Ticket #537 - Improvement of range search Fix description: The index range search function index_range_read_ext was written to call idl_fetch_ext to get an idlist belonging to one key. It then added each per-key idlist to the main idlist as long as the key satisfied the range search filter condition. This patch introduces a new range search function idl_new_range_fetch to the new idl code, which generates an idlist in one idl function that eliminates the redundancy such as generating idlist and cursor per key. This patch only implements the new idl version. If idl_new is not set, the existing code is executed. Additionally, idl_new_fetch did not abort the read transaction even if any error occurred in the transaction. Now, it switches between commit and abort based upon the result. https://fedorahosted.org/389/ticket/537 Reviewed by Rich (Thank you!!)
commit 0228e570b6c30a52b365a7a8ef67e027908e78db Author: Noriko Hosoi <[email protected]> Date: Thu Jan 10 17:39:47 2013 -0800 Ticket #537 - Improvement of range search Fix description: The index range search function index_range _read_ext was written to call idl_fetch_ext to get an idlist belonging to one key. Then add it to the main idlist as long as the key satisfiles the range search filter condition. This patch introduces a new range search function idl_new_ range_fetch to the new idl code, which generates an idlist in one idl function that eliminates the redundancy such as generating idlist and cursor per key. This patch only implements the new idl version. If idl_new is not set, the existing code is executed. Additionally, idl_new_fetch did not abort the read transaction even if any error occurred in the transaction. Now, it switches between commit and abort based upon the result. https://fedorahosted.org/389/ticket/537 Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index 6536664ca..726ac40f0 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -885,4 +885,12 @@ typedef struct _back_search_result_set #define TOMBSTONE_INCLUDED 0x1 /* used by find_entry2modify_only_ext and entryrdn_index_read */ +#define DBT_FREE_PAYLOAD(d) slapi_ch_free(&((d).data)) +/* + * This only works with normalized keys, which should be ok because + * at this point both L and R should have already been normalized. + */ +#define DBT_EQ(L,R) \ + ((L)->dsize == (R)->dsize && !memcmp ((L)->dptr, (R)->dptr, (L)->dsize)) + #endif /* _back_ldbm_h_ */ diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c index 27b20ec97..9dc9cbf15 100644 --- a/ldap/servers/slapd/back-ldbm/filterindex.c +++ b/ldap/servers/slapd/back-ldbm/filterindex.c @@ -616,7 +616,7 @@ range_candidates( low, NULL, 0, &txn, err, allidslimit); } else { idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, - SLAPI_OP_GREATER_OR_EQUAL, + SLAPI_OP_LESS_OR_EQUAL, low, high, 1, &txn, err, allidslimit); } diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c index 64ffc9d4c..5bc84857a 100644 --- a/ldap/servers/slapd/back-ldbm/idl_new.c +++ b/ldap/servers/slapd/back-ldbm/idl_new.c @@ -155,7 +155,8 @@ int idl_new_release_private(struct attrinfo *a) return 0; } -IDList * idl_new_fetch( +IDList * +idl_new_fetch( backend *be, DB* db, DBT *inkey, @@ -292,7 +293,7 @@ IDList * idl_new_fetch( } LDAPDebug(LDAP_DEBUG_TRACE, "bulk fetch buffer nids=%d\n", count, 0, 0); -#if defined(DB_ALLIDS_ON_READ) +#if defined(DB_ALLIDS_ON_READ) /* enforce the allids read limit */ if ((NEW_IDL_NO_ALLID != *flag_err) && (NULL != a) && (idl != NULL) && idl_new_exceeds_allidslimit(count, a, allidslimit)) { @@ -317,13 +318,14 @@ IDList * idl_new_fetch( /* we got another ID, add it to our IDL */ idl_rc = idl_append_extend(&idl, id); if (idl_rc) { - LDAPDebug(LDAP_DEBUG_ANY, "unable to extend id list (err=%d)\n", idl_rc); + LDAPDebug1Arg(LDAP_DEBUG_ANY, + "unable to extend id list (err=%d)\n", idl_rc); idl_free(idl); idl = NULL; goto error; } #if defined(DB_ALLIDS_ON_READ) /* enforce the allids read limit */ - if ((idl != NULL) && idl_new_exceeds_allidslimit(count, a, allidslimit)) { + if (idl && idl_new_exceeds_allidslimit(count, a, allidslimit)) { idl->b_nids = 1; idl->b_ids[0] = ALLID; ret = DB_NOTFOUND; /* fool the code below into thinking that we finished 
the dups */ @@ -365,11 +367,352 @@ error: } } } - dblayer_read_txn_commit(be, &s_txn); + if (ret) { + dblayer_read_txn_abort(be, &s_txn); + } else { + dblayer_read_txn_commit(be, &s_txn); + } *flag_err = ret; return idl; } +/* + * Perform the range search in the idl layer instead of the index layer + * to improve the performance. + */ +IDList * +idl_new_range_fetch( + backend *be, + DB* db, + DBT *lowerkey, + DBT *upperkey, + DB_TXN *txn, + struct attrinfo *ai, + int *flag_err, + int allidslimit, + int sizelimit, + time_t stoptime, + int lookthrough_limit, + int operator +) +{ + int ret = 0; + int idl_rc = 0; + DBC *cursor = NULL; + IDList *idl = NULL; + DBT cur_key; + DBT data; + ID id = 0; + size_t count = 0; +#ifdef DB_USE_BULK_FETCH + /* beware that a large buffer on the stack might cause a stack overflow on some platforms */ + char buffer[BULK_FETCH_BUFFER_SIZE]; + void *ptr; + DBT dataret; +#endif + back_txn s_txn; + struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private; + time_t curtime; + void *saved_key = NULL; + + if (NEW_IDL_NOOP == *flag_err) + { + *flag_err = 0; + return NULL; + } + + dblayer_txn_init(li, &s_txn); + if (txn) { + dblayer_read_txn_begin(be, txn, &s_txn); + } + + /* Make a cursor */ + ret = db->cursor(db, txn, &cursor, 0); + if (0 != ret) { + ldbm_nasty(filename,1,ret); + cursor = NULL; + goto error; + } + memset(&data, 0, sizeof(data)); +#ifdef DB_USE_BULK_FETCH + data.ulen = sizeof(buffer); + data.size = sizeof(buffer); + data.data = buffer; + data.flags = DB_DBT_USERMEM; + memset(&dataret, 0, sizeof(dataret)); +#else + data.ulen = sizeof(id); + data.size = sizeof(id); + data.data = &id; + data.flags = DB_DBT_USERMEM; +#endif + + /* + * We're not expecting the key to change in value + * so we can just use the input key as a buffer. + * This avoids memory management of the key. + */ + memset(&cur_key, 0, sizeof(cur_key)); + cur_key.ulen = lowerkey->size; + cur_key.size = lowerkey->size; + saved_key = cur_key.data = slapi_ch_malloc(lowerkey->size); + memcpy(cur_key.data, lowerkey->data, lowerkey->size); + cur_key.flags = DB_DBT_MALLOC; + + /* Position cursor at the first matching key */ +#ifdef DB_USE_BULK_FETCH + ret = cursor->c_get(cursor, &cur_key, &data, DB_SET|DB_MULTIPLE); +#else + ret = cursor->c_get(cursor, &cur_key, &data, DB_SET); +#endif + if (0 != ret) { + if (DB_NOTFOUND != ret) { +#ifdef DB_USE_BULK_FETCH + if (ret == DB_BUFFER_SMALL) { + LDAPDebug(LDAP_DEBUG_ANY, "database index is corrupt; " + "data item for key %s is too large for our buffer " + "(need=%d actual=%d)\n", + cur_key.data, data.size, data.ulen); + } +#endif + ldbm_nasty(filename,2,ret); + } + goto error; /* Not found is OK, return NULL IDL */ + } + + /* Iterate over the duplicates, amassing them into an IDL */ +#ifdef DB_USE_BULK_FETCH + while (cur_key.data && + (upperkey->data ? + ((operator == SLAPI_OP_LESS) ? 
+ DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 : + DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) : + PR_TRUE /* e.g., (x > a) */)) { + ID lastid = 0; + + DB_MULTIPLE_INIT(ptr, &data); + + /* lookthrough limit & sizelimit check */ + if (idl) { + if ((lookthrough_limit != -1) && + (idl->b_nids > (ID)lookthrough_limit)) { + idl_free(idl); + idl = idl_allids( be ); + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "idl_new_range_fetch - lookthrough_limit exceeded\n"); + *flag_err = LDAP_ADMINLIMIT_EXCEEDED; + goto error; + } + if ((sizelimit > 0) && (idl->b_nids > (ID)sizelimit)) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "idl_new_range_fetch - sizelimit exceeded\n"); + *flag_err = LDAP_SIZELIMIT_EXCEEDED; + goto error; + } + } + /* timelimit check */ + if (stoptime > 0) { /* timelimit is set */ + curtime = current_time(); + if (curtime >= stoptime) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "idl_new_range_fetch - timelimit exceeded\n"); + *flag_err = LDAP_TIMELIMIT_EXCEEDED; + goto error; + } + } + while (PR_TRUE) { + DB_MULTIPLE_NEXT(ptr, &data, dataret.data, dataret.size); + if (dataret.data == NULL) break; + if (ptr == NULL) break; + + if (*(int32_t *)ptr < -1) { + LDAPDebug1Arg(LDAP_DEBUG_TRACE, "DB_MULTIPLE buffer is corrupt; " + "next offset [%d] is less than zero\n", + *(int32_t *)ptr); + /* retry the read */ + break; + } + if (dataret.size != sizeof(ID)) { + LDAPDebug(LDAP_DEBUG_ANY, "database index is corrupt; " + "key %s has a data item with the wrong size (%d)\n", + cur_key.data, dataret.size, 0); + goto error; + } + memcpy(&id, dataret.data, sizeof(ID)); + if (id == lastid) { /* dup */ + LDAPDebug1Arg(LDAP_DEBUG_TRACE, "Detedted duplicate id " + "%d due to DB_MULTIPLE error - skipping\n", + id); + continue; /* get next one */ + } + /* note the last id read to check for dups */ + lastid = id; + /* we got another ID, add it to our IDL */ + idl_rc = idl_append_extend(&idl, id); + if (idl_rc) { + LDAPDebug1Arg(LDAP_DEBUG_ANY, + "unable to extend id list (err=%d)\n", idl_rc); + idl_free(idl); idl = NULL; + goto error; + } + + count++; + } + + LDAPDebug(LDAP_DEBUG_TRACE, "bulk fetch buffer nids=%d\n", count, 0, 0); +#if defined(DB_ALLIDS_ON_READ) + /* enforce the allids read limit */ + if ((NEW_IDL_NO_ALLID != *flag_err) && ai && (idl != NULL) && + idl_new_exceeds_allidslimit(count, ai, allidslimit)) { + idl->b_nids = 1; + idl->b_ids[0] = ALLID; + ret = DB_NOTFOUND; /* fool the code below into thinking that we finished the dups */ + break; + } +#endif + ret = cursor->c_get(cursor, &cur_key, &data, DB_NEXT_DUP|DB_MULTIPLE); + if (ret) { + if (DBT_EQ(&cur_key, upperkey)) { /* this is the last key */ + break; + } + /* First set the cursor (DB_NEXT_NODUP does not take DB_MULTIPLE) */ + ret = cursor->c_get(cursor, &cur_key, &data, DB_NEXT_NODUP); + if (ret) { + break; + } + /* Read the dup data */ + ret = cursor->c_get(cursor, &cur_key, &data, DB_SET|DB_MULTIPLE); + if (saved_key != cur_key.data) { + /* key was allocated in c_get */ + slapi_ch_free(&saved_key); + saved_key = cur_key.data; + } + if (ret) { + break; + } + } + } +#else + while (upperkey->data ? + ((operator == SLAPI_OP_LESS) ? 
+ DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 : + DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) : + PR_TRUE /* e.g., (x > a) */) { + /* lookthrough limit & sizelimit check */ + if (idl) { + if ((lookthrough_limit != -1) && + (idl->b_nids > (ID)lookthrough_limit)) { + idl_free(idl); + idl = idl_allids( be ); + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "idl_new_range_fetch - lookthrough_limit exceeded\n"); + *flag_err = LDAP_ADMINLIMIT_EXCEEDED; + goto error; + } + if ((sizelimit > 0) && (idl->b_nids > (ID)sizelimit)) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "idl_new_range_fetch - sizelimit exceeded\n"); + *flag_err = LDAP_SIZELIMIT_EXCEEDED; + goto error; + } + } + /* timelimit check */ + if (stoptime > 0) { /* timelimit is set */ + curtime = current_time(); + if (curtime >= stoptime) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "idl_new_range_fetch - timelimit exceeded\n"); + *flag_err = LDAP_TIMELIMIT_EXCEEDED; + goto error; + } + } + ret = cursor->c_get(cursor,&cur_key,&data,DB_NEXT_DUP); + count++; + if (ret) { + if (DBT_EQ(&cur_key, upperkey)) { /* this is the last key */ + break; + } + DBT_FREE_PAYLOAD(cur_key); + ret = cursor->c_get(cursor, &cur_key, &data, DB_NEXT_NODUP); + if (saved_key != cur_key.data) { + /* key was allocated in c_get */ + slapi_ch_free(&saved_key); + saved_key = cur_key.data; + } + if (ret) { + break; + } + } + /* we got another ID, add it to our IDL */ + idl_rc = idl_append_extend(&idl, id); + if (idl_rc) { + LDAPDebug1Arg(LDAP_DEBUG_ANY, + "unable to extend id list (err=%d)\n", idl_rc); + idl_free(idl); idl = NULL; + goto error; + } +#if defined(DB_ALLIDS_ON_READ) + /* enforce the allids read limit */ + if (idl && idl_new_exceeds_allidslimit(count, ai, allidslimit)) { + idl->b_nids = 1; + idl->b_ids[0] = ALLID; + ret = DB_NOTFOUND; /* fool the code below into thinking that we finished the dups */ + break; + } +#endif + } +#endif + + if (ret) { + if (ret == DB_NOTFOUND) { + ret = 0; /* normal case */ + } else { + idl_free(idl); idl = NULL; + ldbm_nasty(filename,59,ret); + goto error; + } + } + + /* check for allids value */ + if (idl && (idl->b_nids == 1) && (idl->b_ids[0] == ALLID)) { + idl_free(idl); + idl = idl_allids(be); + LDAPDebug1Arg(LDAP_DEBUG_TRACE, "idl_new_fetch %s returns allids\n", + cur_key.data); + } else { + LDAPDebug2Args(LDAP_DEBUG_TRACE, "idl_new_fetch %s returns nids=%lu\n", + cur_key.data, (u_long)IDL_NIDS(idl)); + } + +error: + DBT_FREE_PAYLOAD(cur_key); + /* Close the cursor */ + if (NULL != cursor) { + int ret2 = cursor->c_close(cursor); + if (ret2) { + ldbm_nasty(filename,3,ret2); + if (!ret) { + /* if cursor close returns DEADLOCK, we must bubble that up + to the higher layers for retries */ + ret = ret2; + } + } + } + if (ret) { + dblayer_read_txn_abort(be, &s_txn); + } else { + dblayer_read_txn_commit(be, &s_txn); + } + *flag_err = ret; + + /* sort idl */ + if (idl && !ALLIDS(idl)) { + qsort((void *)&idl->b_ids[0], idl->b_nids, + (size_t)sizeof(ID), idl_sort_cmp); + } + return idl; +} + int idl_new_insert_key( backend *be, DB* db, @@ -405,7 +748,7 @@ int idl_new_insert_key( if (NULL != disposition) { *disposition = IDL_INSERT_ALLIDS; } - goto error; /* allid: don't bother inserting any more */ + goto error; /* allid: don't bother inserting any more */ } } else if (DB_NOTFOUND != ret) { ldbm_nasty(filename,12,ret); @@ -506,7 +849,7 @@ int idl_new_delete_key( ret = cursor->c_get(cursor,key,&data,DB_GET_BOTH); if (0 == ret) { if (id == ALLID) { - goto error; /* allid: never delete it */ + goto error; /* allid: never delete it */ } } else 
{ if (DB_NOTFOUND == ret) { @@ -557,19 +900,19 @@ static int idl_new_store_allids(backend *be, DB *db, DBT *key, DB_TXN *txn) /* Position cursor at the key */ ret = cursor->c_get(cursor,key,&data,DB_SET); if (ret == 0) { - /* We found it, so delete all duplicates */ - ret = cursor->c_del(cursor,0); - while (0 == ret) { - ret = cursor->c_get(cursor,key,&data,DB_NEXT_DUP); - if (0 != ret) { - break; - } - ret = cursor->c_del(cursor,0); - } - if (0 != ret && DB_NOTFOUND != ret) { + /* We found it, so delete all duplicates */ + ret = cursor->c_del(cursor,0); + while (0 == ret) { + ret = cursor->c_get(cursor,key,&data,DB_NEXT_DUP); + if (0 != ret) { + break; + } + ret = cursor->c_del(cursor,0); + } + if (0 != ret && DB_NOTFOUND != ret) { ldbm_nasty(filename,54,ret); goto error; - } else { + } else { ret = 0; } } else { @@ -602,9 +945,9 @@ error: } return ret; #ifdef KRAZY_K0DE - /* If this function is called in "no-allids" mode, then it's a bug */ - ldbm_nasty(filename,63,0); - return -1; + /* If this function is called in "no-allids" mode, then it's a bug */ + ldbm_nasty(filename,63,0); + return -1; #endif } #endif @@ -674,7 +1017,7 @@ int idl_new_store_block( ret = cursor->c_put(cursor, key, &data, DB_NODUPDATA); if (0 != ret) { if (DB_KEYEXIST == ret) { - ret = 0; /* exist is okay */ + ret = 0; /* exist is okay */ } else { ldbm_nasty(filename,48,ret); goto error; diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index 59487172b..a10c67a2b 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -1086,7 +1086,7 @@ index_read_ext( see also dblayer_bt_compare */ -static int +int DBTcmp (DBT* L, DBT* R, value_compare_fn_type cmp_fn) { struct berval Lv; @@ -1107,16 +1107,6 @@ DBTcmp (DBT* L, DBT* R, value_compare_fn_type cmp_fn) return cmp_fn(&Lv, &Rv); } -/* This only works with normalized keys, which - should be ok because at this point both L and R - should have already been normalized -*/ -#define DBT_EQ(L,R) ((L)->dsize == (R)->dsize &&\ - ! memcmp ((L)->dptr, (R)->dptr, (L)->dsize)) - - -#define DBT_FREE_PAYLOAD(d) if ((d).data) {free((d).data);(d).data=NULL;} - /* Steps to the next key without keeping a cursor open */ /* Returns the new key value in the DBT */ static int index_range_next_key(DB *db,DBT *key,DB_TXN *db_txn) @@ -1222,7 +1212,7 @@ index_range_read_ext( DBT upperkey = {0}; DBT cur_key = {0}; DBT data = {0} ; - IDList *idl= NULL; + IDList *idl = NULL; char *prefix = NULL; char *realbuf, *nextrealbuf; size_t reallen, nextreallen; @@ -1230,10 +1220,9 @@ index_range_read_ext( ID i; struct attrinfo *ai = NULL; int lookthrough_limit = -1; /* default no limit */ - int retry_count = 0; int is_and = 0; int sizelimit = 0; - time_t curtime, stoptime, optime; + time_t curtime, stoptime = 0; int timelimit = -1; back_search_result_set *sr = NULL; int isroot = 0; @@ -1254,19 +1243,20 @@ index_range_read_ext( plen = strlen(prefix); slapi_pblock_get(pb, SLAPI_SEARCH_IS_AND, &is_and); - if (!is_and) - { + if (!is_and) { slapi_pblock_get(pb, SLAPI_SEARCH_SIZELIMIT, &sizelimit); } - slapi_pblock_get( pb, SLAPI_OPINITIATED_TIME, &optime ); slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &timelimit); - stoptime = optime + timelimit; + if (timelimit != -1) { + time_t optime; + slapi_pblock_get(pb, SLAPI_OPINITIATED_TIME, &optime); + stoptime = optime + timelimit; + } /* * Determine the lookthrough_limit from the PBlock. * No limit if there is no search result set and the requestor is root. 
*/ - slapi_pblock_get( pb, SLAPI_SEARCH_RESULT_SET, &sr ); if (sr != NULL) { /* the normal case */ @@ -1279,8 +1269,8 @@ index_range_read_ext( } } - LDAPDebug(LDAP_DEBUG_TRACE, "index_range_read lookthrough_limit=%d\n", - lookthrough_limit, 0, 0); + LDAPDebug1Arg(LDAP_DEBUG_TRACE, "index_range_read lookthrough_limit=%d\n", + lookthrough_limit); switch( operator ) { case SLAPI_OP_LESS: @@ -1323,7 +1313,7 @@ index_range_read_ext( /* get a cursor so we can walk over the table */ *err = db->cursor(db,db_txn,&dbc,0); if (0 != *err ) { - ldbm_nasty(errmsg, 1060, *err); + ldbm_nasty(errmsg, 1060, *err); LDAPDebug( LDAP_DEBUG_ANY, "<= index_range_read(%s,%s) NULL: db->cursor() == %i\n", type, prefix, *err ); @@ -1344,7 +1334,7 @@ index_range_read_ext( reallen = plen + 1; /* include 0 terminator */ realbuf = slapi_ch_strdup(prefix); } - if (range != 1) { + if (range != 1) { /* open range search */ char *tmpbuf = NULL; /* this is a search with only one boundary value */ switch( operator ) { @@ -1390,12 +1380,12 @@ index_range_read_ext( tmpbuf = slapi_ch_realloc (tmpbuf, cur_key.dsize); memcpy (tmpbuf, cur_key.dptr, cur_key.dsize); DBT_FREE_PAYLOAD(upperkey); - upperkey.dptr = tmpbuf; - upperkey.dsize = cur_key.dsize; + upperkey.dptr = NULL; /* x >= a :no need to check upper bound */ + upperkey.dsize = 0; } break; } - } else { + } else { /* closed range search: e.g., (&(x >= a)(x <= b)) */ /* this is a search with two boundary values (starting and ending) */ if ( nextval != NULL ) { /* compute a key from nextval */ const size_t vlen = nextval->bv_len; @@ -1409,22 +1399,10 @@ index_range_read_ext( nextrealbuf = slapi_ch_strdup(prefix); } /* set up the starting and ending keys for search */ - switch( operator ) { - case SLAPI_OP_LESS: - case SLAPI_OP_LESS_OR_EQUAL: - lowerkey.dptr = nextrealbuf; - lowerkey.dsize = nextreallen; - upperkey.dptr = realbuf; - upperkey.dsize = reallen; - break; - case SLAPI_OP_GREATER_OR_EQUAL: - case SLAPI_OP_GREATER: - lowerkey.dptr = realbuf; - lowerkey.dsize = reallen; - upperkey.dptr = nextrealbuf; - upperkey.dsize = nextreallen; - break; - } + lowerkey.dptr = realbuf; + lowerkey.dsize = reallen; + upperkey.dptr = nextrealbuf; + upperkey.dsize = nextreallen; } /* if (LDAP_DEBUG_FILTER) { char encbuf [BUFSIZ]; @@ -1472,140 +1450,159 @@ index_range_read_ext( if (operator == SLAPI_OP_GREATER) { *err = index_range_next_key(db,&cur_key,db_txn); } - while (*err == 0 && - (operator == SLAPI_OP_LESS) ? - DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) < 0 : - DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) <= 0) { - /* exit the loop when we either run off the end of the table, - * fail to read a key, or read a key that's out of range. - */ - IDList *tmp; - /* - char encbuf [BUFSIZ]; - LDAPDebug( LDAP_DEBUG_FILTER, " cur_key=%s(%li bytes)\n", - encoded (&cur_key, encbuf), (long)cur_key.dsize, 0 ); - */ - /* Check to see if we've already looked too hard */ - if (idl != NULL && lookthrough_limit != -1 && idl->b_nids > (ID)lookthrough_limit) { - if (NULL != idl) { - idl_free(idl); + if (idl_get_idl_new()) { /* new idl */ + idl = idl_new_range_fetch(be, db, &cur_key, &upperkey, db_txn, + ai, err, allidslimit, sizelimit, stoptime, + lookthrough_limit, operator); + } else { /* old idl */ + int retry_count = 0; + while (*err == 0 && + (upperkey.data && + (operator == SLAPI_OP_LESS) ? 
+ DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) < 0 : + DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) <= 0)) { + /* exit the loop when we either run off the end of the table, + * fail to read a key, or read a key that's out of range. + */ + IDList *tmp; + /* + char encbuf [BUFSIZ]; + LDAPDebug( LDAP_DEBUG_FILTER, " cur_key=%s(%li bytes)\n", + encoded (&cur_key, encbuf), (long)cur_key.dsize, 0 ); + */ + /* lookthrough limit and size limit check */ + if (idl) { + if ((lookthrough_limit != -1) && + (idl->b_nids > (ID)lookthrough_limit)) { + idl_free(idl); + idl = idl_allids( be ); + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "index_range_read lookthrough_limit exceeded\n"); + *err = LDAP_ADMINLIMIT_EXCEEDED; + break; + } + if ((sizelimit > 0) && (idl->b_nids > (ID)sizelimit)) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "index_range_read sizelimit exceeded\n"); + *err = LDAP_SIZELIMIT_EXCEEDED; + break; + } } - idl = idl_allids( be ); - LDAPDebug(LDAP_DEBUG_TRACE, "index_range_read lookthrough_limit exceeded\n", - 0, 0, 0); - break; - } - if (idl != NULL && sizelimit > 0 && idl->b_nids > (ID)sizelimit) - { - LDAPDebug(LDAP_DEBUG_TRACE, "index_range_read sizelimit exceeded\n", - 0, 0, 0); - break; - } - /* check time limit */ - curtime = current_time(); - if ( timelimit != -1 && curtime >= stoptime ) - { - LDAPDebug(LDAP_DEBUG_TRACE, "index_range_read timelimit exceeded\n", - 0, 0, 0); - break; - } - - /* Check to see if the operation has been abandoned (also happens - * when the connection is closed by the client). - */ - if ( slapi_op_abandoned( pb )) { - if (NULL != idl) { - idl_free(idl); - idl = NULL; + /* check time limit */ + if (timelimit != -1) { + curtime = current_time(); + if (curtime >= stoptime) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "index_range_read timelimit exceeded\n"); + *err = LDAP_TIMELIMIT_EXCEEDED; + break; + } } - LDAPDebug(LDAP_DEBUG_TRACE, - "index_range_read - operation abandoned\n", 0, 0, 0); - break; /* clean up happens outside the while() loop */ - } - - /* the cur_key DBT already has the first entry in it when we enter the loop */ - /* so we process the entry then step to the next one */ - cur_key.flags = 0; - for (retry_count = 0; retry_count < IDL_FETCH_RETRY_COUNT; retry_count++) { - *err = NEW_IDL_DEFAULT; - tmp = idl_fetch_ext( be, db, &cur_key, NULL, ai, err, allidslimit ); - if(*err == DB_LOCK_DEADLOCK) { - ldbm_nasty("index_range_read retrying transaction", 1090, *err); + /* Check to see if the operation has been abandoned (also happens + * when the connection is closed by the client). 
+ */ + if ( slapi_op_abandoned( pb )) { + if (NULL != idl) { + idl_free(idl); + idl = NULL; + } + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "index_range_read - operation abandoned\n"); + break; /* clean up happens outside the while() loop */ + } + + /* the cur_key DBT already has the first entry in it when we enter + * the loop, so we process the entry then step to the next one */ + cur_key.flags = 0; + for (retry_count = 0; + retry_count < IDL_FETCH_RETRY_COUNT; + retry_count++) { + *err = NEW_IDL_DEFAULT; + tmp = idl_fetch_ext(be, db, &cur_key, NULL, ai, err, allidslimit); + if(*err == DB_LOCK_DEADLOCK) { + ldbm_nasty("index_range_read retrying transaction", 1090, *err); #ifdef FIX_TXN_DEADLOCKS #error if txn != NULL, have to abort and retry the transaction, not just the fetch #endif - continue; - } else { - break; - } - } - if(retry_count == IDL_FETCH_RETRY_COUNT) { - ldbm_nasty("index_range_read retry count exceeded",1095,*err); - } - if (!tmp) { - if (slapi_is_loglevel_set(LDAP_DEBUG_TRACE)) { - char encbuf[BUFSIZ]; - LDAPDebug2Args(LDAP_DEBUG_TRACE, - "index_range_read_ext: cur_key=%s(%li bytes) was deleted - skipping\n", - encoded(&cur_key, encbuf), (long)cur_key.dsize); + continue; + } else { + break; + } } - } else { - /* idl tmp only contains one id */ - /* append it at the end here; sort idlist at the end */ - if (ALLIDS(tmp)) { - idl_free(idl); - idl = tmp; + if(retry_count == IDL_FETCH_RETRY_COUNT) { + ldbm_nasty("index_range_read retry count exceeded",1095,*err); + } + if (!tmp) { + if (slapi_is_loglevel_set(LDAP_DEBUG_TRACE)) { + char encbuf[BUFSIZ]; + LDAPDebug2Args(LDAP_DEBUG_TRACE, + "index_range_read_ext: cur_key=%s(%li bytes) was deleted - skipping\n", + encoded(&cur_key, encbuf), (long)cur_key.dsize); + } } else { - ID id; - for (id = idl_firstid(tmp); id != NOID; id = idl_nextid(tmp, id)) { - *err = idl_append_extend(&idl, id); - if (*err) { - ldbm_nasty("index_range_read - failed to generate idlist", - 1097, *err); + /* idl tmp only contains one id */ + /* append it at the end here; sort idlist at the end */ + if (ALLIDS(tmp)) { + idl_free(idl); + idl = tmp; + } else { + ID id; + for (id = idl_firstid(tmp); + id != NOID; id = idl_nextid(tmp, id)) { + *err = idl_append_extend(&idl, id); + if (*err) { + ldbm_nasty("index_range_read - failed to generate idlist", + 1097, *err); + } } + idl_free(tmp); + } + if (ALLIDS(idl)) { + LDAPDebug0Args(LDAP_DEBUG_TRACE, + "index_range_read hit an allids value\n"); + break; } - idl_free(tmp); } - if (ALLIDS(idl)) { - LDAPDebug(LDAP_DEBUG_TRACE, "index_range_read hit an allids value\n", - 0, 0, 0); + if (DBT_EQ (&cur_key, &upperkey)) { /* this is the last key */ + break; + /* Another c_get would return the same key, with no error. */ + } + data.flags = DB_DBT_MALLOC; + cur_key.flags = DB_DBT_MALLOC; + *err = index_range_next_key(db,&cur_key,db_txn); + /* *err = dbc->c_get(dbc,&cur_key,&data,DB_NEXT); */ + if (*err == DB_NOTFOUND) { + *err = 0; break; } } - if (DBT_EQ (&cur_key, &upperkey)) { /* this is the last key */ - break; - /* Another c_get would return the same key, with no error. 
*/ - } - data.flags = DB_DBT_MALLOC; - cur_key.flags = DB_DBT_MALLOC; - *err = index_range_next_key(db,&cur_key,db_txn); - /* *err = dbc->c_get(dbc,&cur_key,&data,DB_NEXT); */ - if (*err == DB_NOTFOUND) { - *err = 0; - break; + /* sort idl */ + if (idl && !ALLIDS(idl)) { + qsort((void *)&idl->b_ids[0], idl->b_nids, + (size_t)sizeof(ID), idl_sort_cmp); } } - if (*err) LDAPDebug( LDAP_DEBUG_FILTER, " dbc->c_get(...DB_NEXT) == %i\n", *err, 0, 0); + if (*err) { + LDAPDebug1Arg(LDAP_DEBUG_FILTER, + " dbc->c_get(...DB_NEXT) == %i\n", *err); + } #ifdef LDAP_DEBUG - /* this is for debugging only */ - if (idl != NULL) - { - if (ALLIDS(idl)) { - LDAPDebug( LDAP_DEBUG_FILTER, - " idl=ALLIDS\n", 0, 0, 0 ); - } else { - LDAPDebug( LDAP_DEBUG_FILTER, - " idl->b_nids=%d\n", idl->b_nids, 0, 0 ); - LDAPDebug( LDAP_DEBUG_FILTER, - " idl->b_nmax=%d\n", idl->b_nmax, 0, 0 ); - - for ( i= 0; i< idl->b_nids; i++) - { - LDAPDebug( LDAP_DEBUG_FILTER, - " idl->b_ids[%d]=%d\n", i, idl->b_ids[i], 0); - } + /* this is for debugging only */ + if (idl != NULL) { + if (ALLIDS(idl)) { + LDAPDebug0Args(LDAP_DEBUG_FILTER, " idl=ALLIDS\n"); + } else { + LDAPDebug1Arg(LDAP_DEBUG_FILTER, + " idl->b_nids=%d\n", idl->b_nids); + LDAPDebug1Arg(LDAP_DEBUG_FILTER, + " idl->b_nmax=%d\n", idl->b_nmax); + + for (i = 0; i < idl->b_nids; i++) { + LDAPDebug2Args(LDAP_DEBUG_FILTER, + " idl->b_ids[%d]=%d\n", i, idl->b_ids[i]); } } + } #endif error: index_free_prefix(prefix); @@ -1613,13 +1610,6 @@ error: DBT_FREE_PAYLOAD(upperkey); dblayer_release_index_file( be, ai, db ); - - /* sort idl */ - if (idl && !ALLIDS(idl)) { - qsort((void *)&idl->b_ids[0], idl->b_nids, - (size_t)sizeof(ID), idl_sort_cmp); - } - LDAPDebug( LDAP_DEBUG_TRACE, "<= index_range_read(%s,%s) %lu candidates\n", type, prefix, (u_long)IDL_NIDS(idl) ); return( idl ); diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index f9c2c05b4..a0c3a4241 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -283,6 +283,10 @@ int idl_new_compare_dups( const DBT *a, const DBT *b ); +IDList *idl_new_range_fetch(backend *be, DB* db, DBT *lowerkey, DBT *upperkey, + DB_TXN *txn, struct attrinfo *a, int *flag_err, + int allidslimit, int sizelimit, time_t stoptime, + int lookthrough_limit, int operator); /* * index.c @@ -300,6 +304,7 @@ IDList* index_read_ext_allids( backend *be, char *type, const char* indextype, c IDList* index_range_read( Slapi_PBlock *pb, backend *be, char *type, const char* indextype, int ftype, struct berval* val, struct berval* nextval, int range, back_txn *txn, int *err ); IDList* index_range_read_ext( Slapi_PBlock *pb, backend *be, char *type, const char* indextype, int ftype, struct berval* val, struct berval* nextval, int range, back_txn *txn, int *err, int allidslimit ); const char *encode( const struct berval* data, char buf[BUFSIZ] ); +int DBTcmp(DBT* L, DBT* R, value_compare_fn_type cmp_fn); extern const char* indextype_PRESENCE; extern const char* indextype_EQUALITY;
0
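The single-pass idea behind idl_new_range_fetch in the record above can be illustrated with a small self-contained C sketch: walk a key-ordered index once, skip keys below the lower bound, stop at the first key past the upper bound, and append every ID seen in between, instead of issuing one fetch (and one cursor) per key. The types and names here (index_entry, range_fetch) are invented; the real function additionally handles DB_MULTIPLE bulk reads, allids, and the lookthrough/size/time limits visible in the diff.

#include <stdio.h>
#include <string.h>

struct index_entry { const char *key; unsigned id; };

/* A toy "equality index", sorted by key, with duplicate keys per ID. */
static const struct index_entry index_table[] = {
    { "a", 1 }, { "b", 2 }, { "b", 3 }, { "c", 4 }, { "d", 5 },
};

/* Collect IDs for lower <= key <= upper (SLAPI_OP_LESS would use < upper). */
static size_t range_fetch(const char *lower, const char *upper,
                          unsigned *out, size_t max)
{
    size_t n = 0;
    for (size_t i = 0; i < sizeof(index_table)/sizeof(index_table[0]); i++) {
        if (strcmp(index_table[i].key, lower) < 0)
            continue;                  /* before the lower bound */
        if (strcmp(index_table[i].key, upper) > 0)
            break;                     /* past the upper bound: stop the walk */
        if (n < max)
            out[n++] = index_table[i].id;
    }
    return n;
}

int main(void)
{
    unsigned ids[8];
    size_t n = range_fetch("b", "c", ids, 8);
    for (size_t i = 0; i < n; i++)
        printf("id=%u\n", ids[i]);     /* expected: 2 3 4 */
    return 0;
}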
79e93093e031bdd5a0bc047baf767644832eb562
389ds/389-ds-base
527848 - make sure db upgrade to 4.7 and later works correctly https://bugzilla.redhat.com/show_bug.cgi?id=527848 Change Description: 1. Replication Changelog 1-1. In the clean recover mode, transaction logs should not be removed. 1-2. When nsslapd-db-circular-logging is on (by default, it's on), call log_archive function with DB_ARCH_REMOVE, which removes log files that are no longer needed. 1-3. Call transaction checkpoint just before shutting down the server. 1-4. "From string" in the upgrade message had a flaw. 2. Backend dblayer 2-1. In checkpoint_threadmain, call log_archive with DB_ARCH_ABS, which returns the absolute path of the transaction log files. It eliminates the code which generates the absolute paths.
commit 79e93093e031bdd5a0bc047baf767644832eb562 Author: Noriko Hosoi <[email protected]> Date: Thu Feb 18 14:40:55 2010 -0800 527848 - make sure db upgrade to 4.7 and later works correctly https://bugzilla.redhat.com/show_bug.cgi?id=527848 Change Description: 1. Replication Changelog 1-1. In the clean recover mode, transaction logs should not be removed. 1-2. When nsslapd-db-circular-logging is on (by default, it's on), call log_archive function with DB_ARCH_REMOVE, which removes log files that are no longer needed. 1-3. Call transaction checkpoint just before shutting down the server. 1-4. "From string" in the upbrade message had a flaw. 2. Backend dblayer 2-1. In checkpoint_threadmain, call log_archive with DB_ARCH_ABS, which returns the absolute path of the transaction log files. It eliminates the code which generates the absolute paths. diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index 364b292d0..95c55e3b2 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -2396,7 +2396,6 @@ static int _cl5AppInit (PRBool *didRecovery) if (CL5_OPEN_CLEAN_RECOVER == s_cl5Desc.dbOpenMode) { _cl5RemoveEnv(); - _cl5RemoveLogs(); } rc = _cl5Recover (flags, dbEnv); @@ -3267,7 +3266,7 @@ static int _cl5CheckpointMain (void *param) } #if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR < 4100 else if (rc != DB_INCOMPLETE) /* real error happened */ - { + { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, "_cl5CheckpointMain: checkpoint failed, db error - %d %s\n", rc, db_strerror(rc)); @@ -3278,22 +3277,25 @@ static int _cl5CheckpointMain (void *param) if (s_cl5Desc.dbConfig.circularLogging) { char **list = NULL; - char **listp = NULL; - int rc = -1; - char filename[MAXPATHLEN + 1]; - /* find out which log files don't contain active txns */ - rc = LOG_ARCHIVE(s_cl5Desc.dbEnv, &list, 0, (void *)slapi_ch_malloc); - if (0 == rc && NULL != list) + /* DB_ARCH_REMOVE: Remove log files that are no longer needed; + * no filenames are returned. */ + int rc = LOG_ARCHIVE(s_cl5Desc.dbEnv, &list, + DB_ARCH_REMOVE, (void *)slapi_ch_malloc); + if (rc) { - /* zap 'em ! 
*/ - for (listp = list; *listp != NULL; ++listp) - { - PR_snprintf(filename, MAXPATHLEN, "%s/%s", s_cl5Desc.dbDir,*listp); - PR_Delete (filename); - } - slapi_ch_free((void **)&list); + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, + "_cl5CheckpointMain: log archive failed, " + "db error - %d %s\n", rc, db_strerror(rc)); } + slapi_ch_free((void **)&list); /* just in case */ + } + else + { + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, + "_cl5CheckpointMain: %s is off; " + "transaction logs won't be removed.\n", + CONFIG_CHANGELOG_DB_CIRCULAR_LOGGING); } } @@ -3302,6 +3304,8 @@ static int _cl5CheckpointMain (void *param) /* answer---because the interval might be changed after the server starts up */ DS_Sleep(interval); } + /* Check point and archive before shutting down */ + rc = TXN_CHECKPOINT(s_cl5Desc.dbEnv, 0, 0, 0); PR_AtomicDecrement (&s_cl5Desc.threadCount); slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5CheckpointMain: exiting\n"); @@ -3536,7 +3540,7 @@ static int _cl5CheckDBVersion () { *dotp = '\0'; dbmajor = strtol(versionp, (char **)NULL, 10); - dbminor = strtol(++dotp, (char **)NULL, 10); + dbminor = strtol(dotp+1, (char **)NULL, 10); *dotp = '.'; } else @@ -3991,6 +3995,8 @@ static int _cl5Delete (const char *clDir, int rmDir) continue; } PR_snprintf(filename, MAXPATHLEN, "%s/%s", clDir, entry->name); + /* _cl5Delete deletes the whole changelog directory with all the files + * underneath. Thus, we can just remove them physically. */ rc = PR_Delete(filename); if (rc != PR_SUCCESS) { diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index b99d7e9d5..b3d61db6d 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -2485,10 +2485,11 @@ int dblayer_instance_close(backend *be) } inst->inst_id2entry = NULL; - if (inst->import_env) { - /* ignore the value of env, close, because at this point, work is done with import env - by calling env.close, env and all the associated db handles will be closed, ignore, - if sleepycat complains, that db handles are open at env close time */ + if (inst->import_env) { + /* ignore the value of env, close, because at this point, + * work is done with import env by calling env.close, + * env and all the associated db handles will be closed, ignore, + * if sleepycat complains, that db handles are open at env close time */ return_value |= inst->import_env->dblayer_DB_ENV->close(inst->import_env->dblayer_DB_ENV, 0); return_value = db_env_create(&env, 0); if (return_value == 0) { @@ -2510,9 +2511,9 @@ int dblayer_instance_close(backend *be) } PR_DestroyRWLock(inst->import_env->dblayer_env_lock); slapi_ch_free((void **)&inst->import_env); - } else { + } else { be->be_state = BE_STATE_STOPPED; - } + } return return_value; } @@ -3496,7 +3497,7 @@ dblayer_start_deadlock_thread(struct ldbminfo *li) return return_value; } -/* checkpoint thread main function */ +/* deadlock thread main function */ static int deadlock_threadmain(void *param) { @@ -3643,6 +3644,9 @@ static int checkpoint_threadmain(void *param) int debug_checkpointing = 0; int checkpoint_interval; char *home_dir = NULL; + char **list = NULL; + char **listp = NULL; + struct dblayer_private_env *penv = NULL; PR_ASSERT(NULL != param); li = (struct ldbminfo*)param; @@ -3665,6 +3669,7 @@ static int checkpoint_threadmain(void *param) /* work around a problem with newly created environments */ dblayer_force_checkpoint(li); + penv = priv->dblayer_env; debug_checkpointing = 
priv->db_debug_checkpointing; /* assumes dblayer_force_checkpoint worked */ time_of_last_checkpoint_completion = current_time(); @@ -3745,59 +3750,41 @@ static int checkpoint_threadmain(void *param) time_of_last_checkpoint_completion = current_time(); } } - { - char **list = NULL; - char **listp = NULL; - int return_value = -1; - char filename[MAXPATHLEN]; - char *prefix = NULL; - struct dblayer_private_env *penv = priv->dblayer_env; - if ((NULL != priv->dblayer_log_directory) && - (0 != strlen(priv->dblayer_log_directory))) - { - prefix = priv->dblayer_log_directory; - } - else - { - prefix = home_dir; - } - /* find out which log files don't contain active txns */ - DB_CHECKPOINT_LOCK(PR_TRUE, penv->dblayer_env_lock); - return_value = LOG_ARCHIVE(penv->dblayer_DB_ENV, &list, - 0, (void *)slapi_ch_malloc); - DB_CHECKPOINT_UNLOCK(PR_TRUE, penv->dblayer_env_lock); - checkpoint_debug_message(debug_checkpointing, - "Got list of logfiles not needed %d %p\n", - return_value,list, 0); - if (0 == return_value && NULL != list) - { - /* zap 'em ! */ - for (listp = list; *listp != NULL; ++listp) - { - PR_snprintf(filename,sizeof(filename),"%s/%s",prefix,*listp); - if (priv->dblayer_circular_logging) { - checkpoint_debug_message(debug_checkpointing, - "Deleting %s\n",filename, 0, 0); - unlink(filename); - } else { - char new_filename[MAXPATHLEN]; - PR_snprintf(new_filename,sizeof(new_filename),"%s/old.%s", - prefix,*listp); - checkpoint_debug_message(debug_checkpointing, - "Renaming %s\n",filename,0, 0); - rename(filename,new_filename); - } + /* find out which log files don't contain active txns */ + DB_CHECKPOINT_LOCK(PR_TRUE, penv->dblayer_env_lock); + rval = LOG_ARCHIVE(penv->dblayer_DB_ENV, &list, + DB_ARCH_ABS, (void *)slapi_ch_malloc); + DB_CHECKPOINT_UNLOCK(PR_TRUE, penv->dblayer_env_lock); + if (rval) { + LDAPDebug2Args(LDAP_DEBUG_ANY, "checkpoint_threadmain: " + "log archive failed - %s (%d)\n", + dblayer_strerror(rval), rval); + } else { + for (listp = list; listp && *listp != NULL; ++listp) { + if (priv->dblayer_circular_logging) { + checkpoint_debug_message(debug_checkpointing, + "Deleting %s\n", *listp, 0, 0); + unlink(*listp); + } else { + char new_filename[MAXPATHLEN]; + PR_snprintf(new_filename, sizeof(new_filename), + "%s.old", *listp); + checkpoint_debug_message(debug_checkpointing, + "Renaming %s -> %s\n",*listp, new_filename, 0); + rename(*listp, new_filename); } - slapi_ch_free((void**)&list); } + slapi_ch_free((void**)&list); + /* Note: references inside the returned memory need not be + * individually freed. */ } } - LDAPDebug(LDAP_DEBUG_TRACE, "Leaving checkpoint_threadmain before checkpoint\n", 0, 0, 0); + LDAPDebug0Args(LDAP_DEBUG_TRACE, "Check point before leaving\n"); rval = dblayer_force_checkpoint(li); error_return: DECR_THREAD_COUNT(priv); - LDAPDebug(LDAP_DEBUG_TRACE, "Leaving checkpoint_threadmain\n", 0, 0, 0); + LDAPDebug0Args(LDAP_DEBUG_TRACE, "Leaving checkpoint_threadmain\n"); return rval; }
0
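For readers unfamiliar with the Berkeley DB calls referenced in the record above, here is a hedged sketch of the two log_archive modes. It assumes a BDB 4.x-style <db.h> and an already-open DB_ENV handle; the helper name archive_logs is invented, and the real server code calls log_archive through its LOG_ARCHIVE wrapper while holding the checkpoint lock, which this fragment omits.

#include <db.h>
#include <stdio.h>
#include <stdlib.h>

/* Remove or list transaction log files that no longer contain active txns. */
static int archive_logs(DB_ENV *env, int remove_them)
{
    char **list = NULL;
    int rc;

    if (remove_them) {
        /* DB_ARCH_REMOVE: libdb unlinks the files itself; no list is returned. */
        rc = env->log_archive(env, &list, DB_ARCH_REMOVE);
    } else {
        /* DB_ARCH_ABS: return absolute paths, so no prefix handling is needed. */
        rc = env->log_archive(env, &list, DB_ARCH_ABS);
        if (rc == 0 && list != NULL) {
            for (char **p = list; *p != NULL; p++)
                printf("reusable log file: %s\n", *p);
        }
    }
    if (rc != 0)
        fprintf(stderr, "log_archive failed: %s (%d)\n", db_strerror(rc), rc);
    /* Per the commit, entries inside the returned memory need not be freed
     * individually; releasing the list pointer itself is enough. */
    free(list);
    return rc;
}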
ca2f99b78387c0e5f5f91c7c1951958cdf3b6c09
389ds/389-ds-base
Bug 610119 - fix coverity Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverity Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Catch possible NULL pointer in slapi_entry_schema_check().
commit ca2f99b78387c0e5f5f91c7c1951958cdf3b6c09 Author: Endi S. Dewata <[email protected]> Date: Fri Jul 2 00:18:18 2010 -0500 Bug 610119 - fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Catch possible NULL pointer in slapi_entry_schema_check(). diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 453944278..6e2fefe7a 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -508,6 +508,20 @@ slapi_entry_schema_check( Slapi_PBlock *pb, Slapi_Entry *e ) ocname = slapi_value_get_string(v); + if ( !ocname ) { + char ebuf[ BUFSIZ ]; + LDAPDebug( LDAP_DEBUG_ANY, + "Entry \"%s\" \"objectclass\" value missing\n", + escape_string( slapi_entry_get_dn_const(e), ebuf ), 0, 0 ); + if (pb) { + PR_snprintf( errtext, sizeof( errtext ), + "missing \"objectclass\" value\n" ); + slapi_pblock_set( pb, SLAPI_PB_RESULT_TEXT, errtext ); + } + ret = 1; + goto out; + } + if ( isExtensibleObjectclass( ocname )) { /* * if the entry is an extensibleObject, just check to see if @@ -524,7 +538,7 @@ slapi_entry_schema_check( Slapi_PBlock *pb, Slapi_Entry *e ) /* we don't know about the oc; return an appropriate error message */ char ebuf[ BUFSIZ ]; char ebuf2[ BUFSIZ ]; - size_t ocname_len = ( ocname == NULL ) ? 0 : strlen( ocname ); + size_t ocname_len = strlen( ocname ); const char *extra_msg = ""; if ( ocname_len > 0 && isspace( ocname[ ocname_len-1 ] )) {
0
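The guard added by the commit above boils down to a common defensive pattern: check the attribute value for NULL before any strlen()/isspace() use and reject the entry with a clear message. A tiny stand-alone C illustration follows; check_objectclass_value is a made-up name, and the real code reports the error through the pblock and escape_string, which are not reproduced here.

#include <stdio.h>
#include <string.h>

/* Returns 0 when the value is usable, 1 when it must be rejected. */
static int check_objectclass_value(const char *ocname)
{
    if (ocname == NULL) {
        fprintf(stderr, "missing \"objectclass\" value\n");
        return 1;                      /* reject before touching strlen() */
    }
    return (strlen(ocname) > 0) ? 0 : 1;
}

int main(void)
{
    printf("%d\n", check_objectclass_value(NULL));      /* prints 1 */
    printf("%d\n", check_objectclass_value("person"));  /* prints 0 */
    return 0;
}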
9174cc61589ef4fc554ca82077bbddabc06e617b
389ds/389-ds-base
Bug 610281 - fix coverity Defect Type: Control flow issues - daemon.c:write_function() https://bugzilla.redhat.com/show_bug.cgi?id=610281 Resolves: bug 610281 Bug Description: fix coverity Defect Type: Control flow issues - daemon.c:write_function() Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: sentbytes < count is ok in the non error case - it just means we need to send some more data. Move the checking to the error case so we can print the number of bytes sent and expected. Platforms tested: RHEL5 x86_64, Fedora 14 x86_64 Flag Day: no Doc impact: no
commit 9174cc61589ef4fc554ca82077bbddabc06e617b Author: Rich Megginson <[email protected]> Date: Fri Aug 13 14:32:22 2010 -0600 Bug 610281 - fix coverity Defect Type: Control flow issues - daemon.c:write_function() https://bugzilla.redhat.com/show_bug.cgi?id=610281 Resolves: bug 610281 Bug Description: fix coverity Defect Type: Control flow issues - daemon.c:write_function() Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: sentbytes < count is ok in the non error case - it just means we need to send some more data. Move the checking to the error case so we can print the number of bytes sent and expected. Platforms tested: RHEL5 x86_64, Fedora 14 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 783aaaed6..249f247ed 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1712,6 +1712,11 @@ write_function( int ignore, const void *buffer, int count, struct lextiof_socket LDAPDebug(LDAP_DEBUG_ANY, "PR_Write(%d) " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", fd, prerr, slapd_pr_strerror( prerr )); + if (sentbytes < count) { + LDAPDebug(LDAP_DEBUG_CONNS, + "PR_Write(%d) - wrote only %d bytes (expected %d bytes) - 0 (EOF)\n", /* disconnected */ + fd, sentbytes, count); + } break; /* fatal error */ } } else if (bytes == 0) { /* disconnect */ @@ -1731,12 +1736,6 @@ write_function( int ignore, const void *buffer, int count, struct lextiof_socket fd, sentbytes, count); PR_SetError(PR_BUFFER_OVERFLOW_ERROR, EMSGSIZE); break; - } else if (sentbytes < count) { - LDAPDebug(LDAP_DEBUG_CONNS, - "PR_Write(%d) - wrote only %d bytes (expected %d bytes) - 0 (EOF)\n", /* disconnected */ - fd, sentbytes, count); - PR_SetError(PR_PIPE_ERROR, EPIPE); - break; } } }
0
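To show why a short write is not an error by itself, here is a minimal POSIX sketch of the same idea using send() instead of the NSPR PR_Write used by the server; write_all is an invented name. Partial writes simply loop and send the remainder, and only the error path reports how many bytes made it out, mirroring where the diagnostic was moved in the diff above.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Returns 0 on success, -1 on error. */
static int write_all(int fd, const void *buffer, size_t count)
{
    const char *p = buffer;
    size_t sent = 0;

    while (sent < count) {
        ssize_t n = send(fd, p + sent, count - sent, 0);
        if (n < 0) {
            if (errno == EINTR)
                continue;
            /* Only the error path reports how far we got. */
            fprintf(stderr, "send(%d) failed after %zu of %zu bytes: %s\n",
                    fd, sent, count, strerror(errno));
            return -1;
        }
        sent += (size_t)n;             /* a short write just means: send more */
    }
    return 0;
}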
675508c6b5482161510514055d0d42006471551b
389ds/389-ds-base
SASL IO sometimes loops with "error: would block" https://bugzilla.redhat.com/show_bug.cgi?id=526319 Resolves: bug 526319 Bug Description: SASL IO sometimes loops with "error: would block" Reviewed by: nkinder (Thanks!) Fix Description: The semantics for recv() are that it returns -1 for errors, 0 for connection closed, and non-zero for some bytes received. The sasl code was not using those semantics - it was returning 0 for successful read and -1 for error. Although I have not been able to reproduce the exact failure, what I believe is happening is that the initial read of the packet length in sasl_io_start_packet() works, and the sasl IO is received. At some point, the connection is closed by the client, and the PR_Recv return of 0 is not handled correctly, and somehow the errno gets set to EWOULDBLOCK. From this point on, PR_Recv() will return -1 (since the socket has been closed) and errno is not reset from EWOULDBLOCK. The fix is to make sure the sasl IO code handles the PR_Recv() return value correctly. Note that with CONNS (8) error log level, you may still occasionally see "would block" errors, but as long as they are not endlessly repeating, this should be ok. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no
commit 675508c6b5482161510514055d0d42006471551b Author: Rich Megginson <[email protected]> Date: Tue Sep 29 14:08:35 2009 -0600 SASL IO sometimes loops with "error: would block" https://bugzilla.redhat.com/show_bug.cgi?id=526319 Resolves: bug 526319 Bug Description: SASL IO sometimes loops with "error: would block" Reviewed by: nkinder (Thanks!) Fix Description: The semantics for recv() are that it returns -1 for errors, 0 for connection closed, and non-zero for some bytes received. The sasl code was not using those semantics - it was returning 0 for successful read and -1 for error. Although I have not been able to reproduce the exact failure, what I believe is happening is that the initial read of the packet length in sasl_io_start_packet() works, and the sasl IO is received. At some point, the connection is closed by the client, and the PR_Recv return of 0 is not handled correctly, and somehow the errno gets set to EWOULDBLOCK. From this point on, PR_Recv() will return -1 (since the socket has been closed) and errno is not reset from EWOULDBLOCK. The fix is to make sure the sasl IO code handles the PR_Recv() return value correctly. Note that with CONNS (8) error log level, you may still occasionally see "would block" errors, but as long as they are not endlessly repeating, this should be ok. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 59ae3a033..3b8986c37 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1695,7 +1695,7 @@ write_function( int ignore, const void *buffer, int count, struct lextiof_socket } else if (bytes == 0) { /* disconnect */ PRErrorCode prerr = PR_GetError(); LDAPDebug(LDAP_DEBUG_CONNS, - "PR_Recv(%d) - 0 (EOF) %d:%s\n", /* disconnected */ + "PR_Write(%d) - 0 (EOF) %d:%s\n", /* disconnected */ fd, prerr, slapd_pr_strerror(prerr)); PR_SetError(PR_PIPE_ERROR, EPIPE); break; diff --git a/ldap/servers/slapd/sasl_io.c b/ldap/servers/slapd/sasl_io.c index 3280a98bd..f0e403de5 100644 --- a/ldap/servers/slapd/sasl_io.c +++ b/ldap/servers/slapd/sasl_io.c @@ -194,6 +194,12 @@ sasl_get_io_private(PRFileDesc *fd) return sp; } +/* + * return values: + * 0 - connection was closed + * 1 - success + * -1 - error + */ static PRInt32 sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt32 *err) { @@ -212,9 +218,14 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt "read sasl packet length returned %d on connection %" NSPRIu64 "\n", ret, c->c_connid, 0 ); if (ret <= 0) { *err = PR_GetError(); - LDAPDebug( LDAP_DEBUG_ANY, - "sasl_io_start_packet: error reading sasl packet length on connection %" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) ); - return PR_FAILURE; + if (ret == 0) { + LDAPDebug1Arg( LDAP_DEBUG_CONNS, + "sasl_io_start_packet: connection closed while reading sasl packet length on connection %" NSPRIu64 "\n", c->c_connid ); + } else { + LDAPDebug( LDAP_DEBUG_CONNS, + "sasl_io_start_packet: error reading sasl packet length on connection %" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) ); + } + return ret; } /* * NOTE: A better way to do this would be to read the bytes and add them to @@ -224,11 +235,11 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt * perhaps only in error conditions, in which case the ret < 0 case above * will run */ - if (ret != 0 && ret < sizeof(buffer)) { + if (ret < sizeof(buffer)) { LDAPDebug( 
LDAP_DEBUG_ANY, "sasl_io_start_packet: failed - read only %d bytes of sasl packet length on connection %" NSPRIu64 "\n", ret, c->c_connid, 0 ); PR_SetError(PR_IO_ERROR, 0); - return PR_FAILURE; + return -1; } if (ret == sizeof(buffer)) { /* Decode the length (could use ntohl here ??) */ @@ -249,7 +260,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt packet_length, config_get_maxsasliosize(), 0); PR_SetError(PR_BUFFER_OVERFLOW_ERROR, 0); *err = PR_BUFFER_OVERFLOW_ERROR; - return PR_FAILURE; + return -1; } sasl_io_resize_encrypted_buffer(sp, packet_length); @@ -260,7 +271,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt sp->encrypted_buffer_offset = 4; } - return PR_SUCCESS; + return 1; } static PRInt32 @@ -276,11 +287,16 @@ sasl_io_read_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt3 bytes_remaining_to_read, c->c_connid ); ret = PR_Recv(fd->lower, sp->encrypted_buffer + sp->encrypted_buffer_offset, bytes_remaining_to_read, flags, timeout); - if (ret < 0) { + if (ret <= 0) { *err = PR_GetError(); - LDAPDebug( LDAP_DEBUG_ANY, - "sasl_io_read_packet: error reading sasl packet on connection %" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) ); - return PR_FAILURE; + if (ret == 0) { + LDAPDebug1Arg( LDAP_DEBUG_CONNS, + "sasl_io_read_packet: connection closed while reading sasl packet on connection %" NSPRIu64 "\n", c->c_connid ); + } else { + LDAPDebug( LDAP_DEBUG_CONNS, + "sasl_io_read_packet: error reading sasl packet on connection %" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) ); + } + return ret; } sp->encrypted_buffer_offset += ret; return ret; @@ -307,8 +323,8 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, if (!sasl_io_reading_packet(sp)) { /* First read the packet length and so on */ ret = sasl_io_start_packet(fd, flags, timeout, &err); - if (0 != ret) { - /* Most likely the i/o timed out */ + if (0 >= ret) { + /* timeout, connection closed, or error */ return ret; } } @@ -316,7 +332,7 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, * we now must read more data off the wire until we have the complete packet */ ret = sasl_io_read_packet(fd, flags, timeout, &err); - if (PR_FAILURE == ret) { + if (0 >= ret) { return ret; /* read packet will set pr error */ } /* If we have not read the packet yet, we cannot return any decrypted data to the
0
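The recv() contract restored by the fix above can be summarized in a few lines of plain POSIX C; read_packet_chunk is an invented name, and the server's actual code uses PR_Recv with NSPR error reporting rather than errno. The key point is that a return of 0 means the peer closed the connection and must not be treated as an error, so a stale EWOULDBLOCK is never reported for it.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Returns the recv() result unchanged so callers can tell the cases apart:
 * >0 bytes read, 0 peer closed, <0 error. */
static ssize_t read_packet_chunk(int fd, void *buf, size_t len)
{
    ssize_t n = recv(fd, buf, len, 0);

    if (n > 0)
        return n;                      /* got some bytes: caller keeps reading */
    if (n == 0) {
        /* Peer closed the connection: not an error, and errno is meaningless
         * here, so do not report it. */
        fprintf(stderr, "recv(%d): connection closed\n", fd);
        return 0;
    }
    fprintf(stderr, "recv(%d): error: %s\n", fd, strerror(errno));
    return -1;
}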
41fa124aeec3b6bc86f28d69aeccb0e02f382aeb
389ds/389-ds-base
Extend dirsrv SELinux policy interface. The dirsrv SELinux policy interface needed to be extended to allow the confined Admin Server the proper permissions to interact with the Directory Server.
commit 41fa124aeec3b6bc86f28d69aeccb0e02f382aeb Author: Nathan Kinder <[email protected]> Date: Thu Oct 22 14:56:06 2009 -0700 Extend dirsrv SELinux policy interface. The dirsrv SELinux policy interface needed to be extended to allow the confined Admin Server the proper permissions to interact with the Directory Server. diff --git a/selinux/dirsrv.if b/selinux/dirsrv.if index 17035293b..80b478f18 100644 --- a/selinux/dirsrv.if +++ b/selinux/dirsrv.if @@ -118,6 +118,24 @@ interface(`dirsrv_manage_var_run',` files_pid_filetrans($1, dirsrv_var_run_t, dir) ') +####################################### +## <summary> +## Allow a domain to read dirsrv /var/run files. +## </summary> +## <param name="domain"> +## <summary> +## Domain allowed access. +## </summary> +## </param> +# +interface(`dirsrv_read_var_run',` + gen_require(` + type dirsrv_var_run_t; + ') + allow $1 dirsrv_var_run_t:dir list_dir_perms; + allow $1 dirsrv_var_run_t:file read_file_perms; +') + ######################################## ## <summary> ## Manage dirsrv configuration files. @@ -152,8 +170,10 @@ interface(`dirsrv_exec_lib',` type dirsrv_lib_t; ') - allow $1 dirsrv_lib_t:dir { search getattr }; - allow $1 dirsrv_lib_t:file { read getattr open execute execute_no_trans ioctl}; + allow $1 dirsrv_lib_t:dir search_dir_perms; + allow $1 dirsrv_lib_t:file exec_file_perms; + # Not all platforms include ioctl in exec_file_perms + allow $1 dirsrv_lib_t:file ioctl; ') ######################################## @@ -171,6 +191,7 @@ interface(`dirsrv_read_share',` type dirsrv_share_t; ') - allow $1 dirsrv_share_t:dir { search getattr }; - allow $1 dirsrv_share_t:file { read getattr open }; + allow $1 dirsrv_share_t:dir list_dir_perms; + allow $1 dirsrv_share_t:file read_file_perms; + allow $1 dirsrv_share_t:lnk_file read; ')
0
d74ba635eb5202ba74b2c971a124be15d76f0480
389ds/389-ds-base
Ticket 49008 - Adjust CI test for new memberOf behavior Bug Description: In the current version the MO plugin now adds a valid objectclass to an entry. This breaks the test design as some operations were expected to fail, but now they pass. Fix Description: Set the auto add objectclass to an objectclass that does not allow memberOf. This now allows the test to work as designed. https://pagure.io/389-ds-base/issue/49008 Reviewed by: firstyear(Thanks!)
commit d74ba635eb5202ba74b2c971a124be15d76f0480 Author: Mark Reynolds <[email protected]> Date: Thu Jun 1 16:05:51 2017 -0400 Ticket 49008 - Adjust CI test for new memberOf behavior Bug Description: In the current version the MO plugin now adds a valid objectclass to an entry. This breaks the test design as some operations were expected to fail, but now they pass. Fix Description: Set the auto add objectclass to an objectclass that does not allow memberOf. This now allows the test work as designed. https://pagure.io/389-ds-base/issue/49008 Reviewed by: firstyear(Thanks!) diff --git a/dirsrvtests/tests/tickets/ticket49008_test.py b/dirsrvtests/tests/tickets/ticket49008_test.py index 673b2f926..332cf229a 100644 --- a/dirsrvtests/tests/tickets/ticket49008_test.py +++ b/dirsrvtests/tests/tickets/ticket49008_test.py @@ -32,7 +32,12 @@ def test_ticket49008(T): A.agreement.pause(AtoC) C.agreement.pause(CtoA) + # Enable memberOf on Master B B.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Set the auto OC to an objectclass that does NOT allow memberOf + B.modify_s('cn=MemberOf Plugin,cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'memberofAutoAddOC', 'referral')]) B.restart(timeout=10) # add a few entries allowing memberof
0
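A minimal stand-alone sketch of the configuration change in the commit above, using python-ldap directly instead of the lib389 wrapper. The server URI and bind credentials are placeholders; as in the test, the instance would still be restarted afterwards.

import ldap

# Placeholders: point these at a real instance and its Directory Manager.
conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

# 'referral' does not allow the memberOf attribute, so the plugin can no
# longer silently extend entries, and the operations the test expects to
# fail will fail again.
conn.modify_s(
    "cn=MemberOf Plugin,cn=plugins,cn=config",
    [(ldap.MOD_REPLACE, "memberofAutoAddOC", [b"referral"])],
)
conn.unbind_s()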
8e4f0baa792a9e500bd3090592dd48195a784711
389ds/389-ds-base
Ticket 50 - Add db2* tasks to dsctl Bug Description: To make dsctl complete, we need to add the various db2* tasks to allow backup and restore. Fix Description: Add the tasks, along with their tests and some minor fixes to lib389 to support them. https://pagure.io/lib389/issue/50 Author: wibrown Review by: tbordaz, ilias95 (Thanks!)
commit 8e4f0baa792a9e500bd3090592dd48195a784711 Author: William Brown <[email protected]> Date: Tue Jun 6 16:58:42 2017 +1000 Ticket 50 - Add db2* tasks to dsctl Bug Description: To make dsctl complete, we need to add the various db2* tasks to allow backup and restore. Fix Description: Add the tasks, along with their tests and some minor fixes to lib389 to support there. https://pagure.io/lib389/issue/50 Author: wibrown Review by: tbordaz, ilias95 (Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 9e2465f52..ae3e6f7e8 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -2587,52 +2587,60 @@ class DirSrv(SimpleLDAPObject, object): # server is stopped) # def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt, - *import_files): + import_file): """ @param bename - The backend name of the database to import @param suffixes - List/tuple of suffixes to import @param excludeSuffixes - List/tuple of suffixes to exclude from import @param encrypt - Perform attribute encryption - @param input_files - Files to import: file, file, file + @param input_file - File to import: file @return - True if import succeeded """ DirSrvTools.lib389User(user=DEFAULT_USER) prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd') + if self.status(): + log.error("ldif2db: Can not operate while directory server is running") + return False + if not bename and not suffixes: log.error("ldif2db: backend name or suffix missing") return False - for ldif in import_files: - if not os.path.isfile(ldif): - log.error("ldif2db: Can't find file: %s" % ldif) - return False + if not os.path.isfile(import_file): + log.error("ldif2db: Can't find file: %s" % import_file) + return False - cmd = '%s ldif2db -D %s' % (prog, self.get_config_dir()) + cmd = [ + prog, + 'ldif2db', + '-D', self.get_config_dir(), + '-i', import_file, + ] if bename: - cmd = cmd + ' -n ' + bename + cmd.append('-n') + cmd.append(bename) if suffixes: for suffix in suffixes: - cmd = cmd + ' -s ' + suffix + cmd.append('-s') + cmd.append(suffix) if excludeSuffixes: for excludeSuffix in excludeSuffixes: cmd = cmd + ' -x ' + excludeSuffix + cmd.append('-x') + cmd.append(excludeSuffix) if encrypt: - cmd = cmd + ' -E' - for ldif in import_files: - cmd = cmd + ' -i ' + ldif + cmd.append('-E') - self.stop(timeout=10) - log.info('Running script: %s' % cmd) - result = True - try: - os.system(cmd) - except: - log.error("ldif2db: error executing %s" % cmd) - result = False - self.start(timeout=10) + result = subprocess.check_output(cmd) + u_result = ensure_str(result) - return result + log.debug("ldif2db output: BEGIN") + for line in u_result.split("\n"): + log.debug(line) + log.debug("ldif2db output: END") + + return True def db2ldif(self, bename, suffixes, excludeSuffixes, encrypt, repl_data, outputfile): @@ -2648,39 +2656,48 @@ class DirSrv(SimpleLDAPObject, object): DirSrvTools.lib389User(user=DEFAULT_USER) prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd') + if self.status(): + log.error("db2ldif: Can not operate while directory server is running") + return False + if not bename and not suffixes: log.error("db2ldif: backend name or suffix missing") return False - # The shell wrapper is not always reliable, so bypass it. We want to - # kill it off anyway! 
- cmd = '%s db2ldif -D %s' % (prog, self.get_config_dir()) + cmd = [ + prog, + 'db2ldif', + '-D', self.get_config_dir() + ] if bename: - cmd = cmd + ' -n ' + bename + cmd.append('-n') + cmd.append(bename) if suffixes: for suffix in suffixes: - cmd = cmd + ' -s ' + suffix + cmd.append('-s') + cmd.append(suffix) if excludeSuffixes: for excludeSuffix in excludeSuffixes: cmd = cmd + ' -x ' + excludeSuffix + cmd.append('-x') + cmd.append(excludeSuffix) if encrypt: - cmd = cmd + ' -E' + cmd.append('-E') if repl_data: - cmd = cmd + ' -r' + cmd.append('-r') if outputfile: - cmd = cmd + ' -a ' + outputfile + cmd.append('-a') + cmd.append(outputfile) - self.stop(timeout=10) - log.info('Running script: %s' % cmd) - result = True - try: - os.system(cmd) - except: - log.error("db2ldif: error executing %s" % cmd) - result = False - self.start(timeout=10) + result = subprocess.check_output(cmd) + u_result = ensure_str(result) - return result + log.debug("db2ldif output: BEGIN") + for line in u_result.split("\n"): + log.debug(line) + log.debug("db2ldif output: END") + + return True def bak2db(self, archive_dir, bename=None): """ @@ -2689,27 +2706,30 @@ class DirSrv(SimpleLDAPObject, object): @return - True if the restore succeeded """ DirSrvTools.lib389User(user=DEFAULT_USER) - prog = os.path.join(self.ds_paths.sbin_dir, BAK2DB) + prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd') + + if self.status(): + log.error("bak2db: Can not operate while directory server is running") + return False if not archive_dir: log.error("bak2db: backup directory missing") return False - cmd = '%s %s -Z %s' % (prog, archive_dir, self.serverid) - if bename: - cmd = cmd + ' -n ' + bename + result = subprocess.check_output([ + prog, + 'archive2db', + '-a', archive_dir, + '-D', self.get_config_dir() + ]) + u_result = ensure_str(result) - self.stop(timeout=10) - log.info('Running script: %s' % cmd) - result = True - try: - os.system(cmd) - except: - log.error("bak2db: error executing %s" % cmd) - result = False - self.start(timeout=10) + log.debug("bak2db output: BEGIN") + for line in u_result.split("\n"): + log.debug(line) + log.debug("bak2db output: END") - return result + return True def db2bak(self, archive_dir): """ @@ -2717,25 +2737,30 @@ class DirSrv(SimpleLDAPObject, object): @return - True if the backup succeeded """ DirSrvTools.lib389User(user=DEFAULT_USER) - prog = os.path.join(self.ds_paths.sbin_dir, DB2BAK) + prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd') + + if self.status(): + log.error("db2bak: Can not operate while directory server is running") + return False if not archive_dir: - log.error("db2bak: backup directory missing") + log.error("db2bak: archive directory missing") return False - cmd = '%s %s -Z %s' % (prog, archive_dir, self.serverid) + result = subprocess.check_output([ + prog, + 'db2archive', + '-a', archive_dir, + '-D', self.get_config_dir() + ]) + u_result = ensure_str(result) - self.stop(timeout=10) - log.info('Running script: %s' % cmd) - result = True - try: - os.system(cmd) - except: - log.error("db2bak: error executing %s" % cmd) - result = False - self.start(timeout=10) + log.debug("db2bak output: BEGIN") + for line in u_result.split("\n"): + log.debug(line) + log.debug("db2bak output: END") - return result + return True def db2index(self, bename=None, suffixes=None, attrs=None, vlvTag=None): """ diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py index 276f478e7..f2fd53ca5 100644 --- a/src/lib389/lib389/cli_ctl/dbtasks.py +++ 
b/src/lib389/lib389/cli_ctl/dbtasks.py @@ -7,12 +7,50 @@ # --- END COPYRIGHT BLOCK --- def dbtasks_db2index(inst, log, args): - # inst.db2index(suffixes=[args.suffix,]) inst.db2index(bename=args.backend) +def dbtasks_db2bak(inst, log, args): + # Needs an output name? + inst.db2bak(args.archive) + log.info("db2bak successful") + +def dbtasks_bak2db(inst, log, args): + # Needs the archive to restore. + inst.bak2db(args.archive) + log.info("bak2db successful") + +def dbtasks_db2ldif(inst, log, args): + inst.db2ldif(bename=args.backend, encrypt=args.encrypt, repl_data=args.replication, outputfile=args.ldif, suffixes=None, excludeSuffixes=None) + log.info("db2ldif successful") + +def dbtasks_ldif2db(inst, log, args): + inst.ldif2db(bename=args.backend, encrypt=args.encrypt, import_file=args.ldif, suffixes=None, excludeSuffixes=None) + log.info("ldif2db successful") + def create_parser(subcommands): db2index_parser = subcommands.add_parser('db2index', help="Initialise a reindex of the server database. The server must be stopped for this to proceed.") # db2index_parser.add_argument('suffix', help="The suffix to reindex. IE dc=example,dc=com.") db2index_parser.add_argument('backend', help="The backend to reindex. IE userRoot") db2index_parser.set_defaults(func=dbtasks_db2index) + db2bak_parser = subcommands.add_parser('db2bak', help="Initialise a BDB backup of the database. The server must be stopped for this to proceed.") + db2bak_parser.add_argument('archive', help="The destination for the archive. This will be created during the db2bak process.") + db2bak_parser.set_defaults(func=dbtasks_db2bak) + + db2ldif_parser = subcommands.add_parser('db2ldif', help="Initialise an LDIF dump of the database. The server must be stopped for this to proceed.") + db2ldif_parser.add_argument('backend', help="The backend to output as an LDIF. IE userRoot") + db2ldif_parser.add_argument('ldif', help="The path to the ldif output location.") + db2ldif_parser.add_argument('--replication', help="Export replication information, suitable for importing on a new consumer or backups.", default=False, action='store_true') + db2ldif_parser.add_argument('--encrypted', help="Export encrypted attributes", default=False, action='store_true') + db2ldif_parser.set_defaults(func=dbtasks_db2ldif) + + bak2db_parser = subcommands.add_parser('bak2db', help="Restore a BDB backup of the database. The server must be stopped for this to proceed.") + bak2db_parser.add_argument('archive', help="The archive to restore. This will erase all current server databases.") + bak2db_parser.set_defaults(func=dbtasks_bak2db) + + ldif2db_parser = subcommands.add_parser('ldif2db', help="Restore an LDIF dump of the database. The server must be stopped for this to proceed.") + ldif2db_parser.add_argument('backend', help="The backend to restore from an LDIF. IE userRoot") + db2ldif_parser.add_argument('ldif', help="The path to the ldif to import") + ldif2db_parser.add_argument('--encrypted', help="Import encrypted attributes", default=False, action='store_true') + ldif2db_parser.set_defaults(func=dbtasks_ldif2db) + diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index 996c5dff4..f7468d104 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -336,6 +336,10 @@ class SetupDs(object): except OSError: pass os.chown(slapd[path], slapd['user_uid'], slapd['group_gid']) + ### Warning! We need to down the directory under db too for .restore to work. + # See dblayer.c for more! 
+ db_parent = os.path.join(slapd['db_dir'], '..') + os.chown(db_parent, slapd['user_uid'], slapd['group_gid']) # Copy correct data to the paths. # Copy in the schema diff --git a/src/lib389/lib389/tests/cli/__init__.py b/src/lib389/lib389/tests/cli/__init__.py index 9a57230c6..02cfc5103 100644 --- a/src/lib389/lib389/tests/cli/__init__.py +++ b/src/lib389/lib389/tests/cli/__init__.py @@ -15,6 +15,8 @@ from lib389.instance.setup import SetupDs from lib389.instance.options import General2Base, Slapd2Base from lib389._constants import * +from lib389.configurations import get_sample_entries + INSTANCE_PORT = 54321 INSTANCE_SERVERID = 'standalone' @@ -28,9 +30,8 @@ class TopologyInstance(object): self.logcap = logcap # Need a teardown to destroy the instance. [email protected] [email protected](scope="module") def topology(request): - lc = LogCapture() instance = DirSrv(verbose=DEBUGGING) instance.log.debug("Instance allocated") @@ -73,3 +74,31 @@ def topology(request): request.addfinalizer(fin) return TopologyInstance(instance, lc) + + [email protected](scope="module") +def topology_be_latest(topology): + be = topology.standalone.backends.create(properties={ + 'cn': 'userRoot', + 'suffix' : DEFAULT_SUFFIX, + }) + # Now apply sample entries + centries = get_sample_entries(INSTALL_LATEST_CONFIG) + cent = centries(topology.standalone, DEFAULT_SUFFIX) + cent.apply() + return topology + + [email protected](scope="module") +def topology_be_001003006(topology): + be = topology.standalone.backends.create(properties={ + 'cn': 'userRoot', + 'suffix' : DEFAULT_SUFFIX, + }) + # Now apply sample entries + centries = get_sample_entries('001003006') + cent = centries(topology.standalone, DEFAULT_SUFFIX) + cent.apply() + return topology + + diff --git a/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py b/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py new file mode 100644 index 000000000..f8abb5303 --- /dev/null +++ b/src/lib389/lib389/tests/cli/ctl_dbtasks_test.py @@ -0,0 +1,71 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +# Test the cli tools from the dsctl command for correct behaviour. + +import os +import pytest +from lib389.cli_ctl.dbtasks import dbtasks_db2index, dbtasks_db2bak, dbtasks_db2ldif, dbtasks_ldif2db, dbtasks_bak2db + +from lib389.cli_base import LogCapture, FakeArgs +from lib389.tests.cli import topology, topology_be_latest + +def test_db2index(topology): + pass + +def test_db2bak_bak2db(topology_be_latest): + standalone = topology_be_latest.standalone + standalone.stop() + args = FakeArgs() + args.archive = os.path.join(standalone.get_bak_dir(), "testdb2bak") + # Stop the instance + dbtasks_db2bak(standalone, topology_be_latest.logcap.log, args) + # Assert none. + assert topology_be_latest.logcap.contains("db2bak successful") + topology_be_latest.logcap.flush() + # We can re-use the same arguments + dbtasks_bak2db(standalone, topology_be_latest.logcap.log, args) + # Assert none. + assert topology_be_latest.logcap.contains("bak2db successful") + +def test_ldif2db_db2ldif_no_repl(topology_be_latest): + standalone = topology_be_latest.standalone + standalone.stop() + args = FakeArgs() + args.backend = 'userRoot' + args.ldif = os.path.join(standalone.get_ldif_dir(), "test.ldif") + args.encrypt = False + args.replication = False + # Stop the instance + dbtasks_db2ldif(standalone, topology_be_latest.logcap.log, args) + # Assert none. 
+ assert topology_be_latest.logcap.contains("db2ldif successful") + topology_be_latest.logcap.flush() + # We can re-use the same arguments + dbtasks_ldif2db(standalone, topology_be_latest.logcap.log, args) + # Assert none. + assert topology_be_latest.logcap.contains("ldif2db successful") + +def test_ldif2db_db2ldif_repl(topology_be_latest): + standalone = topology_be_latest.standalone + standalone.stop() + args = FakeArgs() + args.backend = 'userRoot' + args.ldif = os.path.join(standalone.get_ldif_dir(), "test.ldif") + args.encrypt = False + args.replication = False + args.archive = os.path.join(standalone.get_ldif_dir(), "test.ldif") + # Stop the instance + dbtasks_db2ldif(standalone, topology_be_latest.logcap.log, args) + # Assert none. + assert topology_be_latest.logcap.contains("db2ldif successful") + topology_be_latest.logcap.flush() + # We can re-use the same arguments + dbtasks_ldif2db(standalone, topology_be_latest.logcap.log, args) + # Assert none. + assert topology_be_latest.logcap.contains("ldif2db successful")
0
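A minimal sketch modelled on the new ctl_dbtasks_test.py above: the db2bak/bak2db handlers are plain functions taking (instance, log, args), so they can be driven with FakeArgs. Here `inst` is assumed to be an existing lib389 DirSrv object and the archive name is only illustrative.

import os
import logging

from lib389.cli_base import FakeArgs
from lib389.cli_ctl.dbtasks import dbtasks_db2bak, dbtasks_bak2db

log = logging.getLogger(__name__)

def backup_and_restore(inst):
    # The db2* tasks refuse to run while the server is up, so stop it first.
    inst.stop()
    args = FakeArgs()
    args.archive = os.path.join(inst.get_bak_dir(), "example_backup")
    dbtasks_db2bak(inst, log, args)   # runs 'ns-slapd db2archive' under the hood
    dbtasks_bak2db(inst, log, args)   # restores the same archive via 'archive2db'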
e4ed5a8a5a27b056dfa99bc92c106c1ecf8d7c0b
389ds/389-ds-base
Issue 5628 - Handle graceful timeout in CI tests (#5657) Issue: Sometimes a CI test fails because the 6-hour GitHub workflow timeout is triggered, and in that case there is no data to help troubleshooting except that a timeout occurred. Solution: Implement a 5-hour timeout in the topologies fixture so that test result artefacts get collected before the GitHub timeout. When the timeout occurs, the pytest test fails with a TimeoutError exception and ns-slapd gets killed first with SIGTERM, then with SIGQUIT (to get a core file in case of deadlock). Individual test modules may configure their own timeout by adding an autouse fixture (see dirsrvtests/tests/suites/lib389/timeout_test.py) Issue: 5628 Reviewed by: @mreynolds389
commit e4ed5a8a5a27b056dfa99bc92c106c1ecf8d7c0b Author: progier389 <[email protected]> Date: Mon Feb 13 16:59:00 2023 +0100 Issue 5628 - Handle graceful timeout in CI tests (#5657) Issue: Sometime CI test fails because github workflow timeout of 6 hour is triggered and in this case there is no data to help troubleshooting except that timeout occurred. Solution: Implement a 5 hour timeout in topologies fixture so that test result artefacts get collected before github timeout. when the timeout occurs: the pytest test fails because of a TimeoutError exception and ns-slapd get killed first with SIGTERM then with SIGQUIT ( to get the core file in case of deadlock ) Specific Test modules may configure their own timeout by adding an autouse fixture (see dirsrvtests/tests/suites/lib389/timeout_test.py) Issue: 5628 Reviewed by: @mreynolds389 diff --git a/dirsrvtests/tests/suites/lib389/timeout_test.py b/dirsrvtests/tests/suites/lib389/timeout_test.py new file mode 100644 index 000000000..97e6abce9 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/timeout_test.py @@ -0,0 +1,60 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import * +from lib389.topologies import topology_st as topo, set_timeout + +logging.basicConfig(format='%(asctime)s %(message)s', force=True) +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) +# create console handler with a higher log level +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +# create formatter and add it to the handlers +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +# add the handlers to logger +log.addHandler(ch) + +TEST_TIMEOUT = 150 + [email protected](autouse=True, scope="module") +def init_timeout(): + set_timeout(TEST_TIMEOUT) + +def test_timeout(topo): + """Specify a test case purpose or name here + + :id: 4a2917d2-ad4c-44a7-aa5f-daad26d1d36e + :setup: Standalone Instance + :steps: + 1. Fill in test case steps here + 2. And indent them like this (RST format requirement) + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + with pytest.raises(TimeoutError): + log.info("Start waiting %d seconds" % TEST_TIMEOUT ) + time.sleep(TEST_TIMEOUT) + log.info("End waiting") + for inst in topo: + assert inst.status() is False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index f4f2dbe10..d44299c5b 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -9,6 +9,10 @@ import os import logging import socket # For hostname detection for GSSAPI tests +import time +import subprocess +from signal import SIGALRM, alarm, signal +from datetime import timedelta import pytest from lib389 import DirSrv from lib389.utils import generate_ds_params, is_fips @@ -28,6 +32,33 @@ else: log = logging.getLogger(__name__) +# Github kills workflow after 6 hours so lets keep 1 hour margin +# For workflow and test initialization, and artifacts collection +DEFAULT_TEST_TIMEOUT = 5 * 3600 + +_test_timeout = DEFAULT_TEST_TIMEOUT + + +def set_timeout(timeout): + """Set the test timeout. 
+ There is an example about how to use it in + https://github.com/389ds/389-ds-base/tree/main/dirsrvtests/tests/suites/lib389/timeout_test.py + + :param timeout: timeout in seconds + 0 for no timeout + negative value: reset default timeout + :type timeout: int + + :return - None + """ + global _test_timeout + if timeout < 0: + _test_timeout = DEFAULT_TEST_TIMEOUT + else: + _test_timeout = timeout + log.info(f"Set Topologies timeout to {str(timedelta(seconds=_test_timeout))}") + + def _remove_ssca_db(topology): ssca = NssSsl(dbpath=topology[0].get_ssca_dir()) if ssca._db_exists(): @@ -126,7 +157,7 @@ def _create_instances(topo_dict, suffix): return TopologyMain(standalones=ins, suppliers=ms, consumers=cs, hubs=hs) -def create_topology(topo_dict, suffix=DEFAULT_SUFFIX): +def create_topology(topo_dict, suffix=DEFAULT_SUFFIX, request=None, cleanup_cb=None): """Create a requested topology. Cascading replication scenario isn't supported :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.SUPPLIER: num, @@ -134,6 +165,10 @@ def create_topology(topo_dict, suffix=DEFAULT_SUFFIX): :type topo_dict: dict :param suffix: a suffix for the replication :type suffix: str + :param request: The pytest request + :type request: FixtureRequest + :param cleanup_cb: a callback for additional cleanup task + :type cleanup_cb: Callable[[TopologyMain], None] :return - TopologyMain object """ @@ -141,19 +176,57 @@ def create_topology(topo_dict, suffix=DEFAULT_SUFFIX): if not topo_dict: ValueError("You need to specify the dict. For instance: {ReplicaRole.STANDALONE: 1}") - if ReplicaRole.HUB in topo_dict.keys(): - NotImplementedError("Cascading replication scenario isn't supported." - "Please, use existing topology or create your own.") + if cleanup_cb and not request: + ValueError("You need to specify the pytest fixture request when specifying cleanup callback.") topo = _create_instances(topo_dict, suffix) + # register topo finalizer + log.info(f"Topology request is {topo}") + if request: + log.info(f"Topology has a request") + def fin(): + alarm(0) + [inst.stop() for inst in topo] + if DEBUGGING is None: + if cleanup_cb: + cleanup_cb(topo) + if not _remove_ssca_db(topo): + log.warning("Failed to remove the CA certificate database during the tescase cleanup phase.") + for inst in topo: + if inst.exists(): + inst.delete() + + request.addfinalizer(fin) + + # Timeout management + def timeout(signum, frame): + # Lets try to stop gracefully all instances and off-line "tasks". + # In case of deadlock or loop a thread will not be able to finish + # and the server will not die. + log.error("Timeout. kill ns-slapd processes with SIGTERM.") + subprocess.run(["/usr/bin/pkill", "--signal", "TERM", "ns-slapd",], stderr=subprocess.STDOUT) + time.sleep(120) + # Everything should be stopped except stuck instances + # lets kill with a signal that generate core and could not + # be confused with SIGSEGV or SIGBUS + log.error("Timeout. kill remaining ns-slapd processes with SIGQUIT.") + subprocess.run(["/usr/bin/pkill", "--signal", "QUIT", "ns-slapd",], stderr=subprocess.STDOUT) + # let enough time to write the cores + time.sleep(120) + raise TimeoutError(f"Test timed out after {str(timedelta(seconds=_test_timeout))}") + + signal(SIGALRM, timeout) + log.info(f"Armed timeout of {str(timedelta(seconds=_test_timeout))}") + alarm(_test_timeout) + # Start with a single supplier, and create it "first". 
first_supplier = None + repl = ReplicationManager(DEFAULT_SUFFIX) try: first_supplier = list(topo.ms.values())[0] log.info("Creating replication topology.") # Now get the first supplier ready. - repl = ReplicationManager(DEFAULT_SUFFIX) repl.create_first_supplier(first_supplier) except IndexError: pass @@ -176,15 +249,46 @@ def create_topology(topo_dict, suffix=DEFAULT_SUFFIX): log.info("Ensuring supplier %s to %s ..." % (mo.serverid, mi.serverid)) repl.ensure_agreement(mo, mi) - # Add supplier -> consumer agreements. - for c in topo.cs.values(): - log.info("Joining consumer %s from %s ..." % (c.serverid, first_supplier.serverid)) - repl.join_consumer(first_supplier, c) - - for m in topo.ms.values(): + if ReplicaRole.HUB in topo_dict.keys(): + first_hub = list(topo.hs.values())[0] + # Initialize the hubs + for h in topo.hs.values(): + log.info("Joining hub %s from %s ..." % (h.serverid, first_supplier.serverid)) + repl.join_hub(first_supplier, h) + + # Initialize the consumers + # Add hub -> consumer agreements. + for c in topo.cs.values(): + log.info("Joining consumer %s from %s ..." % (c.serverid, first_hub.serverid)) + repl.join_consumer(first_hub, c) + + # Mesh the supplier<->hub agreements. + for mo in topo.ms.values(): + for h in topo.hs.values(): + if mo is not first_supplier: + log.info("Ensuring supplier %s to hub %s ..." % (mo.serverid, h.serverid)) + repl.ensure_agreement(mo, h) + log.info("Ensuring hub %s to supplier %s ..." % (h.serverid, mo.serverid)) + repl.ensure_agreement(h, mo) + + # Mesh the hub->consumer agreements. + for h in topo.hs.values(): + if h is first_hub: + continue + for c in topo.cs.values(): + log.info("Ensuring consumer %s from hub %s ..." % (c.serverid, h.serverid)) + repl.ensure_agreement(h, c) + else: + # Master(s) -> Consumer(s) topologies + # Add supplier -> consumer agreements. for c in topo.cs.values(): - log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid)) - repl.ensure_agreement(m, c) + log.info("Joining consumer %s from %s ..." % (c.serverid, first_supplier.serverid)) + repl.join_consumer(first_supplier, c) + + for m in topo.ms.values(): + for c in topo.cs.values(): + log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid)) + repl.ensure_agreement(m, c) # Clear out the tmp dir for instance in topo: @@ -253,16 +357,7 @@ class TopologyMain(object): def topology_st(request): """Create DS standalone instance""" - topology = create_topology({ReplicaRole.STANDALONE: 1}) - - def fin(): - topology.standalone.stop() - if DEBUGGING is None: - assert _remove_ssca_db(topology) - if topology.standalone.exists(): - topology.standalone.delete() - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.STANDALONE: 1}, request=request) topology.logcap = LogCapture() return topology @@ -284,7 +379,8 @@ def topology_st_gssapi(request): REALM = hostname[1].upper() - topology = create_topology({ReplicaRole.STANDALONE: 1}) + topology = create_topology({ReplicaRole.STANDALONE: 1}, request=request, + cleanup_cb = lambda x: krb.destroy_realm()) # Fix the hostname. 
topology.standalone.host = socket.gethostname() @@ -324,16 +420,6 @@ def topology_st_gssapi(request): topology.standalone.clearTmpDir(__file__) - def fin(): - topology.standalone.stop() - if DEBUGGING is None: - assert _remove_ssca_db(topology) - if topology.standalone.exists(): - topology.standalone.delete() - krb.destroy_realm() - - request.addfinalizer(fin) - topology.logcap = LogCapture() return topology @@ -342,21 +428,12 @@ def topology_st_gssapi(request): def topology_no_sample(request): """Create instance without sample entries to reproduce not initialised database""" - topology = create_topology({ReplicaRole.STANDALONE: 1}, None) + topology = create_topology({ReplicaRole.STANDALONE: 1}, None, request=request) topology.standalone.backends.create(properties={ 'cn': 'userRoot', 'nsslapd-suffix': DEFAULT_SUFFIX, }) - def fin(): - topology.standalone.stop() - if DEBUGGING is None: - assert _remove_ssca_db(topology) - if topology.standalone.exists(): - topology.standalone.delete() - - request.addfinalizer(fin) - topology.logcap = LogCapture() return topology @@ -365,17 +442,7 @@ def topology_no_sample(request): def topology_i2(request): """Create two instance DS deployment""" - topology = create_topology({ReplicaRole.STANDALONE: 2}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.STANDALONE: 2}, request=request) topology.logcap = LogCapture() return topology @@ -385,17 +452,7 @@ def topology_i2(request): def topology_i3(request): """Create three instance DS deployment""" - topology = create_topology({ReplicaRole.STANDALONE: 3}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.STANDALONE: 3}, request=request) topology.logcap = LogCapture() return topology @@ -404,18 +461,7 @@ def topology_i3(request): def topology_m1(request): """Create Replication Deployment with one supplier and one consumer""" - topology = create_topology({ReplicaRole.SUPPLIER: 1}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.SUPPLIER: 1}, request=request) topology.logcap = LogCapture() return topology @@ -425,17 +471,7 @@ def topology_m1c1(request): """Create Replication Deployment with one supplier and one consumer""" topology = create_topology({ReplicaRole.SUPPLIER: 1, - ReplicaRole.CONSUMER: 1}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + ReplicaRole.CONSUMER: 1}, request=request) topology.logcap = LogCapture() return topology @@ -445,17 +481,7 @@ def topology_m1c1(request): def topology_m2(request): """Create Replication Deployment with two suppliers""" - topology = create_topology({ReplicaRole.SUPPLIER: 2}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.SUPPLIER: 2}, request=request) 
topology.logcap = LogCapture() return topology @@ -465,17 +491,7 @@ def topology_m2(request): def topology_m3(request): """Create Replication Deployment with three suppliers""" - topology = create_topology({ReplicaRole.SUPPLIER: 3}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.SUPPLIER: 3}, request=request) topology.logcap = LogCapture() return topology @@ -485,17 +501,7 @@ def topology_m3(request): def topology_m4(request): """Create Replication Deployment with four suppliers""" - topology = create_topology({ReplicaRole.SUPPLIER: 4}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + topology = create_topology({ReplicaRole.SUPPLIER: 4}, request=request) topology.logcap = LogCapture() return topology @@ -506,17 +512,7 @@ def topology_m2c2(request): """Create Replication Deployment with two suppliers and two consumers""" topology = create_topology({ReplicaRole.SUPPLIER: 2, - ReplicaRole.CONSUMER: 2}) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + ReplicaRole.CONSUMER: 2}, request=request) topology.logcap = LogCapture() return topology @@ -527,35 +523,7 @@ def topology_m1h1c1(request): """Create Replication Deployment with one supplier, one consumer and one hub""" topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1} - topology = _create_instances(topo_roles, DEFAULT_SUFFIX) - supplier = topology.ms["supplier1"] - hub = topology.hs["hub1"] - consumer = topology.cs["consumer1"] - - # Start with the supplier, and create it "first". - log.info("Creating replication topology.") - # Now get the first supplier ready. - repl = ReplicationManager(DEFAULT_SUFFIX) - repl.create_first_supplier(supplier) - # Finish the topology creation - repl.join_hub(supplier, hub) - repl.join_consumer(hub, consumer) - - repl.test_replication(supplier, consumer) - - # Clear out the tmp dir - for instance in topology: - instance.clearTmpDir(__file__) - - def fin(): - [inst.stop() for inst in topology] - if DEBUGGING is None: - assert _remove_ssca_db(topology) - for inst in topology: - if inst.exists(): - inst.delete() - - request.addfinalizer(fin) + topology = create_topology(topo_roles, request=request) topology.logcap = LogCapture() return topology
0
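A minimal sketch of the per-module override described in the commit above, in the spirit of the new timeout_test.py: an autouse, module-scoped fixture arms a shorter alarm before the topology fixture runs. The 600-second value is only an example.

import pytest

from lib389.topologies import topology_st, set_timeout

@pytest.fixture(autouse=True, scope="module")
def shorter_timeout():
    # 0 disables the timeout, a negative value restores the 5-hour default.
    set_timeout(600)

def test_with_custom_timeout(topology_st):
    # Every test in this module that pulls in a lib389 topology fixture is
    # now covered by the 600-second alarm instead of the default.
    assert topology_st.standalone.status()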
16386dde80e62b3d45697b7538d84dcb4a09255a
389ds/389-ds-base
Ticket #47700 - Unresolved external symbol references break loading of the ACL plugin Description of problem by [email protected]: Various functions in the directory server are declared with extern "C" linkage, causing the compiler to emit references to an unmangled symbol name, but because their definitions don't match the declarations, and the definitions are compiled using the C++ compiler, the implementations are emitted as mangled symbols. Fix description: Using ld option "-z defs", cleaned all the undefined references. https://fedorahosted.org/389/ticket/47700 Reviewed by [email protected] (Thank you, Rich!!)
commit 16386dde80e62b3d45697b7538d84dcb4a09255a Author: Noriko Hosoi <[email protected]> Date: Tue Feb 18 15:28:54 2014 -0800 Ticket #47700 - Unresolved external symbol references break loading of the ACL plugin Description of problem by [email protected]: Various functions in the directory server are declared with extern "C" linkage, causing the compiler to emit references to an unmangled symbol name, but because their definitions don't match the declarations, and the definitions are compiled using the C++ compiler, the implementations are emitted as mangled symbols. Fix description: Using ld option "-z defs", cleaned all the undefined references. https://fedorahosted.org/389/ticket/47700 Reviewed by [email protected] (Thank you, Rich!!) diff --git a/include/base/ereport.h b/include/base/ereport.h index 40813ea28..e97be14ed 100644 --- a/include/base/ereport.h +++ b/include/base/ereport.h @@ -65,13 +65,13 @@ NSPR_BEGIN_EXTERN_C * the current date. */ -NSAPI_PUBLIC int INTereport(int degree, const char *fmt, ...) +NSAPI_PUBLIC int INTereport(int degree, char *fmt, ...) #ifdef __GNUC__ __attribute__ ((format (printf, 2, 3))); #else ; #endif -NSAPI_PUBLIC int INTereport_v(int degree, const char *fmt, va_list args); +NSAPI_PUBLIC int INTereport_v(int degree, char *fmt, va_list args); NSPR_END_EXTERN_C diff --git a/include/libaccess/aclerror.h b/include/libaccess/aclerror.h index 3aff2c717..32610dcce 100644 --- a/include/libaccess/aclerror.h +++ b/include/libaccess/aclerror.h @@ -219,16 +219,6 @@ NSPR_END_EXTERN_C /* ACL_DatabaseRegister */ #define ACLERR4500 4500 /* database name is missing */ -/* ACL_ReadDbMapFile */ -#define ACLERR4600 4600 /* Error reading the Map file */ -#define ACLERR4610 4610 /* Couldn't determine dbtype */ -#define ACLERR4620 4620 /* Missing URL for database */ -#define ACLERR4630 4630 /* Invalid proprty pair */ -#define ACLERR4640 4640 /* Register database failed */ -#define ACLERR4650 4650 /* Default database not LDAP */ -#define ACLERR4660 4660 /* Multiple default databases */ -#define ACLERR4670 4670 /* No default database */ - /* LASDnsBuild */ #define ACLERR4700 4700 /* Cannot allocatae hash */ #define ACLERR4710 4710 /* Cannot add token to hash */ diff --git a/include/libaccess/acleval.h b/include/libaccess/acleval.h index 15e1f5e0e..43c7ef8fa 100644 --- a/include/libaccess/acleval.h +++ b/include/libaccess/acleval.h @@ -63,8 +63,7 @@ NSPR_BEGIN_EXTERN_C /* Functions in acleval.c */ -extern int aclDNSLookup(DNSFilter_t * dnf, - char * dnsspec, int fqdn, char **match); +extern int aclDNSLookup(DNSFilter_t * dnf, const char * dnsspec, int fqdn, const char **match); extern int aclIPLookup(IPFilter_t * ipf, IPAddr_t ipaddr, void **match); extern int aclUserLookup(UidUser_t * uup, UserObj_t * uoptr); extern int aclEvaluate(ACL_t * acl, USI_t arid, ClAuth_t * clauth, int * padn);
0
ce0dc222c36ed4b8ca3936e800e28cb26d64d9f5
389ds/389-ds-base
Bug 672468 - Don't use empty path elements in LD_LIBRARY_PATH Many of the command line wrappers and perl scripts have their LD_LIBRARY_PATH and SHLIB_PATH defined with macros that are replaced by configure. It is common for some of these macros to be empty, which results in empty path elements. This patch adds a bit more logic to the creation of the libpath to avoid empty path elements and leading and trailing colons.
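The rule the patch enforces is language-neutral: skip empty elements and never emit leading or trailing colons. As a quick illustration (not part of the patch itself, which is shell and Perl), the same logic in Python, where the candidate values stand in for configure-substituted macros that may expand to empty strings:

# Candidate directories; the empty strings mimic configure macros that
# expanded to nothing.
candidates = ["/opt/dirsrv/lib/dirsrv", "", "/usr/lib64", "", "/usr/lib"]

# Joining only the non-empty elements yields no empty path components and
# no leading or trailing colon.
ld_library_path = ":".join(p for p in candidates if p)
print(ld_library_path)  # /opt/dirsrv/lib/dirsrv:/usr/lib64:/usr/lib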
commit ce0dc222c36ed4b8ca3936e800e28cb26d64d9f5 Author: Nathan Kinder <[email protected]> Date: Thu Jan 27 12:10:01 2011 -0800 Bug 672468 - Don't use empty path elements in LD_LIBRARY_PATH Many of the command line wrappers and perl scripts have their LD_LIBRARY_PATH and SHLIB_PATH defined with macros that are replaced by configure. It is common for some of these macros to be empty, which results in empty path elements. This patch adds a bit more logic to the creation of the libpath to avoid empty path elements and leading and trailing colons. diff --git a/ldap/admin/src/scripts/start-dirsrv.in b/ldap/admin/src/scripts/start-dirsrv.in index d7144d153..291c82110 100755 --- a/ldap/admin/src/scripts/start-dirsrv.in +++ b/ldap/admin/src/scripts/start-dirsrv.in @@ -6,6 +6,11 @@ # 1: Server could not be started # 2: Server already running +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + # Starts a single instance start_instance() { # The first argument is the server ID. Anything @@ -22,9 +27,15 @@ start_instance() { fi prefix="$DS_ROOT" - LD_LIBRARY_PATH=$prefix$SERVER_DIR:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:@pcre_libdir@ + + libpath_add "$prefix$SERVER_DIR" + libpath_add "$prefix@nss_libdir@" + libpath_add "$prefix@libdir@" + libpath_add "@nss_libdir@" + libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH - SHLIB_PATH=$prefix$SERVER_DIR:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:@pcre_libdir@ + SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH DS_CONFIG_DIR=$CONFIG_DIR diff --git a/ldap/admin/src/scripts/template-bak2db.in b/ldap/admin/src/scripts/template-bak2db.in index 0f9266780..b08183911 100755 --- a/ldap/admin/src/scripts/template-bak2db.in +++ b/ldap/admin/src/scripts/template-bak2db.in @@ -4,10 +4,17 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-bak2db.pl.in b/ldap/admin/src/scripts/template-bak2db.pl.in index 4cd51ab32..4ab79cc7b 100644 --- a/ldap/admin/src/scripts/template-bak2db.pl.in +++ b/ldap/admin/src/scripts/template-bak2db.pl.in @@ -125,8 +125,25 @@ $entry = "${dn}${misc}${cn}${nsinstance}${nsarchivedir}${nsdbtype}"; $vstr = ""; if ($verbose != 0) { $vstr = "-v"; } $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = 
"$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-db2bak.in b/ldap/admin/src/scripts/template-db2bak.in index 5ff5d0783..d89f6eeb7 100755 --- a/ldap/admin/src/scripts/template-db2bak.in +++ b/ldap/admin/src/scripts/template-db2bak.in @@ -4,10 +4,17 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-db2bak.pl.in b/ldap/admin/src/scripts/template-db2bak.pl.in index b443f0b10..8a0cb1c6a 100644 --- a/ldap/admin/src/scripts/template-db2bak.pl.in +++ b/ldap/admin/src/scripts/template-db2bak.pl.in @@ -114,9 +114,26 @@ $entry = "${dn}${misc}${cn}${nsarchivedir}${nsdbtype}"; $vstr = ""; if ($verbose != 0) { $vstr = "-v"; } $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; print("Back up directory: $archivedir\n"); open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-db2index.in b/ldap/admin/src/scripts/template-db2index.in index e0e9a5553..3dc4740c4 100755 --- a/ldap/admin/src/scripts/template-db2index.in +++ b/ldap/admin/src/scripts/template-db2index.in @@ -4,10 +4,16 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-db2index.pl.in b/ldap/admin/src/scripts/template-db2index.pl.in index eb58957d6..e598b05b4 100644 --- a/ldap/admin/src/scripts/template-db2index.pl.in +++ b/ldap/admin/src/scripts/template-db2index.pl.in @@ -69,8 +69,13 @@ $verbose = 0; $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); 
+libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $i = 0; while ($i <= $#ARGV) @@ -224,3 +229,15 @@ $entry = "${dn}${misc}${cn}${nsinstance}${attribute}${vlvattribute}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-db2ldif.in b/ldap/admin/src/scripts/template-db2ldif.in index e519f3e17..2989b7727 100755 --- a/ldap/admin/src/scripts/template-db2ldif.in +++ b/ldap/admin/src/scripts/template-db2ldif.in @@ -4,10 +4,17 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-db2ldif.pl.in b/ldap/admin/src/scripts/template-db2ldif.pl.in index 2db64213b..a4929ba4a 100644 --- a/ldap/admin/src/scripts/template-db2ldif.pl.in +++ b/ldap/admin/src/scripts/template-db2ldif.pl.in @@ -258,9 +258,26 @@ $entry = "${dn}${misc}${cn}${nsinstance}${nsincluded}${nsexcluded}${nsreplica}${ $vstr = ""; if ($verbose != 0) { $vstr = "-v"; } $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; print("Exporting to ldif file: ${ldiffile}\n"); open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-dbverify.in b/ldap/admin/src/scripts/template-dbverify.in index 0f714509b..8c1a98ead 100755 --- a/ldap/admin/src/scripts/template-dbverify.in +++ b/ldap/admin/src/scripts/template-dbverify.in @@ -4,10 +4,17 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-dn2rdn.in 
b/ldap/admin/src/scripts/template-dn2rdn.in index f9efeea76..a9402fdee 100755 --- a/ldap/admin/src/scripts/template-dn2rdn.in +++ b/ldap/admin/src/scripts/template-dn2rdn.in @@ -4,10 +4,16 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in b/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in index 1745f80b6..f181a7af2 100644 --- a/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in +++ b/ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in @@ -61,8 +61,13 @@ $verbose = 0; $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $i = 0; while ($i <= $#ARGV) @@ -150,3 +155,15 @@ $entry = "${dn}${misc}${cn}${basedn}${linkdn}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-fixup-memberof.pl.in b/ldap/admin/src/scripts/template-fixup-memberof.pl.in index 547379f4c..b43c21ad7 100644 --- a/ldap/admin/src/scripts/template-fixup-memberof.pl.in +++ b/ldap/admin/src/scripts/template-fixup-memberof.pl.in @@ -65,8 +65,13 @@ $verbose = 0; $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $i = 0; while ($i <= $#ARGV) @@ -161,3 +166,15 @@ $entry = "${dn}${misc}${cn}${basedn}${filter}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-ldif2db.in b/ldap/admin/src/scripts/template-ldif2db.in index ce64193af..8f92acff2 100755 --- a/ldap/admin/src/scripts/template-ldif2db.in +++ b/ldap/admin/src/scripts/template-ldif2db.in @@ -4,10 +4,17 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi 
-LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-ldif2db.pl.in b/ldap/admin/src/scripts/template-ldif2db.pl.in index da6073bd6..089c95dfb 100644 --- a/ldap/admin/src/scripts/template-ldif2db.pl.in +++ b/ldap/admin/src/scripts/template-ldif2db.pl.in @@ -217,8 +217,25 @@ $entry = "${dn}${misc}${cn}${nsinstance}${nsincluded}${nsexcluded}${nsldiffiles} $vstr = ""; if ($verbose != 0) { $vstr = "-v"; } $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-ldif2ldap.in b/ldap/admin/src/scripts/template-ldif2ldap.in index 4f944895b..ab32f4851 100755 --- a/ldap/admin/src/scripts/template-ldif2ldap.in +++ b/ldap/admin/src/scripts/template-ldif2ldap.in @@ -5,10 +5,21 @@ if [ "$prefix" = "/" ] ; then prefix="" fi -PATH=$prefix@ldaptool_bindir@:@ldaptool_bindir@ -LD_LIBRARY_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@ +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix@ldapsdk_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "@nss_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@ +SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH +PATH=$prefix@ldaptool_bindir@:@ldaptool_bindir@ + ldapmodify @ldaptool_opts@ -a -p {{SERVER-PORT}} -D "$1" -w "$2" -f $3 diff --git a/ldap/admin/src/scripts/template-monitor.in b/ldap/admin/src/scripts/template-monitor.in index 6cbde8717..49ac96020 100755 --- a/ldap/admin/src/scripts/template-monitor.in +++ b/ldap/admin/src/scripts/template-monitor.in @@ -1,11 +1,22 @@ #!/bin/sh -PATH=$prefix@ldaptool_bindir@:@ldaptool_bindir@ -LD_LIBRARY_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@ +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix@ldapsdk_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "@nss_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@ 
+SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH +PATH=$prefix@ldaptool_bindir@:@ldaptool_bindir@ + if [ "x$1" != "x" ]; then MDN="$1"; else MDN="cn=monitor"; diff --git a/ldap/admin/src/scripts/template-ns-accountstatus.pl.in b/ldap/admin/src/scripts/template-ns-accountstatus.pl.in index 7769ce21b..8e2e5900a 100644 --- a/ldap/admin/src/scripts/template-ns-accountstatus.pl.in +++ b/ldap/admin/src/scripts/template-ns-accountstatus.pl.in @@ -393,8 +393,13 @@ else debug("Running ** $cmd ** $operation\n"); $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $ldapsearch="ldapsearch @ldaptool_opts@ @plainldif_opts@"; $ldapmodify="ldapmodify @ldaptool_opts@"; @@ -842,3 +847,15 @@ debug("$modrole, $entry\n"); out("$entry $state.\n"); exit 0; + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-ns-activate.pl.in b/ldap/admin/src/scripts/template-ns-activate.pl.in index 7769ce21b..8e2e5900a 100644 --- a/ldap/admin/src/scripts/template-ns-activate.pl.in +++ b/ldap/admin/src/scripts/template-ns-activate.pl.in @@ -393,8 +393,13 @@ else debug("Running ** $cmd ** $operation\n"); $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $ldapsearch="ldapsearch @ldaptool_opts@ @plainldif_opts@"; $ldapmodify="ldapmodify @ldaptool_opts@"; @@ -842,3 +847,15 @@ debug("$modrole, $entry\n"); out("$entry $state.\n"); exit 0; + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-ns-inactivate.pl.in b/ldap/admin/src/scripts/template-ns-inactivate.pl.in index 7769ce21b..8e2e5900a 100644 --- a/ldap/admin/src/scripts/template-ns-inactivate.pl.in +++ b/ldap/admin/src/scripts/template-ns-inactivate.pl.in @@ -393,8 +393,13 @@ else debug("Running ** $cmd ** $operation\n"); $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $ldapsearch="ldapsearch @ldaptool_opts@ @plainldif_opts@"; $ldapmodify="ldapmodify @ldaptool_opts@"; @@ -842,3 +847,15 @@ debug("$modrole, $entry\n"); out("$entry $state.\n"); exit 0; + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if 
($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in b/ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in index 5d158bd07..7e997196b 100755 --- a/ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in +++ b/ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in @@ -47,8 +47,13 @@ use lib qw(@perlpath@); $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; # Add new password policy specific entries ############################################################################# @@ -283,3 +288,15 @@ sub usage { } } # end of $opt_U } + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-restoreconfig.in b/ldap/admin/src/scripts/template-restoreconfig.in index 0e5de9b07..480af88d2 100755 --- a/ldap/admin/src/scripts/template-restoreconfig.in +++ b/ldap/admin/src/scripts/template-restoreconfig.in @@ -1,9 +1,20 @@ #!/bin/sh prefix="{{DS-ROOT}}" -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "@nss_libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ +SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH cd {{SERVERBIN-DIR}} diff --git a/ldap/admin/src/scripts/template-saveconfig.in b/ldap/admin/src/scripts/template-saveconfig.in index 737c97bdd..4c8f3d62b 100755 --- a/ldap/admin/src/scripts/template-saveconfig.in +++ b/ldap/admin/src/scripts/template-saveconfig.in @@ -1,9 +1,20 @@ #!/bin/sh prefix="{{DS-ROOT}}" -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "@nss_libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ +SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH cd {{SERVERBIN-DIR}} diff --git a/ldap/admin/src/scripts/template-schema-reload.pl.in b/ldap/admin/src/scripts/template-schema-reload.pl.in index 364c2b03d..f7a1dbb5a 100644 --- a/ldap/admin/src/scripts/template-schema-reload.pl.in +++ b/ldap/admin/src/scripts/template-schema-reload.pl.in @@ -60,8 +60,13 @@ $verbose = 0; $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = 
"$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $i = 0; while ($i <= $#ARGV) @@ -150,3 +155,15 @@ $entry = "${dn}${misc}${cn}${basedn}${schemadir}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-suffix2instance.in b/ldap/admin/src/scripts/template-suffix2instance.in index 994f8af67..8186ef812 100755 --- a/ldap/admin/src/scripts/template-suffix2instance.in +++ b/ldap/admin/src/scripts/template-suffix2instance.in @@ -1,9 +1,20 @@ #!/bin/sh prefix="{{DS-ROOT}}" -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "@nss_libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ +SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH cd {{SERVERBIN-DIR}} diff --git a/ldap/admin/src/scripts/template-syntax-validate.pl.in b/ldap/admin/src/scripts/template-syntax-validate.pl.in index b5fb796b9..b87e12d75 100644 --- a/ldap/admin/src/scripts/template-syntax-validate.pl.in +++ b/ldap/admin/src/scripts/template-syntax-validate.pl.in @@ -65,8 +65,13 @@ $verbose = 0; $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $i = 0; while ($i <= $#ARGV) @@ -161,3 +166,15 @@ $entry = "${dn}${misc}${cn}${basedn}${filter}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-upgradedb.in b/ldap/admin/src/scripts/template-upgradedb.in index 9b6f03d6a..a36acc6f8 100755 --- a/ldap/admin/src/scripts/template-upgradedb.in +++ b/ldap/admin/src/scripts/template-upgradedb.in @@ -1,9 +1,20 @@ #!/bin/sh prefix="{{DS-ROOT}}" -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" 
+libpath_add "@nss_libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ +SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH cd {{SERVERBIN-DIR}} diff --git a/ldap/admin/src/scripts/template-upgradednformat.in b/ldap/admin/src/scripts/template-upgradednformat.in index ea4f18a49..d1ff5c837 100755 --- a/ldap/admin/src/scripts/template-upgradednformat.in +++ b/ldap/admin/src/scripts/template-upgradednformat.in @@ -11,10 +11,17 @@ prefix="{{DS-ROOT}}" if [ "$prefix" = "/" ] ; then prefix="" fi -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:$prefix@pcre_libdir@ -if [ -n "$prefix" ] ; then - LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:@nss_libdir@" -fi + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH diff --git a/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in b/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in index 53e4cea91..3e17be02c 100644 --- a/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in +++ b/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in @@ -64,8 +64,13 @@ $verbose = 0; $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "$prefix@ldaptool_bindir@:$prefix/usr/bin:@ldaptool_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; -$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +libpath_add("$prefix@nss_libdir@"); +libpath_add("$prefix/usr/lib"); +libpath_add("@nss_libdir@"); +libpath_add("/usr/lib"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; $i = 0; while ($i <= $#ARGV) @@ -178,3 +183,15 @@ $entry = "${dn}${misc}${cn}${basedn}${args}"; open(FOO, "| ldapmodify @ldaptool_opts@ $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" ); print(FOO "$entry"); close(FOO); + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-verify-db.pl.in b/ldap/admin/src/scripts/template-verify-db.pl.in index 25bb46b88..c352eb26b 100644 --- a/ldap/admin/src/scripts/template-verify-db.pl.in +++ b/ldap/admin/src/scripts/template-verify-db.pl.in @@ -170,8 +170,11 @@ my $dbdirs = getDbDir($startpoint); my $prefix = "{{DS-ROOT}}"; $ENV{'PATH'} = "{{INST-DIR}}:$prefix@db_bindir@:$prefix/usr/bin:@db_bindir@:/usr/bin"; -$ENV{'LD_LIBRARY_PATH'} = "@db_libdir@:@libdir@"; -$ENV{'SHLIB_PATH'} = "@db_libdir@:@libdir@"; + +libpath_add("@db_libdir@"); +libpath_add("@libdir@"); + +$ENV{'SHLIB_PATH'} = "$ENV{'LD_LIBRARY_PATH'}"; # Check transaction logs by db_printlog for (my $i = 0; "$$dbdirs[$i]" ne ""; $i++) @@ -256,3 +259,15 @@ else print "Good\n"; exit(0); } + +sub libpath_add { + my $libpath = shift; + + if ($libpath) { + if ($ENV{'LD_LIBRARY_PATH'}) { + $ENV{'LD_LIBRARY_PATH'} = "$ENV{'LD_LIBRARY_PATH'}:$libpath"; + } else { + $ENV{'LD_LIBRARY_PATH'} = "$libpath"; + } + } +} diff --git a/ldap/admin/src/scripts/template-vlvindex.in b/ldap/admin/src/scripts/template-vlvindex.in index e6cd74399..193667a2d 100755 --- a/ldap/admin/src/scripts/template-vlvindex.in 
+++ b/ldap/admin/src/scripts/template-vlvindex.in @@ -1,9 +1,20 @@ #!/bin/sh prefix="{{DS-ROOT}}" -LD_LIBRARY_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ + +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "$prefix{{SERVER-DIR}}" +libpath_add "$prefix@nss_libdir@" +libpath_add "$prefix@libdir@" +libpath_add "@nss_libdir@" +libpath_add "$prefix@pcre_libdir@" + export LD_LIBRARY_PATH -SHLIB_PATH=$prefix/{{SERVER-DIR}}:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:$prefix@pcre_libdir@ +SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH cd {{SERVERBIN-DIR}} diff --git a/wrappers/cl-dump.in b/wrappers/cl-dump.in index 9a6301a1b..d42c6f2f4 100755 --- a/wrappers/cl-dump.in +++ b/wrappers/cl-dump.in @@ -43,8 +43,15 @@ # wrapper for cl-dump.pl # set the library paths and call cl-dump.pl -LD_LIBRARY_PATH=@nss_libdir@:/usr/lib -SHLIB_PATH=@nss_libdir@:/usr/lib +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "/usr/lib" + +SHLIB_PATH=$LD_LIBRARY_PATH export LD_LIBRARY_PATH SHLIB_PATH @bindir@/cl-dump.pl "$@" diff --git a/wrappers/dbscan.in b/wrappers/dbscan.in index 115a126d8..942ffc9d2 100755 --- a/wrappers/dbscan.in +++ b/wrappers/dbscan.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@nspr_libdir@:@db_libdir@ BIN_DIR=@bindir@ COMMAND=dbscan-bin @@ -14,7 +13,14 @@ COMMAND=dbscan-bin ## search order this command wrapper uses to find shared libraries. ## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nspr_libdir@" +libpath_add "@db_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/infadd.in b/wrappers/infadd.in index 89eb0ea74..3ebc40b70 100755 --- a/wrappers/infadd.in +++ b/wrappers/infadd.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ BIN_DIR=@bindir@ COMMAND=infadd-bin @@ -14,7 +13,16 @@ COMMAND=infadd-bin ## search order this command wrapper uses to find shared libraries. 
## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/initscript.in b/wrappers/initscript.in index 147b2d6ea..7632749c4 100644 --- a/wrappers/initscript.in +++ b/wrappers/initscript.in @@ -60,6 +60,11 @@ success() } } +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + # On Solaris /var/run is in tmpfs and gets wiped out upon reboot # we have to recreate the /var/run/@package_name@ directory # We also have to make sure that the directory is writable @@ -150,7 +155,10 @@ if [ -n "$2" ]; then fi start() { - LD_LIBRARY_PATH=@libdir@/@package_name@:@nss_libdir@:@pcre_libdir@ + libpath_add "@libdir@/@package_name@" + libpath_add "@nss_libdir@" + libpath_add "@pcre_libdir@" + export LD_LIBRARY_PATH echo "Starting $prog: " # Start every slapd instance that isn't already running diff --git a/wrappers/ldap-agent.in b/wrappers/ldap-agent.in index 266507aa1..62c3d6e19 100755 --- a/wrappers/ldap-agent.in +++ b/wrappers/ldap-agent.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@netsnmp_libdir@ BIN_DIR=@sbindir@ COMMAND=ldap-agent-bin @@ -17,7 +16,16 @@ export MIBS ## search order this command wrapper uses to find shared libraries. ## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@netsnmp_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/ldclt.in b/wrappers/ldclt.in index 079e33cc6..7f0a3cc9e 100755 --- a/wrappers/ldclt.in +++ b/wrappers/ldclt.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ BIN_DIR=@bindir@ COMMAND=ldclt-bin @@ -14,7 +13,16 @@ COMMAND=ldclt-bin ## search order this command wrapper uses to find shared libraries. ## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" + export LD_LIBRARY_PATH SASL_PATH=@sasl_path@ export SASL_PATH diff --git a/wrappers/ldif.in b/wrappers/ldif.in index 8fcaf1717..540792021 100755 --- a/wrappers/ldif.in +++ b/wrappers/ldif.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ BIN_DIR=@bindir@ COMMAND=ldif-bin @@ -14,7 +13,16 @@ COMMAND=ldif-bin ## search order this command wrapper uses to find shared libraries. 
## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/migratecred.in b/wrappers/migratecred.in index dd44800fb..49de8d7af 100755 --- a/wrappers/migratecred.in +++ b/wrappers/migratecred.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@:@pcre_libdir@ BIN_DIR=@bindir@ COMMAND=migratecred-bin @@ -14,7 +13,18 @@ COMMAND=migratecred-bin ## search order this command wrapper uses to find shared libraries. ## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@serverdir@" +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" +libpath_add "@pcre_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/mmldif.in b/wrappers/mmldif.in index a11932e34..aa09bae98 100755 --- a/wrappers/mmldif.in +++ b/wrappers/mmldif.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@:@pcre_libdir@ BIN_DIR=@bindir@ COMMAND=mmldif-bin @@ -14,7 +13,18 @@ COMMAND=mmldif-bin ## search order this command wrapper uses to find shared libraries. ## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@serverdir@" +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" +libpath_add "@pcre_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/pwdhash.in b/wrappers/pwdhash.in index b3ef3fa9e..69618fa9d 100755 --- a/wrappers/pwdhash.in +++ b/wrappers/pwdhash.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@:@pcre_libdir@ BIN_DIR=@bindir@ COMMAND=pwdhash-bin @@ -14,7 +13,18 @@ COMMAND=pwdhash-bin ## search order this command wrapper uses to find shared libraries. 
## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@serverdir@" +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" +libpath_add "@pcre_libdir@" + export LD_LIBRARY_PATH diff --git a/wrappers/repl-monitor.in b/wrappers/repl-monitor.in index 116dbb542..cba72a1e7 100755 --- a/wrappers/repl-monitor.in +++ b/wrappers/repl-monitor.in @@ -43,8 +43,15 @@ # wrapper for repl-monitor.pl # set the library paths and call repl-monitor.pl -LD_LIBRARY_PATH=@nss_libdir@:/usr/lib -SHLIB_PATH=@nss_libdir@:/usr/lib +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "/usr/lib" + +SHLIB_PATH=$LD_LIBRARY_PATH export LD_LIBRARY_PATH SHLIB_PATH @bindir@/repl-monitor.pl "$@" diff --git a/wrappers/rsearch.in b/wrappers/rsearch.in index cebecf1f9..a78abd7c8 100755 --- a/wrappers/rsearch.in +++ b/wrappers/rsearch.in @@ -4,7 +4,6 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ BIN_DIR=@bindir@ COMMAND=rsearch-bin @@ -14,7 +13,16 @@ COMMAND=rsearch-bin ## search order this command wrapper uses to find shared libraries. ## ############################################################################### -LD_LIBRARY_PATH=${LIB_DIR} +libpath_add() { + [ -z "$1" ] && return + LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1 +} + +libpath_add "@nss_libdir@" +libpath_add "@nspr_libdir@" +libpath_add "@ldapsdk_libdir@" +libpath_add "@sasl_libdir@" + export LD_LIBRARY_PATH
0
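The libpath_add helper added throughout the template scripts and wrappers above only appends its argument when it is non-empty, so LD_LIBRARY_PATH never collects stray leading or doubled colons when a configure-time @...@ substitution or $prefix turns out to be empty. A minimal standalone sketch of that pattern, assuming LD_LIBRARY_PATH starts out unset and using placeholder directories rather than the real substitution values:

#!/bin/sh
# Append a directory to LD_LIBRARY_PATH only when the argument is non-empty.
libpath_add() {
    [ -z "$1" ] && return
    LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$1
}

prefix=""                            # unprefixed install, as in the scripts above
libpath_add "$prefix/opt/dirsrv/lib" # placeholder for $prefix@libdir@
libpath_add ""                       # an empty substitution is simply skipped
libpath_add "/usr/lib64/nss"         # placeholder for @nss_libdir@

export LD_LIBRARY_PATH
SHLIB_PATH=$LD_LIBRARY_PATH          # HP-UX consumers read SHLIB_PATH
export SHLIB_PATH
echo "$LD_LIBRARY_PATH"              # prints /opt/dirsrv/lib:/usr/lib64/nss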
be459a234db6aca79b08f438a6ec5afb47581b72
389ds/389-ds-base
Resolves: bug 457846
Bug Description: The Windows Sync API should have plug-in points
Reviewed by: nkinder (Thanks!)
Fix Description: Several plug-in points have been added to the windows sync code, available to regular plug-ins that register with the winsync api via the slapi api broker interface. winsync-plugin.h documents their use along with some example plug-in code. The windows private data structure has been extended with two additional fields:
  raw_entry - the raw entry read from AD. This is passed to several plug-in callbacks so they have access to all of the attributes and values in the entry in case further processing is needed. This required a change to the function that reads the entry, to have it save the raw entry read each time from AD, in addition to the "cooked" entry it passes back to the caller.
  api_cookie - the plug-in private data passed back to each plug-in callback, allowing the plug-in to carry some additional context.
Both of these are stored in the private data field of the agreement, so some of the existing functions had to be changed to pass in the connection object or the protocol object in order to gain access to the agreement object.
There were several small memory leaks in the existing code that have been fixed - these are the places where a free() function of some sort has been added. Also, the usage of slapi_sdn_init_dn_byval leaked - slapi_sdn_new_dn_byval must be used there instead, since slapi_sdn_new* cannot be mixed with slapi_sdn_init*. I also cleaned up several compiler warnings.
The slapi changes are not strictly necessary, but they provide some conveniences to the winsync code and to plug-in writers. Conveniently, they were already private functions, so they mostly just needed public api wrappers.
Platforms tested: RHEL5
Flag Day: no
Doc impact: no
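As a rough illustration of what one of these plug-in points looks like from the plug-in side, the sketch below fills in a single search-parameter callback. The function name example_pre_ad_search_cb and the extra filter clause are hypothetical, the parameter list is inferred from how winsync_plugin_call_pre_ad_search_cb() invokes the registered callback in the diff below, and registration of the callback array through the slapi api broker (WINSYNC_v1_0_GUID) is omitted; winsync-plugin.h from this commit is the authoritative reference for the real interface.

#include <ldap.h>
#include "slapi-plugin.h"

/*
 * Hypothetical pre-AD-search callback: tighten the filter winsync sends
 * to AD. The signature mirrors the call made by
 * winsync_plugin_call_pre_ad_search_cb(); "cookie" is whatever this
 * plug-in's init callback returned.
 */
static void
example_pre_ad_search_cb(void *cookie, const char *agmt_dn,
                         char **base, int *scope, char **filter,
                         char ***attrs, LDAPControl ***serverctrls)
{
    if (filter && *filter) {
        /* AND an extra (hypothetical) clause onto the original filter */
        char *newfilter = slapi_ch_smprintf("(&%s(department=engineering))",
                                            *filter);
        slapi_ch_free_string(filter); /* frees the old value and NULLs it */
        *filter = newfilter;
    }
}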
commit be459a234db6aca79b08f438a6ec5afb47581b72 Author: Rich Megginson <[email protected]> Date: Tue Aug 5 20:26:22 2008 +0000 Resolves: bug 457846 Bug Description: The Windows Sync API should have plug-in points Reviewed by: nkinder (Thanks!) Fix Description: Several plug-in points have been added to the windows sync code, available to regular plug-ins that register with the winsync api via the slapi api broker interface. winsync-plugin.h documents the use of these along with some example plug-in code. The windows private data structure has been extended to add two additional fields: raw_entry - the raw entry read from AD - this is passed to several plug-in callbacks to allow them to have access to all of the attributes and values in the entry in case further processing is needed. This required a change to the function that reads the entry, to have it save the raw entry read each time from AD, in addition to the "cooked" entry it passes back to the caller. api_cookie - this is the plug-in private data passed back to each plug-in callback and allows the plug-in to specify some additional context Both of these are stored in the private data field in the agreement, so some of the existing functions had to be changed to pass in the connection object or the protocol object in order to gain access to the agreement object. There were several small memory leaks in the existing code that have been fixed - these are the places where a free() function of some sort has been added. Also the usage of slapi_sdn_init_dn_byval leaked - slapi_sdn_new_dn_byval must be used here instead - cannot mix slapi_sdn_new with slapi_sdn_init* I also cleaned up several compiler warnings. The slapi changes are not strictly necessary, but they provide some conveniences to the winsync code and to plug-in writers. The good thing is that they were already private functions, so mostly just needed to have public api wrappers. 
Platforms tested: RHEL5 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c index 2c54c64b2..32787c4ce 100644 --- a/ldap/servers/plugins/replication/windows_connection.c +++ b/ldap/servers/plugins/replication/windows_connection.c @@ -512,15 +512,17 @@ windows_perform_operation(Repl_Connection *conn, int optype, const char *dn, /* Copied from the chaining backend*/ static Slapi_Entry * -windows_LDAPMessage2Entry(LDAP * ld, LDAPMessage * msg, int attrsonly) { +windows_LDAPMessage2Entry(Repl_Connection *conn, LDAPMessage * msg, int attrsonly) { - Slapi_Entry *e = slapi_entry_alloc(); + Slapi_Entry *rawentry = NULL; + Slapi_Entry *e = NULL; char *a = NULL; BerElement * ber = NULL; + LDAP *ld = conn->ld; + + windows_private_set_raw_entry(conn->agmt, NULL); /* clear it first */ - if ( e == NULL ) return NULL; if (msg == NULL) { - slapi_entry_free(e); return NULL; } @@ -529,10 +531,21 @@ windows_LDAPMessage2Entry(LDAP * ld, LDAPMessage * msg, int attrsonly) { * attribute type and values ARE allocated */ + e = slapi_entry_alloc(); + if ( e == NULL ) return NULL; slapi_entry_set_dn( e, ldap_get_dn( ld, msg ) ); + rawentry = slapi_entry_alloc(); + if ( rawentry == NULL ) { + slapi_entry_free(e); + return NULL; + } + slapi_entry_set_dn( rawentry, slapi_ch_strdup(slapi_entry_get_dn(e)) ); for ( a = ldap_first_attribute( ld, msg, &ber ); a!=NULL; a=ldap_next_attribute( ld, msg, ber ) ) { + struct berval ** aVal = ldap_get_values_len( ld, msg, a); + slapi_entry_add_values(rawentry, a, aVal); + if (0 == strcasecmp(a,"dnsRecord") || 0 == strcasecmp(a,"dnsproperty") || 0 == strcasecmp(a,"dscorepropagationdata")) { @@ -548,10 +561,8 @@ windows_LDAPMessage2Entry(LDAP * ld, LDAPMessage * msg, int attrsonly) { if (attrsonly) { slapi_entry_add_value(e, a, (Slapi_Value *)NULL); - ldap_memfree(a); } else { - struct berval ** aVal = ldap_get_values_len( ld, msg, a); char *type_to_use = NULL; /* Work around the fact that we alias street and streetaddress, while Microsoft do not */ if (0 == strcasecmp(a,"streetaddress")) @@ -575,15 +586,18 @@ windows_LDAPMessage2Entry(LDAP * ld, LDAPMessage * msg, int attrsonly) { slapi_entry_add_values( e, type_to_use, aVal); } - ldap_memfree(a); - ldap_value_free_len(aVal); } } + ldap_memfree(a); + ldap_value_free_len(aVal); } if ( NULL != ber ) { ldap_ber_free( ber, 0 ); } + + windows_private_set_raw_entry(conn->agmt, rawentry); /* windows private now owns rawentry */ + return e; } @@ -599,11 +613,6 @@ ConnResult windows_search_entry_ext(Repl_Connection *conn, char* searchbase, char *filter, Slapi_Entry **entry, LDAPControl **serverctrls) { ConnResult return_value = 0; - int ldap_rc = 0; - LDAPMessage *res = NULL; - int nummessages = 0; - int numentries = 0; - int numreferences = 0; LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_search_entry\n", 0, 0, 0 ); @@ -611,15 +620,41 @@ windows_search_entry_ext(Repl_Connection *conn, char* searchbase, char *filter, if (windows_conn_connected(conn)) { - ldap_rc = ldap_search_ext_s(conn->ld, searchbase, LDAP_SCOPE_SUBTREE, - filter, NULL, 0 /* attrsonly */, - serverctrls , NULL /* client controls */, + int ldap_rc = 0; + LDAPMessage *res = NULL; + char *searchbase_copy = slapi_ch_strdup(searchbase); + int scope = LDAP_SCOPE_SUBTREE; + char *filter_copy = slapi_ch_strdup(filter); + char **attrs = NULL; + LDAPControl **serverctrls_copy = NULL; + + slapi_add_controls(&serverctrls_copy, serverctrls, 1 /* make a copy we can free */); + + LDAPDebug( 
LDAP_DEBUG_REPL, "Calling windows entry search request plugin\n", 0, 0, 0 ); + + winsync_plugin_call_pre_ad_search_cb(conn->agmt, NULL, &searchbase_copy, &scope, &filter_copy, + &attrs, &serverctrls_copy); + + ldap_rc = ldap_search_ext_s(conn->ld, searchbase_copy, scope, + filter_copy, attrs, 0 /* attrsonly */, + serverctrls_copy , NULL /* client controls */, &conn->timeout, 0 /* sizelimit */, &res); + + slapi_ch_free_string(&searchbase_copy); + slapi_ch_free_string(&filter_copy); + slapi_ch_array_free(attrs); + attrs = NULL; + ldap_controls_free(serverctrls_copy); + serverctrls_copy = NULL; + if (LDAP_SUCCESS == ldap_rc) { LDAPMessage *message = ldap_first_entry(conn->ld, res); if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { + int nummessages = 0; + int numentries = 0; + int numreferences = 0; nummessages = ldap_count_messages(conn->ld, res); numentries = ldap_count_entries(conn->ld, res); numreferences = ldap_count_references(conn->ld, res); @@ -629,7 +664,7 @@ windows_search_entry_ext(Repl_Connection *conn, char* searchbase, char *filter, if (NULL != entry) { - *entry = windows_LDAPMessage2Entry(conn->ld,message,0); + *entry = windows_LDAPMessage2Entry(conn,message,0); } /* See if there are any more entries : if so then that's an error * but we still need to get them to avoid gumming up the connection @@ -664,42 +699,46 @@ windows_search_entry_ext(Repl_Connection *conn, char* searchbase, char *filter, ConnResult send_dirsync_search(Repl_Connection *conn) { - int rc; ConnResult return_value; - LDAPControl *server_controls[2]; - int msgid; - - const char *op_string = NULL; - - const char* old_dn = NULL; - char* dn = NULL; LDAPDebug( LDAP_DEBUG_TRACE, "=> send_dirsync_search\n", 0, 0, 0 ); - /* need to strip the dn down to dc= */ - old_dn = slapi_sdn_get_ndn( windows_private_get_windows_subtree(conn->agmt) ); - dn = strstr(old_dn, "dc="); - if (windows_conn_connected(conn)) { + const char *op_string = NULL; + int rc; + int scope = LDAP_SCOPE_SUBTREE; + char *filter = slapi_ch_strdup("(objectclass=*)"); + char **attrs = NULL; + LDAPControl **server_controls = NULL; + int msgid; + /* need to strip the dn down to dc= */ + const char *old_dn = slapi_sdn_get_ndn( windows_private_get_windows_subtree(conn->agmt) ); + char *dn = slapi_ch_strdup(strstr(old_dn, "dc=")); + if (conn->supports_dirsync == 0) { - server_controls[0] = NULL; /* unsupported */ + /* unsupported */ } else { - server_controls[0] = windows_private_dirsync_control(conn->agmt); + slapi_add_control_ext(&server_controls, + windows_private_dirsync_control(conn->agmt), + 0 /* no copy - passin */); } - server_controls[1] = NULL; conn->last_operation = CONN_SEARCH; conn->status = STATUS_SEARCHING; op_string = "search"; + LDAPDebug( LDAP_DEBUG_REPL, "Calling dirsync search request plugin\n", 0, 0, 0 ); + + winsync_plugin_call_dirsync_search_params_cb(conn->agmt, old_dn, &dn, &scope, &filter, + &attrs, &server_controls); + LDAPDebug( LDAP_DEBUG_REPL, "Sending dirsync search request\n", 0, 0, 0 ); - rc = ldap_search_ext( conn->ld, dn, LDAP_SCOPE_SUBTREE, "(objectclass=*)", /* filter */ - NULL /*attrs */, PR_FALSE, server_controls, NULL, /* ClientControls */ - 0,0, &msgid); + rc = ldap_search_ext( conn->ld, dn, scope, filter, attrs, PR_FALSE, server_controls, + NULL /* ClientControls */, 0,0, &msgid); if (LDAP_SUCCESS == rc) { @@ -723,11 +762,13 @@ send_dirsync_search(Repl_Connection *conn) return_value = CONN_OPERATION_FAILED; } } - if (server_controls[0]) - { - ldap_control_free(server_controls[0]); - } - + /* cleanup */ + 
slapi_ch_free_string(&dn); + slapi_ch_free_string(&filter); + slapi_ch_array_free(attrs); + attrs = NULL; + ldap_controls_free(server_controls); + server_controls = NULL; } else { @@ -852,7 +893,7 @@ Slapi_Entry * windows_conn_get_search_result(Repl_Connection *conn) { slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name,"received entry from dirsync: %s\n", dn); lm = ldap_first_entry( conn->ld, res ); - e = windows_LDAPMessage2Entry(conn->ld,lm,0); + e = windows_LDAPMessage2Entry(conn,lm,0); ldap_memfree(dn); } } @@ -1424,6 +1465,13 @@ windows_conn_replica_supports_dirsync(Repl_Connection *conn) LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_conn_replica_supports_dirsync\n", 0, 0, 0 ); +#ifdef WINSYNC_TEST + /* used to fake out dirsync to think it's talking to a real ad when in fact + it's just talking to another directory server */ + conn->supports_dirsync = 1; + return CONN_SUPPORTS_DIRSYNC; +#endif + if (windows_conn_connected(conn)) { if (conn->supports_dirsync == -1) { @@ -1882,7 +1930,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) LDAPDebug( LDAP_DEBUG_TRACE, "=> repl5_stop_debug_timeout\n", 0, 0, 0 ); if (eqctx && !*setlevel) { - int found = slapi_eq_cancel(eqctx); + (void)slapi_eq_cancel(eqctx); } if (s_debug_timeout && s_debug_level && *setlevel) { diff --git a/ldap/servers/plugins/replication/windows_private.c b/ldap/servers/plugins/replication/windows_private.c index 5baa7ce2a..d6176b537 100644 --- a/ldap/servers/plugins/replication/windows_private.c +++ b/ldap/servers/plugins/replication/windows_private.c @@ -70,8 +70,12 @@ struct windowsprivate { * so we only have to allocate each filter once instead of doing it every time we receive a change. */ Slapi_Filter *directory_filter; /* Used for checking if local entries need to be sync'd to AD */ Slapi_Filter *deleted_filter; /* Used for checking if an entry is an AD tombstone */ + Slapi_Entry *raw_entry; /* "raw" un-schema processed last entry read from AD */ + void *api_cookie; /* private data used by api callbacks */ }; +static void windows_private_set_windows_domain(const Repl_Agmt *ra, char *domain); + static int true_value_from_string(char *val) { @@ -99,7 +103,6 @@ windows_parse_config_entry(Repl_Agmt *ra, const char *type, Slapi_Entry *e) windows_private_set_windows_subtree(ra, slapi_sdn_new_dn_passin(tmpstr) ); } retval = 1; - slapi_ch_free((void**)&tmpstr); } if (type == NULL || slapi_attr_types_equivalent(type,type_nsds7DirectoryReplicaArea)) { @@ -109,7 +112,6 @@ windows_parse_config_entry(Repl_Agmt *ra, const char *type, Slapi_Entry *e) windows_private_set_directory_subtree(ra, slapi_sdn_new_dn_passin(tmpstr) ); } retval = 1; - slapi_ch_free((void**)&tmpstr); } if (type == NULL || slapi_attr_types_equivalent(type,type_nsds7CreateNewUsers)) { @@ -173,6 +175,8 @@ windows_init_agreement_from_entry(Repl_Agmt *ra, Slapi_Entry *e) agmt_set_priv(ra,windows_private_new()); windows_parse_config_entry(ra,NULL,e); + + windows_plugin_init(ra); } const char* windows_private_get_purl(const Repl_Agmt *ra) @@ -214,6 +218,9 @@ void windows_agreement_delete(Repl_Agmt *ra) slapi_filter_free(dp->directory_filter, 1); slapi_filter_free(dp->deleted_filter, 1); + slapi_entry_free(dp->raw_entry); + dp->raw_entry = NULL; + dp->api_cookie = NULL; slapi_ch_free((void **)dp); LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_delete\n", 0, 0, 0 ); @@ -401,7 +408,7 @@ const Slapi_DN* windows_private_get_directory_subtree (const Repl_Agmt *ra) } /* Takes a copy of the sdn passed in */ -void windows_private_set_windows_subtree 
(const Repl_Agmt *ra,const Slapi_DN* sdn ) +void windows_private_set_windows_subtree (const Repl_Agmt *ra,Slapi_DN* sdn ) { Dirsync_Private *dp; @@ -413,14 +420,15 @@ void windows_private_set_windows_subtree (const Repl_Agmt *ra,const Slapi_DN* sd dp = (Dirsync_Private *) agmt_get_priv(ra); PR_ASSERT (dp); - - dp->windows_subtree = slapi_sdn_dup(sdn); + + slapi_sdn_free(&dp->windows_subtree); + dp->windows_subtree = sdn; LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_set_windows_replarea\n", 0, 0, 0 ); } /* Takes a copy of the sdn passed in */ -void windows_private_set_directory_subtree (const Repl_Agmt *ra,const Slapi_DN* sdn ) +void windows_private_set_directory_subtree (const Repl_Agmt *ra,Slapi_DN* sdn ) { Dirsync_Private *dp; @@ -433,7 +441,8 @@ void windows_private_set_directory_subtree (const Repl_Agmt *ra,const Slapi_DN* dp = (Dirsync_Private *) agmt_get_priv(ra); PR_ASSERT (dp); - dp->directory_subtree = slapi_sdn_dup(sdn); + slapi_sdn_free(&dp->directory_subtree); + dp->directory_subtree = sdn; LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_set_directory_replarea\n", 0, 0, 0 ); } @@ -516,6 +525,7 @@ LDAPControl* windows_private_dirsync_control(const Repl_Agmt *ra) LDAPControl *control = NULL; BerElement *ber; Dirsync_Private *dp; + char iscritical = PR_TRUE; LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_private_dirsync_control\n", 0, 0, 0 ); @@ -527,7 +537,10 @@ LDAPControl* windows_private_dirsync_control(const Repl_Agmt *ra) ber_printf( ber, "{iio}", dp->dirsync_flags, dp->dirsync_maxattributecount, dp->dirsync_cookie, dp->dirsync_cookie_len ); - slapi_build_control( REPL_DIRSYNC_CONTROL_OID, ber, PR_TRUE, &control); +#ifdef WINSYNC_TEST + iscritical = PR_FALSE; +#endif + slapi_build_control( REPL_DIRSYNC_CONTROL_OID, ber, iscritical, &control); ber_free(ber,1); @@ -787,3 +800,389 @@ int windows_private_load_dirsync_cookie(const Repl_Agmt *ra) return rc; } +/* get returns a pointer to the structure - do not free */ +Slapi_Entry *windows_private_get_raw_entry(const Repl_Agmt *ra) +{ + Dirsync_Private *dp; + + LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_private_get_raw_entry\n", 0, 0, 0 ); + + dp = (Dirsync_Private *) agmt_get_priv(ra); + PR_ASSERT (dp); + + LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_get_raw_entry\n", 0, 0, 0 ); + + return dp->raw_entry; +} + +/* this is passin - windows_private owns the pointer, not a copy */ +void windows_private_set_raw_entry(const Repl_Agmt *ra, Slapi_Entry *e) +{ + Dirsync_Private *dp; + + LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_private_set_raw_entry\n", 0, 0, 0 ); + + dp = (Dirsync_Private *) agmt_get_priv(ra); + PR_ASSERT (dp); + + slapi_entry_free(dp->raw_entry); + dp->raw_entry = e; + + LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_set_raw_entry\n", 0, 0, 0 ); +} + +void *windows_private_get_api_cookie(const Repl_Agmt *ra) +{ + Dirsync_Private *dp; + + LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_private_get_api_cookie\n", 0, 0, 0 ); + + dp = (Dirsync_Private *) agmt_get_priv(ra); + PR_ASSERT (dp); + + LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_get_api_cookie\n", 0, 0, 0 ); + + return dp->api_cookie; +} + +void windows_private_set_api_cookie(Repl_Agmt *ra, void *api_cookie) +{ + Dirsync_Private *dp; + + LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_private_set_api_cookie\n", 0, 0, 0 ); + + dp = (Dirsync_Private *) agmt_get_priv(ra); + PR_ASSERT (dp); + dp->api_cookie = api_cookie; + + LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_private_set_api_cookie\n", 0, 0, 0 ); +} + +/* an array of function pointers */ +static void **_WinSyncAPI = 
NULL; + +void +windows_plugin_init(Repl_Agmt *ra) +{ + void *cookie = NULL; + winsync_plugin_init_cb initfunc = NULL; + + LDAPDebug( LDAP_DEBUG_PLUGIN, "--> windows_plugin_init_start -- begin\n",0,0,0); + + /* if the function pointer array is null, get the functions - we will + call init once per replication agreement, but will only grab the + api once */ + if((NULL == _WinSyncAPI) && + (slapi_apib_get_interface(WINSYNC_v1_0_GUID, &_WinSyncAPI) || + (NULL == _WinSyncAPI))) + { + LDAPDebug( LDAP_DEBUG_PLUGIN, + "<-- windows_plugin_init_start -- no windows plugin API registered for GUID [%s] -- end\n", + WINSYNC_v1_0_GUID,0,0); + return; + } + + initfunc = (winsync_plugin_init_cb)_WinSyncAPI[WINSYNC_PLUGIN_INIT_CB]; + if (initfunc) { + cookie = (*initfunc)(windows_private_get_directory_subtree(ra), + windows_private_get_windows_subtree(ra)); + } + windows_private_set_api_cookie(ra, cookie); + + LDAPDebug( LDAP_DEBUG_PLUGIN, "<-- windows_plugin_init_start -- end\n",0,0,0); + return; +} + +void +winsync_plugin_call_dirsync_search_params_cb(const Repl_Agmt *ra, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + winsync_search_params_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_DIRSYNC_SEARCH_CB]) ? + (winsync_search_params_cb)_WinSyncAPI[WINSYNC_PLUGIN_DIRSYNC_SEARCH_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), agmt_dn, base, scope, filter, + attrs, serverctrls); + + return; +} + +void +winsync_plugin_call_pre_ad_search_cb(const Repl_Agmt *ra, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + winsync_search_params_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_SEARCH_CB]) ? + (winsync_search_params_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_SEARCH_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), agmt_dn, base, scope, filter, + attrs, serverctrls); + + return; +} + +void +winsync_plugin_call_pre_ds_search_entry_cb(const Repl_Agmt *ra, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + winsync_search_params_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_SEARCH_ENTRY_CB]) ? + (winsync_search_params_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_SEARCH_ENTRY_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), agmt_dn, base, scope, filter, + attrs, serverctrls); + + return; +} + +void +winsync_plugin_call_pre_ds_search_all_cb(const Repl_Agmt *ra, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + winsync_search_params_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_SEARCH_ALL_CB]) ? + (winsync_search_params_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_SEARCH_ALL_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), agmt_dn, base, scope, filter, + attrs, serverctrls); + + return; +} + +void +winsync_plugin_call_pre_ad_mod_user_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + winsync_pre_mod_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_USER_CB]) ? 
+ (winsync_pre_mod_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_USER_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + ds_entry, smods, do_modify); + + return; +} + +void +winsync_plugin_call_pre_ad_mod_group_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + winsync_pre_mod_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_GROUP_CB]) ? + (winsync_pre_mod_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_GROUP_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + ds_entry, smods, do_modify); + + return; +} + +void +winsync_plugin_call_pre_ds_mod_user_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + winsync_pre_mod_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_MOD_USER_CB]) ? + (winsync_pre_mod_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_MOD_USER_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + ds_entry, smods, do_modify); + + return; +} + +void +winsync_plugin_call_pre_ds_mod_group_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + winsync_pre_mod_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_MOD_GROUP_CB]) ? + (winsync_pre_mod_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_MOD_GROUP_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + ds_entry, smods, do_modify); + + return; +} + +void +winsync_plugin_call_pre_ds_add_user_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry) +{ + winsync_pre_add_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_ADD_USER_CB]) ? + (winsync_pre_add_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_ADD_USER_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + ds_entry); + + return; +} + +void +winsync_plugin_call_pre_ds_add_group_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry) +{ + winsync_pre_add_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_ADD_GROUP_CB]) ? + (winsync_pre_add_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_DS_ADD_GROUP_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + ds_entry); + + return; +} + +void +winsync_plugin_call_get_new_ds_user_dn_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, char **new_dn_string, + const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix) +{ + winsync_get_new_dn_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_GET_NEW_DS_USER_DN_CB]) ? 
+ (winsync_get_new_dn_cb)_WinSyncAPI[WINSYNC_PLUGIN_GET_NEW_DS_USER_DN_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + new_dn_string, ds_suffix, ad_suffix); + + return; +} + +void +winsync_plugin_call_get_new_ds_group_dn_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, char **new_dn_string, + const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix) +{ + winsync_get_new_dn_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_GET_NEW_DS_GROUP_DN_CB]) ? + (winsync_get_new_dn_cb)_WinSyncAPI[WINSYNC_PLUGIN_GET_NEW_DS_GROUP_DN_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, ad_entry, + new_dn_string, ds_suffix, ad_suffix); + + return; +} + +void +winsync_plugin_call_pre_ad_mod_user_mods_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + const Slapi_DN *local_dn, LDAPMod * const *origmods, + Slapi_DN *remote_dn, LDAPMod ***modstosend) +{ + winsync_pre_ad_mod_mods_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_USER_MODS_CB]) ? + (winsync_pre_ad_mod_mods_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_USER_MODS_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, local_dn, + origmods, remote_dn, modstosend); + + return; +} + +void +winsync_plugin_call_pre_ad_mod_group_mods_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, + const Slapi_DN *local_dn, LDAPMod * const *origmods, + Slapi_DN *remote_dn, LDAPMod ***modstosend) +{ + winsync_pre_ad_mod_mods_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_GROUP_MODS_CB]) ? + (winsync_pre_ad_mod_mods_cb)_WinSyncAPI[WINSYNC_PLUGIN_PRE_AD_MOD_GROUP_MODS_CB] : + NULL; + + if (!thefunc) { + return; + } + + (*thefunc)(windows_private_get_api_cookie(ra), rawentry, local_dn, + origmods, remote_dn, modstosend); + + return; +} + +int +winsync_plugin_call_can_add_entry_to_ad_cb(const Repl_Agmt *ra, const Slapi_Entry *local_entry, + const Slapi_DN *remote_dn) +{ + winsync_can_add_to_ad_cb thefunc = + (_WinSyncAPI && _WinSyncAPI[WINSYNC_PLUGIN_CAN_ADD_ENTRY_TO_AD_CB]) ? 
+ (winsync_can_add_to_ad_cb)_WinSyncAPI[WINSYNC_PLUGIN_CAN_ADD_ENTRY_TO_AD_CB] : + NULL; + + if (!thefunc) { + return 1; /* default is entry can be added to AD */ + } + + return (*thefunc)(windows_private_get_api_cookie(ra), local_entry, remote_dn); +} diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index 8088d6229..b893aac27 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -668,6 +668,7 @@ windows_acquire_replica(Private_Repl_Protocol *prp, RUV **ruv, int check_ruv) return_value = ACQUIRE_FATAL_ERROR; } slapi_sdn_free(&replarea_sdn); + csn_free(&current_csn); } } @@ -1004,7 +1005,6 @@ process_replay_add(Private_Repl_Protocol *prp, slapi_operation_parameters *op, S } if (cn_string) { - char *rdnstr = NULL; char *container_str = NULL; const char *suffix = slapi_sdn_get_dn(windows_private_get_windows_subtree(prp->agmt)); @@ -1184,7 +1184,19 @@ windows_replay_update(Private_Repl_Protocol *prp, slapi_operation_parameters *op agmt_get_long_name(prp->agmt), op2string(op->operation_type), op->target_address.dn, slapi_sdn_get_dn(remote_dn)); switch (op->operation_type) { - /* For an ADD operation, we map the entry and then send the operation, which may fail if the peer entry already existed */ + /* + we should check the modify case first and check the list of mods - + if the magic objectclass (ntuser) and attributes (ntUserCreateNewAccount + or ntGroupCreateNewAccount) then we should fall through to the ADD case + since the user wants to add the user to AD - could maybe just change + process_replay_add slightly, to add the mods list from the modify + operation - process_replay_add already turns the entry into a mods list + to pass to the ldap add operation, so it should not be too much more + trouble to apply the additional mods from the modify operation - we'll + have to pass in local entry, or perhaps just change the operation from + modify to an add, and set the op->p.p_add.target_entry to the local_entry + which gets retrieved above + */ case SLAPI_OPERATION_ADD: return_value = process_replay_add(prp,op,local_entry,local_dn,remote_dn,is_user,missing_entry,&password); break; @@ -1193,6 +1205,22 @@ windows_replay_update(Private_Repl_Protocol *prp, slapi_operation_parameters *op LDAPMod **mapped_mods = NULL; windows_map_mods_for_replay(prp,op->p.p_modify.modify_mods, &mapped_mods, is_user, &password); + if (is_user) { + winsync_plugin_call_pre_ad_mod_user_mods_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + local_dn, + op->p.p_modify.modify_mods, + remote_dn, + &mapped_mods); + } else if (is_group) { + winsync_plugin_call_pre_ad_mod_group_mods_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + local_dn, + op->p.p_modify.modify_mods, + remote_dn, + &mapped_mods); + } + /* It's possible that the mapping process results in an empty mod list, in which case we don't bother with the replay */ if ( mapped_mods == NULL || *(mapped_mods)== NULL ) { @@ -1304,7 +1332,7 @@ is_straight_mapped_attr(const char *type, int is_user /* or group */, int is_nt4 char *this_attr = NULL; char **list = is_user ? (is_nt4 ? nt4_user_matching_attributes : windows_user_matching_attributes) : (is_nt4 ? 
nt4_group_matching_attributes : windows_group_matching_attributes); /* Look for the type in the list of straight mapped attrs for the appropriate object type */ - while (this_attr = list[offset]) + while ((this_attr = list[offset])) { if (0 == slapi_attr_type_cmp(this_attr, type, SLAPI_TYPE_CMP_SUBTYPE)) { @@ -1327,7 +1355,7 @@ windows_map_attr_name(const char *original_type , int to_windows, int is_user, i *mapped_type = NULL; /* Iterate over the map entries looking for the type we have */ - while(this_map = &(our_map[offset])) + while((this_map = &(our_map[offset]))) { char *their_name = to_windows ? this_map->windows_attribute_name : this_map->ldap_attribute_name; char *our_name = to_windows ? this_map->ldap_attribute_name : this_map->windows_attribute_name; @@ -1500,7 +1528,6 @@ windows_create_remote_entry(Private_Repl_Protocol *prp,Slapi_Entry *original_ent if (0 == slapi_attr_type_cmp(new_type, "streetAddress", SLAPI_TYPE_CMP_SUBTYPE)) { if (slapi_valueset_count(vs) > 1) { int i = 0; - const char *street_value = NULL; Slapi_Value *value = NULL; Slapi_Value *new_value = NULL; @@ -2026,6 +2053,10 @@ find_entry_by_attr_value(const char *attribute, const char *value, Slapi_Entry * int rval = 0; const char *subtree_dn = NULL; int not_unique = 0; + char *subtree_dn_copy = NULL; + int scope = LDAP_SCOPE_SUBTREE; + char **attrs = NULL; + LDAPControl **server_controls = NULL; if (pb == NULL) goto done; @@ -2036,12 +2067,21 @@ find_entry_by_attr_value(const char *attribute, const char *value, Slapi_Entry * goto done; subtree_dn = slapi_sdn_get_dn(windows_private_get_directory_subtree(ra)); + subtree_dn_copy = slapi_ch_strdup(subtree_dn); - slapi_search_internal_set_pb(pb, subtree_dn, - LDAP_SCOPE_SUBTREE, query, NULL, 0, NULL, NULL, + winsync_plugin_call_pre_ds_search_entry_cb(ra, NULL, &subtree_dn_copy, &scope, &query, + &attrs, &server_controls); + + slapi_search_internal_set_pb(pb, subtree_dn_copy, + scope, query, attrs, 0, server_controls, NULL, (void *)plugin_get_default_component_id(), 0); slapi_search_internal_pb(pb); + slapi_ch_free_string(&subtree_dn_copy); slapi_ch_free_string(&query); + slapi_ch_array_free(attrs); + attrs = NULL; + ldap_controls_free(server_controls); + server_controls = NULL; slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rval); if (rval != LDAP_SUCCESS) @@ -2096,7 +2136,7 @@ dedash_guid(char *str) char *p = str; char c = '\0'; - while (c = *p) + while ((c = *p)) { if ('-' == c) { @@ -2254,7 +2294,7 @@ map_windows_tombstone_dn(Slapi_Entry *e, Slapi_DN **dn, Private_Repl_Protocol *p /* The tombstone suffix discards any containers, so we need * to trim the DN to only dc components. */ - if (suffix = slapi_sdn_get_dn(windows_private_get_windows_subtree(prp->agmt))) { + if ((suffix = slapi_sdn_get_dn(windows_private_get_windows_subtree(prp->agmt)))) { /* If this isn't found, it is treated as an error below. 
*/ suffix = (const char *) PL_strcasestr(suffix,"dc="); } @@ -2358,7 +2398,6 @@ static Slapi_DN *make_dn_from_guid(char *guid, int is_nt4, const char* suffix) char *dn_string = NULL; if (guid) { - new_dn = slapi_sdn_new(); if (is_nt4) { dn_string = PR_smprintf("GUID=%s,%s",guid,suffix); @@ -2366,7 +2405,7 @@ static Slapi_DN *make_dn_from_guid(char *guid, int is_nt4, const char* suffix) { dn_string = PR_smprintf("<GUID=%s>",guid); } - slapi_sdn_init_dn_byval(new_dn,dn_string); + new_dn = slapi_sdn_new_dn_byval(dn_string); PR_smprintf_free(dn_string); } /* dn string is now inside the Slapi_DN, and will be freed by its owner */ @@ -2452,6 +2491,7 @@ map_entry_dn_outbound(Slapi_Entry *e, Slapi_DN **dn, Private_Repl_Protocol *prp, * without removing the ntUniqueID attribute. We should verify that the entry really * exists in AD. */ rc = windows_get_remote_entry(prp, new_dn, &remote_entry); + slapi_sdn_free(&new_dn); if (0 == rc && remote_entry) { slapi_entry_free(remote_entry); } else { @@ -2471,7 +2511,6 @@ map_entry_dn_outbound(Slapi_Entry *e, Slapi_DN **dn, Private_Repl_Protocol *prp, } if (cn_string) { - char *rdnstr = NULL; char *container_str = NULL; container_str = extract_container(slapi_entry_get_sdn_const(e), @@ -2719,9 +2758,23 @@ map_entry_dn_inbound(Slapi_Entry *e, Slapi_DN **dn, const Repl_Agmt *ra) if (is_user) { new_dn_string = PR_smprintf("uid=%s,%s%s",username,container_str,suffix); + winsync_plugin_call_get_new_ds_user_dn_cb(ra, + windows_private_get_raw_entry(ra), + e, + &new_dn_string, + windows_private_get_directory_subtree(ra), + windows_private_get_windows_subtree(ra)); } else { new_dn_string = PR_smprintf("cn=%s,%s%s",username,container_str,suffix); + if (is_group) { + winsync_plugin_call_get_new_ds_group_dn_cb(ra, + windows_private_get_raw_entry(ra), + e, + &new_dn_string, + windows_private_get_directory_subtree(ra), + windows_private_get_windows_subtree(ra)); + } } new_dn = slapi_sdn_new_dn_byval(new_dn_string); PR_smprintf_free(new_dn_string); @@ -2939,6 +2992,18 @@ windows_create_local_entry(Private_Repl_Protocol *prp,Slapi_Entry *remote_entry, { slapi_entry_add_string(local_entry,"sn",username); } + + if (is_user) { + winsync_plugin_call_pre_ds_add_user_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + remote_entry, + local_entry); + } else if (is_group) { + winsync_plugin_call_pre_ds_add_group_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + remote_entry, + local_entry); + } /* Store it */ windows_dump_entry("Adding new local entry",local_entry); pb = slapi_pblock_new(); @@ -2951,6 +3016,7 @@ windows_create_local_entry(Private_Repl_Protocol *prp,Slapi_Entry *remote_entry, "add operation of entry %s returned: %d\n", slapi_sdn_get_dn(local_sdn), retval); } error: + slapi_ch_free_string(&guid_str); if (pb) { slapi_pblock_destroy(pb); @@ -3115,7 +3181,6 @@ windows_generate_update_mods(Private_Repl_Protocol *prp,Slapi_Entry *remote_entr * sure we don't try to send more than one value. */ if (slapi_valueset_count(vs) > 1) { int i = 0; - const char *street_value = NULL; Slapi_Value *value = NULL; Slapi_Value *new_value = NULL; @@ -3229,7 +3294,6 @@ windows_generate_update_mods(Private_Repl_Protocol *prp,Slapi_Entry *remote_entr * sure we don't try to send more than one value. 
*/ if (slapi_valueset_count(vs) > 1) { int i = 0; - const char *street_value = NULL; Slapi_Value *value = NULL; Slapi_Value *new_value = NULL; @@ -3318,6 +3382,40 @@ windows_generate_update_mods(Private_Repl_Protocol *prp,Slapi_Entry *remote_entr slapi_ch_free_string(&local_type); } + if (to_windows) { + if (is_user) { + winsync_plugin_call_pre_ad_mod_user_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + local_entry, /* the cooked ad entry */ + remote_entry, /* the ds entry */ + smods, + do_modify); + } else if (is_group) { + winsync_plugin_call_pre_ad_mod_group_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + local_entry, /* the cooked ad entry */ + remote_entry, /* the ds entry */ + smods, + do_modify); + } + } else { + if (is_user) { + winsync_plugin_call_pre_ds_mod_user_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + remote_entry, /* the cooked ad entry */ + local_entry, /* the ds entry */ + smods, + do_modify); + } else if (is_group) { + winsync_plugin_call_pre_ds_mod_group_cb(prp->agmt, + windows_private_get_raw_entry(prp->agmt), + remote_entry, /* the cooked ad entry */ + local_entry, /* the ds entry */ + smods, + do_modify); + } + } + if (slapi_is_loglevel_set(SLAPI_LOG_REPL) && *do_modify) { slapi_mods_dump(smods,"windows sync"); @@ -3412,10 +3510,16 @@ windows_process_total_add(Private_Repl_Protocol *prp,Slapi_Entry *e, Slapi_DN* r Slapi_Entry *mapped_entry = NULL; char *password = NULL; const Slapi_DN* local_dn = NULL; + int can_add = winsync_plugin_call_can_add_entry_to_ad_cb(prp->agmt, e, remote_dn); /* First map the entry */ local_dn = slapi_entry_get_sdn_const(e); - if (missing_entry) - retval = windows_create_remote_entry(prp, e, remote_dn, &mapped_entry, &password); + if (missing_entry) { + if (can_add) { + retval = windows_create_remote_entry(prp, e, remote_dn, &mapped_entry, &password); + } else { + return retval; /* cannot add and no entry to modify */ + } + } /* Convert entry to mods */ if (0 == retval && mapped_entry) { diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c index 6e01f633a..09ecbc8e3 100644 --- a/ldap/servers/plugins/replication/windows_tot_protocol.c +++ b/ldap/servers/plugins/replication/windows_tot_protocol.c @@ -99,11 +99,15 @@ windows_tot_run(Private_Repl_Protocol *prp) int rc; callback_data cb_data; Slapi_PBlock *pb; - const char* dn; + char* dn; RUV *ruv = NULL; RUV *starting_ruv = NULL; Replica *replica = NULL; Object *local_ruv_obj = NULL; + int scope = LDAP_SCOPE_SUBTREE; + char *filter = slapi_ch_strdup("(|(objectclass=ntuser)(objectclass=ntgroup))"); + char **attrs = NULL; + LDAPControl **server_controls = NULL; LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_tot_run\n", 0, 0, 0 ); @@ -168,13 +172,15 @@ windows_tot_run(Private_Repl_Protocol *prp) /* send everything */ - dn = slapi_sdn_get_dn( windows_private_get_directory_subtree(prp->agmt)); + dn = slapi_ch_strdup(slapi_sdn_get_dn( windows_private_get_directory_subtree(prp->agmt))); + + winsync_plugin_call_pre_ds_search_all_cb(prp->agmt, NULL, &dn, &scope, &filter, + &attrs, &server_controls); pb = slapi_pblock_new (); /* Perform a subtree search for any ntuser or ntgroup entries underneath the * suffix defined in the sync agreement. 
*/ - slapi_search_internal_set_pb (pb, dn, - LDAP_SCOPE_SUBTREE, "(|(objectclass=ntuser)(objectclass=ntgroup))", NULL, 0, NULL, NULL, + slapi_search_internal_set_pb (pb, dn, scope, filter, attrs, 0, server_controls, NULL, repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0); cb_data.prp = prp; cb_data.rc = 0; @@ -186,6 +192,13 @@ windows_tot_run(Private_Repl_Protocol *prp) get_result /* result callback */, send_entry /* entry callback */, NULL /* referral callback*/); + slapi_ch_free_string(&dn); + slapi_ch_free_string(&filter); + slapi_ch_array_free(attrs); + attrs = NULL; + ldap_controls_free(server_controls); + server_controls = NULL; + slapi_pblock_destroy (pb); agmt_set_last_init_end(prp->agmt, current_time()); rc = cb_data.rc; diff --git a/ldap/servers/plugins/replication/windowsrepl.h b/ldap/servers/plugins/replication/windowsrepl.h index 3691664ad..471386562 100644 --- a/ldap/servers/plugins/replication/windowsrepl.h +++ b/ldap/servers/plugins/replication/windowsrepl.h @@ -44,9 +44,9 @@ /* windows_private.c */ typedef struct windowsprivate Dirsync_Private; Dirsync_Private* windows_private_new(); -void windows_private_set_windows_subtree (const Repl_Agmt *ra,const Slapi_DN* sdn ); +void windows_private_set_windows_subtree (const Repl_Agmt *ra,Slapi_DN* sdn ); const Slapi_DN* windows_private_get_windows_subtree (const Repl_Agmt *ra); -void windows_private_set_directory_subtree (const Repl_Agmt *ra,const Slapi_DN* sdn ); +void windows_private_set_directory_subtree (const Repl_Agmt *ra,Slapi_DN* sdn ); const Slapi_DN* windows_private_get_directory_subtree (const Repl_Agmt *ra); LDAPControl* windows_private_dirsync_control(const Repl_Agmt *ra); ConnResult send_dirsync_search(Repl_Connection *conn); @@ -63,7 +63,6 @@ PRBool windows_private_create_users(const Repl_Agmt *ra); void windows_private_set_create_groups(const Repl_Agmt *ra, PRBool value); PRBool windows_private_create_groups(const Repl_Agmt *ra); const char *windows_private_get_windows_domain(const Repl_Agmt *ra); -static void windows_private_set_windows_domain(const Repl_Agmt *ra, char *domain); int windows_private_get_isnt4(const Repl_Agmt *ra); void windows_private_set_isnt4(const Repl_Agmt *ra, int isit); int windows_private_get_iswin2k3(const Repl_Agmt *ra); @@ -71,6 +70,16 @@ void windows_private_set_iswin2k3(const Repl_Agmt *ra, int isit); Slapi_Filter* windows_private_get_directory_filter(const Repl_Agmt *ra); Slapi_Filter* windows_private_get_deleted_filter(const Repl_Agmt *ra); const char* windows_private_get_purl(const Repl_Agmt *ra); +/* + * The raw entry is the last raw entry read from AD - raw as opposed + * "cooked" - that is, having had schema processing done + */ +/* get returns a pointer to the structure - do not free */ +Slapi_Entry *windows_private_get_raw_entry(const Repl_Agmt *ra); +/* this is passin - windows_private owns the pointer, not a copy */ +void windows_private_set_raw_entry(const Repl_Agmt *ra, Slapi_Entry *e); +void *windows_private_get_api_cookie(const Repl_Agmt *ra); +void windows_private_set_api_cookie(Repl_Agmt *ra, void *cookie); /* in windows_connection.c */ ConnResult windows_conn_connect(Repl_Connection *conn); @@ -112,3 +121,86 @@ int windows_check_user_password(Repl_Connection *conn, Slapi_DN *sdn, char *pass /* Used for GUID format conversion */ #define NTUNIQUEID_LENGTH 32 #define AD_GUID_LENGTH 36 + +/* called for each replication agreement - so the winsync + plugin can be agreement specific and store agreement + specific data +*/ +void windows_plugin_init(Repl_Agmt *ra); + 
+void winsync_plugin_call_dirsync_search_params_cb(const Repl_Agmt *ra, const char *agmt_dn, char **base, int *scope, char **filter, char ***attrs, LDAPControl ***serverctrls); +/* called before searching for a single entry from AD - agmt_dn will be NULL */ +void winsync_plugin_call_pre_ad_search_cb(const Repl_Agmt *ra, const char *agmt_dn, char **base, int *scope, char **filter, char ***attrs, LDAPControl ***serverctrls); +/* called before an internal search to get a single DS entry - agmt_dn will be NULL */ +void winsync_plugin_call_pre_ds_search_entry_cb(const Repl_Agmt *ra, const char *agmt_dn, char **base, int *scope, char **filter, char ***attrs, LDAPControl ***serverctrls); +/* called before the total update to get all entries from the DS to sync to AD */ +void winsync_plugin_call_pre_ds_search_all_cb(const Repl_Agmt *ra, const char *agmt_dn, char **base, int *scope, char **filter, char ***attrs, LDAPControl ***serverctrls); + +void winsync_plugin_call_pre_ad_mod_user_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, Slapi_Mods *smods, int *do_modify); +void winsync_plugin_call_pre_ad_mod_group_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, Slapi_Mods *smods, int *do_modify); +void winsync_plugin_call_pre_ds_mod_user_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, Slapi_Mods *smods, int *do_modify); +void winsync_plugin_call_pre_ds_mod_group_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, Slapi_Mods *smods, int *do_modify); + +void winsync_plugin_call_pre_ds_add_user_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry); +void winsync_plugin_call_pre_ds_add_group_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry); + +void winsync_plugin_call_get_new_ds_user_dn_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, + char **new_dn_string, const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix); +void winsync_plugin_call_get_new_ds_group_dn_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, + char **new_dn_string, const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix); + +void winsync_plugin_call_pre_ad_mod_user_mods_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, const Slapi_DN *local_dn, LDAPMod * const *origmods, Slapi_DN *remote_dn, LDAPMod ***modstosend); +void winsync_plugin_call_pre_ad_mod_group_mods_cb(const Repl_Agmt *ra, const Slapi_Entry *rawentry, const Slapi_DN *local_dn, LDAPMod * const *origmods, Slapi_DN *remote_dn, LDAPMod ***modstosend); + +int winsync_plugin_call_can_add_entry_to_ad_cb(const Repl_Agmt *ra, const Slapi_Entry *local_entry, const Slapi_DN *remote_dn); +/* + Call stack for all places where windows_LDAPMessage2Entry is called: + + windows_LDAPMessage2Entry + ++windows_seach_entry_ext + ++++windows_search_entry + ++++++windows_get_remote_entry + map_dn_values + windows_create_remote_entry + process_replay_add + windows_process_total_add + windows_map_mods_for_replay + windows_replay_update + send_updates + windows_inc_run + windows_create_local_entry + windows_process_dirsync_entry + windows_generate_update_mods + windows_update_remote_entry + process_replay_add + windows_process_total_add + windows_update_local_entry + windows_process_dirsync_entry + process_replay_add + windows_replay_update + map_entry_dn_outbound 
+ map_dn_values + windows_replay_update + windows_process_total_entry + send_entry + windows_tot_run + windows_process_total_add + windows_process_total_entry + send_entry + windows_tot_run + windows_process_dirsync_entry + windows_dirsync_inc_run + find_entry_by_attr_value_remote + map_entry_dn_outbound + ++++windows_get_remote_tombstone + map_windows_tombstone_dn + process_replay_add + ++windows_conn_get_search_result + windows_dirsync_inc_run + + + windows_inc_protocol + ++send_updates + ++++windows_replay_update +*/ +/* #define WINSYNC_TEST 1 */ /* fake ad is really just a regular ds */ diff --git a/ldap/servers/plugins/replication/winsync-plugin.h b/ldap/servers/plugins/replication/winsync-plugin.h new file mode 100644 index 000000000..882e41449 --- /dev/null +++ b/ldap/servers/plugins/replication/winsync-plugin.h @@ -0,0 +1,534 @@ +/** BEGIN COPYRIGHT BLOCK + * This Program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; version 2 of the License. + * + * This Program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place, Suite 330, Boston, MA 02111-1307 USA. + * + * In addition, as a special exception, Red Hat, Inc. gives You the additional + * right to link the code of this Program with code not covered under the GNU + * General Public License ("Non-GPL Code") and to distribute linked combinations + * including the two, subject to the limitations in this paragraph. Non-GPL Code + * permitted under this exception must only link to the code of this Program + * through those well defined interfaces identified in the file named EXCEPTION + * found in the source code files (the "Approved Interfaces"). The files of + * Non-GPL Code may instantiate templates or use macros or inline functions from + * the Approved Interfaces without causing the resulting work to be covered by + * the GNU General Public License. Only Red Hat, Inc. may make changes or + * additions to the list of Approved Interfaces. You must obey the GNU General + * Public License in all respects for all of the Program code and other code used + * in conjunction with the Program except the Non-GPL Code covered by this + * exception. If you modify this file, you may extend this exception to your + * version of the file, but you are not obligated to do so. If you do not wish to + * provide this exception without modification, you must delete this exception + * statement from your version and license this file solely under the GPL without + * exception. + * + * + * Copyright (C) 2008 Red Hat, Inc. + * All rights reserved. + * END COPYRIGHT BLOCK **/ +#ifndef WINSYNC_PLUGIN_PUBLIC_API +#define WINSYNC_PLUGIN_PUBLIC_API + +#ifdef HAVE_CONFIG_H +# include <config.h> +#endif + +/* windows_private.c */ + +#include "slapi-plugin.h" + +/* + * WinSync plug-in API + */ +#define WINSYNC_v1_0_GUID "CDA8F029-A3C6-4EBB-80B8-A2E183DB0481" + +/* + * The plugin will define this callback in order to initialize itself. + * The ds subtree and the ad subtree from the sync agreement are passed in. + * These are read only. 
+ * The return value is private data to the plugin that will be passed back + * at each callback + */ +typedef void * (*winsync_plugin_init_cb)(const Slapi_DN *ds_subtree, const Slapi_DN *ad_subtree); +#define WINSYNC_PLUGIN_INIT_CB 1 +/* agmt_dn - const - the original AD base dn from the winsync agreement + scope - set directly e.g. *scope = 42; + base, filter - malloced - to set, free first e.g. + slapi_ch_free_string(filter); + *base = slapi_ch_strdup("(objectclass=foobar)"); + winsync code will use slapi_ch_free_string to free this value, so no static strings + attrs - NULL or null terminated array of strings - can use slapi_ch_array_add to add e.g. + slapi_ch_array_add(attrs, slapi_ch_strdup("myattr")); + attrs will be freed with slapi_ch_array_free, so caller must own the memory + serverctrls - NULL or null terminated array of LDAPControl* - can use slapi_add_control_ext to add + slapi_add_control_ext(serverctrls, mynewctrl, 1 / add a copy /); + serverctrls will be freed with ldap_controls_free, so caller must own memory +*/ +typedef void (*winsync_search_params_cb)(void *cookie, const char *agmt_dn, char **base, int *scope, char **filter, char ***attrs, LDAPControl ***serverctrls); +#define WINSYNC_PLUGIN_DIRSYNC_SEARCH_CB 2 /* serverctrls will already contain the DirSync control */ +#define WINSYNC_PLUGIN_PRE_AD_SEARCH_CB 3 +#define WINSYNC_PLUGIN_PRE_DS_SEARCH_ENTRY_CB 4 +#define WINSYNC_PLUGIN_PRE_DS_SEARCH_ALL_CB 5 +/* + * These callbacks are the main entry points that allow the plugin + * to intercept modifications to local and remote entries. + * rawentry - the raw AD entry, read directly from AD - this is read only + * ad_entry - the "cooked" AD entry - the DN in this entry should be set + * when the operation is to modify the AD entry + * ds_entry - the entry from the ds - the DN in this entry should be set + * when the operation is to modify the DS entry + * smods - the post-processing modifications - these should be modified + * by the plugin as needed + * do_modify - if the code has some modifications that need to be applied, this + * will be set to true - if the plugin has added some items to smods + * this should be set to true - if the plugin has removed all of + * the smods, and no operation should be performed, this should + * be set to false + */ +typedef void (*winsync_pre_mod_cb)(void *cookie, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, Slapi_Mods *smods, int *do_modify); +#define WINSYNC_PLUGIN_PRE_AD_MOD_USER_CB 6 +#define WINSYNC_PLUGIN_PRE_AD_MOD_GROUP_CB 7 +#define WINSYNC_PLUGIN_PRE_DS_MOD_USER_CB 8 +#define WINSYNC_PLUGIN_PRE_DS_MOD_GROUP_CB 9 +/* + * These callbacks are called when a new entry is being added to the + * local directory server from AD. + * rawentry - the raw AD entry, read directly from AD - this is read only + * ad_entry - the "cooked" AD entry + * ds_entry - the entry to be added to the DS - all modifications should + * be made to this entry, including changing the DN if needed, + * since the DN of this entry will be used as the ADD target DN + * This entry will already have had the default schema mapping applied + */ +typedef void (*winsync_pre_add_cb)(void *cookie, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, Slapi_Entry *ds_entry); +#define WINSYNC_PLUGIN_PRE_DS_ADD_USER_CB 10 +#define WINSYNC_PLUGIN_PRE_DS_ADD_GROUP_CB 11 +/* + * If a new entry has been added to AD, and we're sync'ing it over + * to the DS, we may need to create a new DN for the entry. 
The + * code tries to come up with a reasonable DN, but the plugin may + * have different ideas. These callbacks allow the plugin to specify + * what the new DN for the new entry should be. This is called from + * map_entry_dn_inbound which is called from various places where the DN for + * the new entry is needed. The winsync_plugin_call_pre_ds_add_* callbacks + * can also be used to set the DN just before the entry is stored in the DS. + * This is also used when we are mapping a dn valued attribute e.g. owner + * or secretary + * rawentry - the raw AD entry, read directly from AD - this is read only + * ad_entry - the "cooked" AD entry + * new_dn_string - the given value will be the default value created by the sync code + * to change it, slapi_ch_free_string first, then malloc the value to use + * ds_suffix - the suffix from the DS side of the sync agreement + * ad_suffix - the suffix from the AD side of the sync agreement + */ +typedef void (*winsync_get_new_dn_cb)(void *cookie, const Slapi_Entry *rawentry, Slapi_Entry *ad_entry, char **new_dn_string, + const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix); +#define WINSYNC_PLUGIN_GET_NEW_DS_USER_DN_CB 12 +#define WINSYNC_PLUGIN_GET_NEW_DS_GROUP_DN_CB 13 +/* + * These callbacks are called when a mod operation is going to be replayed + * to AD. This case is different than the pre add or pre mod callbacks + * above because in this context, we may only have the list of modifications + * and the DN to which the mods were applied. + * rawentry - the raw AD entry, read directly from AD - may be NULL + * local_dn - the original local DN used in the modification + * origmods - the original mod list + * remote_dn - this is the DN which will be used with the remote modify operation + * to AD - the winsync code may have already attempted to calculate its value + * modstosend - this is the list of modifications which will be sent - the winsync + * code will already have done its default mapping to these values + * + */ +typedef void (*winsync_pre_ad_mod_mods_cb)(void *cookie, const Slapi_Entry *rawentry, const Slapi_DN *local_dn, LDAPMod * const *origmods, Slapi_DN *remote_dn, LDAPMod ***modstosend); +#define WINSYNC_PLUGIN_PRE_AD_MOD_USER_MODS_CB 14 +#define WINSYNC_PLUGIN_PRE_AD_MOD_GROUP_MODS_CB 15 + +/* + * Callbacks used to determine if an entry should be added to the + * AD side if it does not already exist. 
+ * local_entry - the candidate entry to test + * remote_DN - the candidate remote entry to add + */ +typedef int (*winsync_can_add_to_ad_cb)(void *cookie, const Slapi_Entry *local_entry, const Slapi_DN *remote_dn); +#define WINSYNC_PLUGIN_CAN_ADD_ENTRY_TO_AD_CB 16 + +/* + The following are sample code stubs to show how to implement + a plugin which uses this api +*/ + +#ifdef WINSYNC_SAMPLE_CODE + +#include "slapi-plugin.h" +#include "winsync-plugin.h" + +static char *test_winsync_plugin_name = "test_winsync_api"; + +static void * +test_winsync_api_init(const Slapi_DN *ds_subtree, const Slapi_DN *ad_subtree) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_init [%s] [%s] -- begin\n", + slapi_sdn_get_dn(ds_subtree), + slapi_sdn_get_dn(ad_subtree)); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_init -- end\n"); + + return NULL; +} + +static void +test_winsync_dirsync_search_params_cb(void *cbdata, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_dirsync_search_params_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_dirsync_search_params_cb -- end\n"); + + return; +} + +/* called before searching for a single entry from AD - agmt_dn will be NULL */ +static void +test_winsync_pre_ad_search_cb(void *cbdata, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ad_search_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ad_search_cb -- end\n"); + + return; +} + +/* called before an internal search to get a single DS entry - agmt_dn will be NULL */ +static void +test_winsync_pre_ds_search_entry_cb(void *cbdata, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ds_search_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ds_search_cb -- end\n"); + + return; +} + +/* called before the total update to get all entries from the DS to sync to AD */ +static void +test_winsync_pre_ds_search_all_cb(void *cbdata, const char *agmt_dn, + char **base, int *scope, char **filter, + char ***attrs, LDAPControl ***serverctrls) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ds_search_all_cb -- orig filter [%s] -- begin\n", + ((filter && *filter) ? *filter : "NULL")); + + /* We only want to grab users from the ds side - no groups */ + slapi_ch_free_string(filter); + /* maybe use ntUniqueId=* - only get users that have already been + synced with AD already - ntUniqueId and ntUserDomainId are + indexed for equality only - need to add presence? 
*/ + *filter = slapi_ch_strdup("(&(objectclass=ntuser)(ntUserDomainId=*))"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ds_search_all_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ad_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ad_mod_user_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ad_mod_user_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ad_mod_group_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ad_mod_group_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ad_mod_group_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ds_mod_user_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ds_mod_user_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ds_mod_user_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ds_mod_group_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry, + Slapi_Mods *smods, int *do_modify) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ds_mod_group_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ds_mod_group_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ds_add_user_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ds_add_user_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ds_add_user_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ds_add_group_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, Slapi_Entry *ds_entry) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ds_add_group_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ds_add_group_cb -- end\n"); + + return; +} + +static void +test_winsync_get_new_ds_user_dn_cb(void *cbdata, const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, char **new_dn_string, + const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix) +{ + char **rdns = NULL; + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_get_new_ds_user_dn_cb -- old dn [%s] -- begin\n", + *new_dn_string); + + rdns = ldap_explode_dn(*new_dn_string, 0); + if (!rdns || !rdns[0]) { + ldap_value_free(rdns); + return; + } + + slapi_ch_free_string(new_dn_string); + *new_dn_string = PR_smprintf("%s,%s", rdns[0], slapi_sdn_get_dn(ds_suffix)); + ldap_value_free(rdns); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_get_new_ds_user_dn_cb -- new dn [%s] -- end\n", + *new_dn_string); + + return; +} + +static void +test_winsync_get_new_ds_group_dn_cb(void *cbdata, 
const Slapi_Entry *rawentry, + Slapi_Entry *ad_entry, char **new_dn_string, + const Slapi_DN *ds_suffix, const Slapi_DN *ad_suffix) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_get_new_ds_group_dn_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_get_new_ds_group_dn_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ad_mod_user_mods_cb(void *cbdata, const Slapi_Entry *rawentry, + const Slapi_DN *local_dn, LDAPMod * const *origmods, + Slapi_DN *remote_dn, LDAPMod ***modstosend) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ad_mod_user_mods_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ad_mod_user_mods_cb -- end\n"); + + return; +} + +static void +test_winsync_pre_ad_mod_group_mods_cb(void *cbdata, const Slapi_Entry *rawentry, + const Slapi_DN *local_dn, LDAPMod * const *origmods, + Slapi_DN *remote_dn, LDAPMod ***modstosend) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_pre_ad_mod_group_mods_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_pre_ad_mod_group_mods_cb -- end\n"); + + return; +} + +static int +test_winsync_can_add_entry_to_ad_cb(void *cbdata, const Slapi_Entry *local_entry, + const Slapi_DN *remote_dn) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_can_add_entry_to_ad_cb -- begin\n"); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_can_add_entry_to_ad_cb -- end\n"); + + return 0; /* false - do not allow entries to be added to ad */ +} + +/** + * Plugin identifiers + */ +static Slapi_PluginDesc test_winsync_pdesc = { + "test-winsync-plugin", + PLUGIN_MAGIC_VENDOR_STR, + PRODUCTTEXT, + "test winsync plugin" +}; + +static Slapi_ComponentId *test_winsync_plugin_id = NULL; + +static void *test_winsync_api[] = { + NULL, /* reserved for api broker use, must be zero */ + test_winsync_api_init, + test_winsync_dirsync_search_params_cb, + test_winsync_pre_ad_search_cb, + test_winsync_pre_ds_search_entry_cb, + test_winsync_pre_ds_search_all_cb, + test_winsync_pre_ad_mod_user_cb, + test_winsync_pre_ad_mod_group_cb, + test_winsync_pre_ds_mod_user_cb, + test_winsync_pre_ds_mod_group_cb, + test_winsync_pre_ds_add_user_cb, + test_winsync_pre_ds_add_group_cb, + test_winsync_get_new_ds_user_dn_cb, + test_winsync_get_new_ds_group_dn_cb, + test_winsync_pre_ad_mod_user_mods_cb, + test_winsync_pre_ad_mod_group_mods_cb, + test_winsync_can_add_entry_to_ad_cb +}; + +static int +test_winsync_plugin_start(Slapi_PBlock *pb) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_plugin_start -- begin\n"); + + if( slapi_apib_register(WINSYNC_v1_0_GUID, test_winsync_api) ) { + slapi_log_error( SLAPI_LOG_FATAL, test_winsync_plugin_name, + "<-- test_winsync_plugin_start -- failed to register winsync api -- end\n"); + return -1; + } + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_plugin_start -- end\n"); + return 0; +} + +static int +test_winsync_plugin_close(Slapi_PBlock *pb) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_plugin_close -- begin\n"); + + slapi_apib_unregister(WINSYNC_v1_0_GUID); + + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_plugin_close -- end\n"); + return 0; +} + +/* this is the slapi plugin 
init function, + not the one used by the winsync api +*/ +int test_winsync_plugin_init(Slapi_PBlock *pb) +{ + slapi_log_error(SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "--> test_winsync_plugin_init -- begin\n"); + + if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, + SLAPI_PLUGIN_VERSION_01 ) != 0 || + slapi_pblock_set(pb, SLAPI_PLUGIN_START_FN, + (void *) test_winsync_plugin_start ) != 0 || + slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN, + (void *) test_winsync_plugin_close ) != 0 || + slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, + (void *)&test_winsync_pdesc ) != 0 ) + { + slapi_log_error( SLAPI_LOG_FATAL, test_winsync_plugin_name, + "<-- test_winsync_plugin_init -- failed to register plugin -- end\n"); + return -1; + } + + /* Retrieve and save the plugin identity to later pass to + internal operations */ + if (slapi_pblock_get(pb, SLAPI_PLUGIN_IDENTITY, &test_winsync_plugin_id) != 0) { + slapi_log_error(SLAPI_LOG_FATAL, test_winsync_plugin_name, + "<-- test_winsync_plugin_init -- failed to retrieve plugin identity -- end\n"); + return -1; + } + + slapi_log_error( SLAPI_LOG_PLUGIN, test_winsync_plugin_name, + "<-- test_winsync_plugin_init -- end\n"); + return 0; +} + +/* +dn: cn=Test Winsync API,cn=plugins,cn=config +objectclass: top +objectclass: nsSlapdPlugin +objectclass: extensibleObject +cn: Test Winsync API +nsslapd-pluginpath: libtestwinsync-plugin +nsslapd-plugininitfunc: test_winsync_plugin_init +nsslapd-plugintype: preoperation +nsslapd-pluginenabled: on +nsslapd-plugin-depends-on-type: database +nsslapd-plugin-depends-on-named: Multimaster Replication Plugin +*/ + +#endif /* WINSYNC_SAMPLE_CODE */ + +#endif /* WINSYNC_PLUGIN_PUBLIC_API */ diff --git a/ldap/servers/slapd/charray.c b/ldap/servers/slapd/charray.c index 2f97f4d14..e57d90995 100644 --- a/ldap/servers/slapd/charray.c +++ b/ldap/servers/slapd/charray.c @@ -246,6 +246,11 @@ slapi_ch_array_free( char **array ) charray_free (array); } +void +slapi_ch_array_add( char ***a, char *s ) +{ + charray_add(a, s); +} /* case insensitive search */ int diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c index 5bf4c7e3b..3d0a4f9e6 100644 --- a/ldap/servers/slapd/control.c +++ b/ldap/servers/slapd/control.c @@ -474,9 +474,14 @@ write_controls( BerElement *ber, LDAPControl **ctrls ) /* * duplicate "newctrl" and add it to the array of controls "*ctrlsp" * note that *ctrlsp may be reset and that it is okay to pass NULL for it. + * IF copy is true, a copy of the passed in control will be added - copy + * made with slapi_dup_control - if copy is false, the control + * will be used directly and may be free'd by ldap_controls_free - so + * make sure it is ok for the control array to own the pointer you + * pass in */ void -add_control( LDAPControl ***ctrlsp, LDAPControl *newctrl ) +add_control_ext( LDAPControl ***ctrlsp, LDAPControl *newctrl, int copy ) { int count; @@ -491,10 +496,29 @@ add_control( LDAPControl ***ctrlsp, LDAPControl *newctrl ) *ctrlsp = (LDAPControl **)slapi_ch_realloc( (char *)*ctrlsp, ( count + 2 ) * sizeof(LDAPControl *)); - (*ctrlsp)[ count ] = slapi_dup_control( newctrl ); + if (copy) { + (*ctrlsp)[ count ] = slapi_dup_control( newctrl ); + } else { + (*ctrlsp)[ count ] = newctrl; + } (*ctrlsp)[ ++count ] = NULL; } +/* + * duplicate "newctrl" and add it to the array of controls "*ctrlsp" + * note that *ctrlsp may be reset and that it is okay to pass NULL for it. 
+ */ +void +add_control( LDAPControl ***ctrlsp, LDAPControl *newctrl ) +{ + add_control_ext(ctrlsp, newctrl, 1 /* copy */); +} + +void +slapi_add_control_ext( LDAPControl ***ctrlsp, LDAPControl *newctrl, int copy ) +{ + add_control_ext(ctrlsp, newctrl, copy); +} /* * return a malloc'd copy of "ctrl" @@ -527,6 +551,14 @@ slapi_dup_control( LDAPControl *ctrl ) return( rctrl ); } +void +slapi_add_controls( LDAPControl ***ctrlsp, LDAPControl **newctrls, int copy ) +{ + int ii; + for (ii = 0; newctrls && newctrls[ii]; ++ii) { + slapi_add_control_ext(ctrlsp, newctrls[ii], copy); + } +} int slapi_build_control( char *oid, BerElement *ber, diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 56959b3d8..23fa2af9e 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -835,6 +835,24 @@ int slapi_build_control( char *oid, BerElement *ber, int slapi_build_control_from_berval( char *oid, struct berval *bvp, char iscritical, LDAPControl **ctrlp ); +/* Given an array of controls e.g. LDAPControl **ctrls, add the given + control to the end of the array, growing the array with realloc + e.g. slapi_add_control_ext(&ctrls, newctrl, 1); + if ctrls is NULL, the array will be created with malloc + if copy is true, the given control will be copied + if copy is false, the given control will be used and owned by the array + if copy is false, make sure the control can be freed by ldap_controls_free +*/ +void slapi_add_control_ext( LDAPControl ***ctrlsp, LDAPControl *newctrl, int copy ); + +/* Given an array of controls e.g. LDAPControl **ctrls, add all of the given + controls in the newctrls array to the end of ctrls, growing the array with realloc + if ctrls is NULL, the array will be created with malloc + if copy is true, each given control will be copied + if copy is false, each given control will be used and owned by the array + if copy is false, make sure each control can be freed by ldap_controls_free +*/ +void slapi_add_controls( LDAPControl ***ctrlsp, LDAPControl **newctrls, int copy ); /* * routines for dealing with extended operations @@ -865,6 +883,13 @@ int slapi_pwpolicy_make_response_control (Slapi_PBlock *pb, int seconds, int log * routine for freeing the ch_arrays returned by the slapi_get*_copy functions above */ void slapi_ch_array_free( char **array ); +/* + * Add the given string to the given null terminated array. + * s is not copied, so if you want to add a copy of s to the + * array, use slapi_ch_strdup(s) + * if *a is NULL, a new array will be created + */ +void slapi_ch_array_add( char ***array, char *string ); /*
0
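The winsync header additions in the diff above spell out memory-ownership rules for the search-parameter callbacks: replacement strings must be slapi_ch_*-allocated, attrs is grown with slapi_ch_array_add, and controls added through slapi_add_control_ext become owned by the array. A minimal hypothetical callback sketch that follows those rules (the function name and the chosen filter/attributes are illustrative, not part of the commit):

static void
example_pre_ds_search_all_cb(void *cookie, const char *agmt_dn,
                             char **base, int *scope, char **filter,
                             char ***attrs, LDAPControl ***serverctrls)
{
    /* free the default filter, then hand back a slapi_ch_*-allocated
       replacement so the winsync code can free it later */
    slapi_ch_free_string(filter);
    *filter = slapi_ch_strdup("(objectclass=ntuser)");

    /* attrs is a NULL-terminated array freed with slapi_ch_array_free,
       so add strdup'd copies whose ownership transfers to the array */
    slapi_ch_array_add(attrs, slapi_ch_strdup("uid"));
    slapi_ch_array_add(attrs, slapi_ch_strdup("ntUserDomainId"));

    /* scope is set directly; serverctrls could be extended with
       slapi_add_control_ext(serverctrls, ctrl, 1) to add a copy */
    *scope = LDAP_SCOPE_SUBTREE;
}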
bfe5fe5d0ff45ed0ca399d9b768932d9181c5fa0
389ds/389-ds-base
Issue 5082 - slugify: ModuleNotFoundError when running test cases Bug Description: slugify module is used in WebUI tests for creating filenames for screenshots. But it's often not installed by default, since it's not required by lib389. WebUI tests are executed only when a WEBUI environment variable is present, so we should import it under the same condition. Fix Description: Import slugify module only when WEBUI environment variable is present and WebUI tests are executed. Reviewed-by: @progier389 (Thanks!) Fixes: https://github.com/389ds/389-ds-base/issues/5082
commit bfe5fe5d0ff45ed0ca399d9b768932d9181c5fa0 Author: Viktor Ashirov <[email protected]> Date: Thu Aug 10 10:08:20 2023 +0200 Issue 5082 - slugify: ModuleNotFoundError when running test cases Bug Description: slugify module is used in WebUI tests for creating filenames for screenshots. But it's often not installed by default, since it's not required by lib389. WebUI tests are executed only when a WEBUI environment variable is present, so we should import it under the same condition. Fix Description: Import slugify module only when WEBUI environment variable is present and WebUI tests are executed. Reviewed-by: @progier389 (Thanks!) Fixes: https://github.com/389ds/389-ds-base/issues/5082 diff --git a/dirsrvtests/conftest.py b/dirsrvtests/conftest.py index cd1a969d5..c9db07a58 100644 --- a/dirsrvtests/conftest.py +++ b/dirsrvtests/conftest.py @@ -18,8 +18,10 @@ import gzip from .report import getReport from lib389.paths import Paths from enum import Enum -from slugify import slugify -from pathlib import Path + +if "WEBUI" in os.environ: + from slugify import slugify + from pathlib import Path pkgs = ['389-ds-base', 'nss', 'nspr', 'openldap', 'cyrus-sasl'] @@ -139,7 +141,7 @@ def pytest_runtest_makereport(item, call): report.extra = extra # Make a screenshot if WebUI test fails - if call.when == "call": + if call.when == "call" and "WEBUI" in os.environ: if call.excinfo is not None and "page" in item.funcargs: page = item.funcargs["page"] screenshot_dir = Path(".playwright-screenshots")
0
84b8bfd7d18a0613920dce36f1d3775d75e45a3e
389ds/389-ds-base
Bug 1199675 - CVE-2014-8112 CVE-2014-8105 389-ds-base: various flaws [fedora-all] Fix for CVE-2014-8112: If the unhashed pw switch is set to off, this should only prevent the generation of the unhashed#user#password attribute. But encoding of pw values and detection of which values have to be deleted need to stay intact. So the check whether the switch is set has to be placed close to the generation of the attribute, in the different 'if' branches. Reviewed by Noriko, thanks (cherry picked from commit e5de803f4ab1b097c637c269fcc8b567e664c00d)
commit 84b8bfd7d18a0613920dce36f1d3775d75e45a3e Author: Ludwig Krispenz <[email protected]> Date: Fri Nov 28 14:23:06 2014 +0100 Bug 1199675 - CVE-2014-8112 CVE-2014-8105 389-ds-base: various flaws [fedora-all] Fix for CVE-2014-8112 If the unhashed pw switch is set to off this should only prevent the generation of the unhashed#user#password attribute. But encoding of pw values and detiecetion which values have to be deleted needs to stay intact. So the check if the switch is set has to be placed close to the generation of the attribute in different 'if' branches Reviewed by Noriko, thanks (cherry picked from commit e5de803f4ab1b097c637c269fcc8b567e664c00d) diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c index bcf53cd28..61f99cf0a 100644 --- a/ldap/servers/plugins/retrocl/retrocl_po.c +++ b/ldap/servers/plugins/retrocl/retrocl_po.c @@ -101,6 +101,12 @@ static lenstr *make_changes_string(LDAPMod **ldm, const char **includeattrs) continue; } } + if (SLAPD_UNHASHED_PW_NOLOG == slapi_config_get_unhashed_pw_switch()) { + if (0 == strcasecmp(ldm[ i ]->mod_type, PSEUDO_ATTR_UNHASHEDUSERPASSWORD)) { + /* If nsslapd-unhashed-pw-switch == nolog, skip writing it to cl. */ + continue; + } + } switch ( ldm[ i ]->mod_op & ~LDAP_MOD_BVALUES ) { case LDAP_MOD_ADD: addlenstr( l, "add: " ); diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 9b2f42d37..ab12f568f 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -836,8 +836,7 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) * before calling the preop plugins */ - if (pw_change && !repl_op && - (SLAPD_UNHASHED_PW_OFF != config_get_unhashed_pw_switch())) { + if (pw_change && !repl_op ) { Slapi_Value **va = NULL; unhashed_pw_attr = slapi_attr_syntax_normalize(PSEUDO_ATTR_UNHASHEDUSERPASSWORD); @@ -907,13 +906,15 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) * Finally, delete the unhashed userpassword * (this will update the password entry extension) */ - bval.bv_val = password; - bval.bv_len = strlen(password); - bv[0] = &bval; - bv[1] = NULL; - valuearray_init_bervalarray(bv, &va); - slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); - valuearray_free(&va); + if (SLAPD_UNHASHED_PW_OFF != config_get_unhashed_pw_switch()) { + bval.bv_val = password; + bval.bv_len = strlen(password); + bv[0] = &bval; + bv[1] = NULL; + valuearray_init_bervalarray(bv, &va); + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + } } else { /* * Password is encoded, try and find a matching unhashed_password to delete @@ -945,19 +946,23 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) if(strcmp(unhashed_pwsp->pws_name, "CLEAR") == 0){ if((*(pwsp->pws_cmp))((char *)unhashed_pwd , valpwd) == 0 ){ /* match, add the delete mod for this particular unhashed userpassword */ - valuearray_init_bervalarray(bv, &va); - slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); - valuearray_free(&va); - free_pw_scheme( unhashed_pwsp ); + if (SLAPD_UNHASHED_PW_OFF != config_get_unhashed_pw_switch()) { + valuearray_init_bervalarray(bv, &va); + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + free_pw_scheme( unhashed_pwsp ); + } break; } } else { /* * We have a hashed unhashed_userpassword! We must delete it. 
*/ - valuearray_init_bervalarray(bv, &va); - slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); - valuearray_free(&va); + if (SLAPD_UNHASHED_PW_OFF != config_get_unhashed_pw_switch()) { + valuearray_init_bervalarray(bv, &va); + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + } } free_pw_scheme( unhashed_pwsp ); } @@ -972,7 +977,7 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) if (remove_unhashed_pw && !slapi_entry_attr_find(e, unhashed_pw_attr, &a)){ slapi_mods_add_mod_values(&smods, pw_mod->mod_op,unhashed_pw_attr, va); } - } else { + } else if (SLAPD_UNHASHED_PW_OFF != config_get_unhashed_pw_switch()) { /* add pseudo password attribute */ valuearray_init_bervalarray_unhashed_only(pw_mod->mod_bvalues, &va); if(va && va[0]){
0
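A compact hypothetical sketch of the control-flow change described in the commit message above: password encoding and delete-detection run unconditionally, and only the generation of the unhashed#user#password pseudo attribute is guarded by the switch (the helper name is illustrative, not the actual modify.c code):

/* Called after the password values have been encoded/matched;
   only the pseudo-attribute generation depends on the switch. */
static void
example_add_unhashed_pw_mod(Slapi_Mods *smods, LDAPMod *pw_mod,
                            char *unhashed_pw_attr, Slapi_Value **va)
{
    if (SLAPD_UNHASHED_PW_OFF != config_get_unhashed_pw_switch()) {
        slapi_mods_add_mod_values(smods, pw_mod->mod_op,
                                  unhashed_pw_attr, va);
    }
}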
b388580dfc42f55ca4c4cccf12f8af388cb685e4
389ds/389-ds-base
Ticket 49083 - Support prefix for discovery of the defaults.inf file. Bug Description: The change to paths.py, which consumes defaults.inf, broke custom prefix builds and tests with lib389. Fix Description: Allow paths.py to detect the presence of PREFIX and to search PREFIX/share/dirsrv/inf/defaults.inf for the locations of DS. Once this file is found, everything else will "just work". https://fedorahosted.org/389/ticket/49083 Author: wibrown Review by: nhosoi (Thanks!)
commit b388580dfc42f55ca4c4cccf12f8af388cb685e4 Author: William Brown <[email protected]> Date: Mon Jan 9 10:33:38 2017 +1000 Ticket 49083 - Support prefix for discovery of the defaults.inf file. Bug Description: Due to the change to paths.py which consumes defaults.inf, this broke custom prefix builds and tests with lib389. Fix Description: Allow paths.py to detect the presence of PREFIX, and to search PREFIX/share/dirsrv/inf/defaults.inf for the locations of DS. Once this file is grabbed, everything else will "just work". https://fedorahosted.org/389/ticket/49083 Author: wibrown Review by: nhosoi (Thanks!) diff --git a/src/lib389/lib389/paths.py b/src/lib389/lib389/paths.py index e0c2518a4..ca1586e3f 100644 --- a/src/lib389/lib389/paths.py +++ b/src/lib389/lib389/paths.py @@ -111,6 +111,13 @@ class Paths(object): self._instance = instance def _get_defaults_loc(self, search_paths): + prefix = os.getenv('PREFIX') + if prefix is not None: + spath = os.path.join(prefix, 'share/dirsrv/inf/defaults.inf') + if os.path.isfile(spath): + return spath + else: + raise IOError('defaults.inf not found in prefixed location %s' % spath) for spath in search_paths: if os.path.isfile(spath): return spath
0
3ba819485e0ef0c4bc8d8fd396b678b07845d2b2
389ds/389-ds-base
Update specfile and rust crates Reviewed by: spichugi(Thanks!)
commit 3ba819485e0ef0c4bc8d8fd396b678b07845d2b2 Author: Mark Reynolds <[email protected]> Date: Thu Dec 15 10:09:39 2022 -0500 Update specfile and rust crates Reviewed by: spichugi(Thanks!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index c506d9fa7..857a20f08 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -61,7 +61,7 @@ Summary: 389 Directory Server (%{variant}) Name: 389-ds-base Version: __VERSION__ Release: __RELEASE__%{?dist} -License: GPLv3+ +License: GPLv3+ and (ASL 2.0 or MIT) URL: https://www.port389.org/ Group: System Environment/Daemons # Is this still needed? @@ -71,6 +71,103 @@ Obsoletes: %{name}-legacy-tools < 1.4.4.6 Obsoletes: %{name}-legacy-tools-debuginfo < 1.4.4.6 Provides: ldif2ldbm +##### Bundled cargo crates list - START ##### +Provides: bundled(crate(ahash)) = 0.7.6 +Provides: bundled(crate(ansi_term)) = 0.12.1 +Provides: bundled(crate(atty)) = 0.2.14 +Provides: bundled(crate(autocfg)) = 1.1.0 +Provides: bundled(crate(base64)) = 0.13.1 +Provides: bundled(crate(bitflags)) = 1.3.2 +Provides: bundled(crate(byteorder)) = 1.4.3 +Provides: bundled(crate(cbindgen)) = 0.9.1 +Provides: bundled(crate(cc)) = 1.0.78 +Provides: bundled(crate(cfg-if)) = 1.0.0 +Provides: bundled(crate(clap)) = 2.34.0 +Provides: bundled(crate(concread)) = 0.2.21 +Provides: bundled(crate(crossbeam)) = 0.8.2 +Provides: bundled(crate(crossbeam-channel)) = 0.5.6 +Provides: bundled(crate(crossbeam-deque)) = 0.8.2 +Provides: bundled(crate(crossbeam-epoch)) = 0.9.13 +Provides: bundled(crate(crossbeam-queue)) = 0.3.8 +Provides: bundled(crate(crossbeam-utils)) = 0.8.14 +Provides: bundled(crate(entryuuid)) = 0.1.0 +Provides: bundled(crate(entryuuid_syntax)) = 0.1.0 +Provides: bundled(crate(fastrand)) = 1.8.0 +Provides: bundled(crate(fernet)) = 0.1.4 +Provides: bundled(crate(foreign-types)) = 0.3.2 +Provides: bundled(crate(foreign-types-shared)) = 0.1.1 +Provides: bundled(crate(getrandom)) = 0.2.8 +Provides: bundled(crate(hashbrown)) = 0.12.3 +Provides: bundled(crate(hermit-abi)) = 0.1.19 +Provides: bundled(crate(instant)) = 0.1.12 +Provides: bundled(crate(itoa)) = 1.0.4 +Provides: bundled(crate(jobserver)) = 0.1.25 +Provides: bundled(crate(libc)) = 0.2.138 +Provides: bundled(crate(librnsslapd)) = 0.1.0 +Provides: bundled(crate(librslapd)) = 0.1.0 +Provides: bundled(crate(lock_api)) = 0.4.9 +Provides: bundled(crate(log)) = 0.4.17 +Provides: bundled(crate(lru)) = 0.7.8 +Provides: bundled(crate(memoffset)) = 0.7.1 +Provides: bundled(crate(once_cell)) = 1.16.0 +Provides: bundled(crate(openssl)) = 0.10.44 +Provides: bundled(crate(openssl-macros)) = 0.1.0 +Provides: bundled(crate(openssl-sys)) = 0.9.79 +Provides: bundled(crate(parking_lot)) = 0.11.2 +Provides: bundled(crate(parking_lot_core)) = 0.8.6 +Provides: bundled(crate(paste)) = 0.1.18 +Provides: bundled(crate(paste-impl)) = 0.1.18 +Provides: bundled(crate(pin-project-lite)) = 0.2.9 +Provides: bundled(crate(pkg-config)) = 0.3.26 +Provides: bundled(crate(ppv-lite86)) = 0.2.17 +Provides: bundled(crate(proc-macro-hack)) = 0.5.19 +Provides: bundled(crate(proc-macro2)) = 1.0.47 +Provides: bundled(crate(pwdchan)) = 0.1.0 +Provides: bundled(crate(quote)) = 1.0.21 +Provides: bundled(crate(rand)) = 0.8.5 +Provides: bundled(crate(rand_chacha)) = 0.3.1 +Provides: bundled(crate(rand_core)) = 0.6.4 +Provides: bundled(crate(redox_syscall)) = 0.2.16 +Provides: bundled(crate(remove_dir_all)) = 0.5.3 +Provides: bundled(crate(ryu)) = 1.0.11 +Provides: bundled(crate(scopeguard)) = 1.1.0 +Provides: bundled(crate(serde)) = 1.0.150 
+Provides: bundled(crate(serde_derive)) = 1.0.150 +Provides: bundled(crate(serde_json)) = 1.0.89 +Provides: bundled(crate(slapd)) = 0.1.0 +Provides: bundled(crate(slapi_r_plugin)) = 0.1.0 +Provides: bundled(crate(smallvec)) = 1.10.0 +Provides: bundled(crate(strsim)) = 0.8.0 +Provides: bundled(crate(syn)) = 1.0.105 +Provides: bundled(crate(synstructure)) = 0.12.6 +Provides: bundled(crate(tempfile)) = 3.3.0 +Provides: bundled(crate(textwrap)) = 0.11.0 +Provides: bundled(crate(tokio)) = 1.23.0 +Provides: bundled(crate(tokio-macros)) = 1.8.2 +Provides: bundled(crate(toml)) = 0.5.10 +Provides: bundled(crate(unicode-ident)) = 1.0.5 +Provides: bundled(crate(unicode-width)) = 0.1.10 +Provides: bundled(crate(unicode-xid)) = 0.2.4 +Provides: bundled(crate(uuid)) = 0.8.2 +Provides: bundled(crate(vcpkg)) = 0.2.15 +Provides: bundled(crate(vec_map)) = 0.8.2 +Provides: bundled(crate(version_check)) = 0.9.4 +Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1 +Provides: bundled(crate(winapi)) = 0.3.9 +Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0 +Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0 +Provides: bundled(crate(windows-sys)) = 0.42.0 +Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.42.0 +Provides: bundled(crate(windows_aarch64_msvc)) = 0.42.0 +Provides: bundled(crate(windows_i686_gnu)) = 0.42.0 +Provides: bundled(crate(windows_i686_msvc)) = 0.42.0 +Provides: bundled(crate(windows_x86_64_gnu)) = 0.42.0 +Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.42.0 +Provides: bundled(crate(windows_x86_64_msvc)) = 0.42.0 +Provides: bundled(crate(zeroize)) = 1.5.7 +Provides: bundled(crate(zeroize_derive)) = 1.3.3 +##### Bundled cargo crates list - END ##### + # Attach the buildrequires to the top level package: BuildRequires: nspr-devel BuildRequires: nss-devel >= 3.34 diff --git a/src/Cargo.lock b/src/Cargo.lock index 9086499cd..22f4bd016 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -41,9 +41,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "bitflags" @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.73" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" dependencies = [ "jobserver", ] @@ -158,23 +158,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.10" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if", "crossbeam-utils", @@ -182,12 +181,11 @@ dependencies = [ 
[[package]] name = "crossbeam-utils" -version = "0.8.11" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -251,9 +249,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", @@ -289,24 +287,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "libc" -version = "0.2.132" +version = "0.2.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" +checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" [[package]] name = "librnsslapd" @@ -329,9 +327,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -357,24 +355,24 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] [[package]] name = "once_cell" -version = "1.13.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "29d971fd5722fec23977260f6e81aa67d2f22cadbdc2aa049f1022d9a3be1566" dependencies = [ "bitflags", "cfg-if", @@ -398,9 +396,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = "5454462c0eced1e97f2ec09036abc8da362e66802f66fd20f86854d9d8cbcbc4" dependencies = [ "autocfg", "cc", @@ -422,9 +420,9 @@ dependencies = [ [[package]] name = "parking_lot_core" 
-version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", @@ -461,15 +459,15 @@ checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-hack" @@ -479,9 +477,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.43" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] @@ -531,9 +529,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] @@ -570,18 +568,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.143" +version = "1.0.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e8e5d5b70924f74ff5c6d64d9a5acd91422117c60f48c4e07855238a254553" +checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.143" +version = "1.0.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3d8e8de557aee63c26b85b947f5e59b690d0454c753f3adeb5cd7835ab88391" +checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e" dependencies = [ "proc-macro2", "quote", @@ -590,9 +588,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.83" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "itoa", "ryu", @@ -617,9 +615,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "strsim" @@ -629,9 +627,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "syn" -version = "1.0.99" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" dependencies = [ "proc-macro2", "quote", @@ -675,21 +673,21 @@ dependencies = [ [[package]] name = "tokio" -version = "1.20.1" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" +checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" dependencies = [ "autocfg", - "once_cell", "pin-project-lite", "tokio-macros", + "windows-sys", ] [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -698,30 +696,30 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" dependencies = [ "serde", ] [[package]] name = "unicode-ident" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "uuid" @@ -778,6 +776,63 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "zeroize" version = "1.5.7" @@ -789,9 +844,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote",
0
1b9504533e4687a08c0fbb9141b4ea835f19f479
389ds/389-ds-base
Ticket 49316 - fix missing not condition in clock cleanup Bug Description: During the clock cleanup I removed a macro. This broke a not condition, which caused 100% CPU load. Fix Description: Add braces around the condition so the negation applies to the whole expression. https://pagure.io/389-ds-base/issue/49316 Author: lkrispen Review by: wibrown (Thanks Ludwig!)
commit 1b9504533e4687a08c0fbb9141b4ea835f19f479 Author: Ludwig Krispenz <[email protected]> Date: Mon Jul 17 13:32:54 2017 +0200 Ticket 49316 - fix missing not condition in clock cleanu Bug Description: During the clock cleanup I removed a macro. This broke a not condition which caused 100% cpu load. Fix Description: Add braces around the condition. https://pagure.io/389-ds-base/issue/49316 Author: lkrispen Review by: wibrown (Thanks Ludwig!) diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c index 64c1d6b35..1089dc35a 100644 --- a/ldap/servers/slapd/eventq.c +++ b/ldap/servers/slapd/eventq.c @@ -315,7 +315,7 @@ eq_loop(void *arg __attribute__((unused))) PRIntervalTime timeout; int until; PR_Lock(eq->eq_lock); - while (!(NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime)) { + while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { if (!eq_running) { PR_Unlock(eq->eq_lock); goto bye;
0
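A minimal, self-contained C sketch of the precedence issue behind this fix. The names here (struct event, queue, keep_waiting_*) are simplified stand-ins for eq->eq_queue / ec_when in eventq.c, not the actual source: negating only the NULL check makes the wait condition false for any non-empty queue, so the loop never blocks; the fix negates the whole conjunction.

/* Sketch only: simplified stand-ins for the eq_loop() wait condition. */
#include <stdio.h>
#include <time.h>

struct event { time_t ec_when; };

/* Fixed form: keep waiting while there is NOT a due event at the head. */
static int keep_waiting_fixed(const struct event *queue, time_t curtime)
{
    return !((NULL != queue) && (queue->ec_when <= curtime));
}

/* Broken form from the regression: the '!' binds only to the NULL check,
 * so for any non-empty queue the condition is immediately false and the
 * surrounding loop never waits, spinning at 100% CPU.
 * (It would also dereference a NULL queue.) */
static int keep_waiting_broken(const struct event *queue, time_t curtime)
{
    return !(NULL != queue) && (queue->ec_when <= curtime);
}

int main(void)
{
    time_t now = time(NULL);
    struct event not_due = { .ec_when = now + 60 };  /* due in a minute */

    printf("fixed : keep waiting = %d (1: block until the event is due)\n",
           keep_waiting_fixed(&not_due, now));
    printf("broken: keep waiting = %d (0: never waits, the loop spins)\n",
           keep_waiting_broken(&not_due, now));
    return 0;
}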
810fc87751090a8ec643be70ad6c5dd106d3a8ac
389ds/389-ds-base
Issue 5262 - high contention in find_entry_internal_dn on mixed load (#5264) Bug description: Under high mixed load (update/read), the pressure on the entry cache is so high that, during an update, by the time the entry is added into the cache it can already have been removed from the cache again before it is locked for the update. If such an event occurs 1000 times in a row, the update fails. Another issue is that when updating the parent entry of a deleted entry (numsubordinates), a failure to lock the parent does not return the parent entry to the cache, so the refcnt becomes invalid. Fix description: To reduce the likelihood of failure, if an entry is removed from the entry cache before it is locked, pause 100ms before retrying to load/lock it. In the case of a failure to lock the parent entry, the entry should still be returned to the cache. relates: #5262 Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, William Brown
commit 810fc87751090a8ec643be70ad6c5dd106d3a8ac Author: tbordaz <[email protected]> Date: Fri Sep 30 13:35:05 2022 +0200 Issue 5262 - high contention in find_entry_internal_dn on mixed load (#5264) Bug description: Under high mixed load (update/read), the pressure on the entry cache is so high that during an update by the time the entry is added into the cache, it is then removed from the cache before it is locked for the update. If such event occurs 1000 times in a raw, the update fails. Another issue is that when updating the parent entry of a deleted entry (numsubordinates), if it fails to lock the parent it does not return the parent entry. So refcnt becomes invalid. Fix description: To reduce the likelyhood of failure, if an entry is removed from the entry cache before it is locked, then pause 100ms before retrying to upload/lock it. In the case of the failure to lock the parent entry, the entry should be returned. relates: #5262 Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, William Brown diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index d7e842240..e187e9125 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -542,6 +542,7 @@ replace_entry: if (cache_retry == RETRY_CACHE_LOCK && cache_retry_count < LDBM_CACHE_RETRY_COUNT) { /* try again */ + CACHE_RETURN(&(inst->inst_cache), &parent); DS_Sleep(PR_MillisecondsToInterval(100)); cache_retry_count++; continue;
0
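A minimal, self-contained C sketch of the retry pattern this commit describes. All names here (fetch_parent, lock_entry, cache_release, MAX_RETRIES) are hypothetical stand-ins, not the ldbm_delete.c / cache API: the point is that the cached reference is given back before the 100 ms pause and the next attempt, so repeated lock failures neither leak references nor leave the refcnt invalid.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 1000

struct entry { int refcnt; int locked; };

static struct entry parent_entry;            /* stand-in for the entry cache */

static struct entry *fetch_parent(void) { parent_entry.refcnt++; return &parent_entry; }
static void cache_release(struct entry *e) { e->refcnt--; }
static bool lock_entry(struct entry *e) { return e->locked ? false : (e->locked = 1); }

static struct entry *get_locked_parent(void)
{
    for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
        struct entry *parent = fetch_parent();
        if (lock_entry(parent))
            return parent;                   /* caller unlocks and releases */
        cache_release(parent);               /* give the reference back first */
        usleep(100 * 1000);                  /* pause 100 ms before retrying */
    }
    return NULL;                             /* give up after MAX_RETRIES */
}

int main(void)
{
    struct entry *p = get_locked_parent();
    printf("locked=%d refcnt=%d\n", p ? p->locked : -1, parent_entry.refcnt);
    return 0;
}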
1362fc2e63488b9ffe5313024ffa129da6a6125b
389ds/389-ds-base
Resolves #222918 Summary: server crash after deleting supposedly deleted attribute Description: index.c: if there is no attribute to delete, don't call index_addordel_values_sv. string.c: changed string_values2keys to handle NULL bvals
commit 1362fc2e63488b9ffe5313024ffa129da6a6125b Author: Noriko Hosoi <[email protected]> Date: Fri Sep 28 23:46:40 2007 +0000 Resolves #222918 Summary: server crash after deleting supposedly deleted attribute Description: index.c: if there is no attribute to delete, don't call index_addordel_values_svstring.c: changed string_values2keys to handle NULL bvals diff --git a/ldap/servers/plugins/syntaxes/string.c b/ldap/servers/plugins/syntaxes/string.c index f79c40a1e..945b69bf6 100644 --- a/ldap/servers/plugins/syntaxes/string.c +++ b/ldap/servers/plugins/syntaxes/string.c @@ -310,59 +310,54 @@ int string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals, Slapi_Value ***ivals, int syntax, int ftype ) { - int nsubs, numbvals, i, n, j; - Slapi_Value **nbvals; + int nsubs, numbvals = 0, n; + Slapi_Value **nbvals, **nbvlp; + Slapi_Value **bvlp; char *w, *c, *p; - char buf[SUBLEN+1]; switch ( ftype ) { case LDAP_FILTER_EQUALITY: /* allocate a new array for the normalized values */ - for ( numbvals = 0; bvals[numbvals] != NULL; numbvals++ ) { - /* NULL */ + for ( bvlp = bvals; bvlp && *bvlp; bvlp++ ) { + numbvals++; } - nbvals = (Slapi_Value **) slapi_ch_malloc( (numbvals+1) * sizeof(Slapi_Value *)); + nbvals = (Slapi_Value **) slapi_ch_calloc( (numbvals + 1), sizeof(Slapi_Value *)); - for ( i = 0; i < numbvals; i++ ) + for ( bvlp = bvals, nbvlp = nbvals; bvlp && *bvlp; bvlp++, nbvlp++ ) { - c = slapi_ch_strdup(slapi_value_get_string(bvals[i])); + c = slapi_ch_strdup(slapi_value_get_string(*bvlp)); /* if the NORMALIZED flag is set, skip normalizing */ - if (!(slapi_value_get_flags(bvals[i]) & SLAPI_ATTR_FLAG_NORMALIZED)) + if (!(slapi_value_get_flags(*bvlp) & SLAPI_ATTR_FLAG_NORMALIZED)) value_normalize( c, syntax, 1 /* trim leading blanks */ ); - nbvals[i] = slapi_value_new_string_passin(c); + *nbvlp = slapi_value_new_string_passin(c); } - nbvals[i] = NULL; *ivals = nbvals; break; case LDAP_FILTER_APPROX: /* XXX should not do this twice! XXX */ /* get an upper bound on the number of ivals */ - numbvals = 0; - for ( i = 0; bvals[i] != NULL; i++ ) { - for ( w = first_word( (char*)slapi_value_get_string(bvals[i]) ); w != NULL; - w = next_word( w ) ) { + for ( bvlp = bvals; bvlp && *bvlp; bvlp++ ) { + for ( w = first_word( (char*)slapi_value_get_string(*bvlp) ); + w != NULL; w = next_word( w ) ) { numbvals++; } } - nbvals = (Slapi_Value **) slapi_ch_malloc( (numbvals + 1) * sizeof(Slapi_Value *) ); + nbvals = (Slapi_Value **) slapi_ch_calloc( (numbvals + 1), sizeof(Slapi_Value *) ); n = 0; - for ( i = 0; bvals[i] != NULL; i++ ) { - for ( w = first_word( (char*)slapi_value_get_string(bvals[i]) ); w != NULL; - w = next_word( w ) ) { + nbvlp = nbvals; + for ( bvlp = bvals; bvlp && *bvlp; bvlp++ ) { + for ( w = first_word( (char*)slapi_value_get_string(*bvlp) ); + w != NULL; w = next_word( w ) ) { if ( (c = phonetic( w )) != NULL ) { - nbvals[n] = slapi_value_new_string_passin(c); - n++; + *nbvlp = slapi_value_new_string_passin(c); + nbvlp++; } } } - nbvals[n] = NULL; - if ( n == 0 ) { - slapi_ch_free((void**)ivals ); - return( 0 ); - } + /* even if (n == 0), we should return the array nbvals w/ NULL items */ *ivals = nbvals; break; @@ -370,9 +365,11 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals, { /* XXX should remove duplicates! 
XXX */ Slapi_Value *bvdup; - const struct berval *bvp; + const struct berval *bvp; + char buf[SUBLEN+1]; + int i; nsubs = 0; - for ( i = 0; bvals[i] != NULL; i++ ) { + for ( bvlp = bvals; bvlp && *bvlp; bvlp++ ) { /* * Note: this calculation may err on the high side, * because value_normalize(), which is called below @@ -384,27 +381,26 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals, * the only downside is that we allocate more space than * we really need. */ - nsubs += slapi_value_get_length(bvals[i]) - SUBLEN + 3; + nsubs += slapi_value_get_length(*bvlp) - SUBLEN + 3; } - *ivals = (Slapi_Value **) slapi_ch_malloc( (nsubs + 1) * sizeof(Slapi_Value *) ); + *ivals = (Slapi_Value **) slapi_ch_calloc( (nsubs + 1), sizeof(Slapi_Value *) ); buf[SUBLEN] = '\0'; n = 0; bvdup= slapi_value_new(); - for ( i = 0; bvals[i] != NULL; i++ ) - { - c = slapi_ch_strdup(slapi_value_get_string(bvals[i])); + for ( bvlp = bvals; bvlp && *bvlp; bvlp++ ) { + c = slapi_ch_strdup(slapi_value_get_string(*bvlp)); value_normalize( c, syntax, 1 /* trim leading blanks */ ); - slapi_value_set_string_passin(bvdup, c); + slapi_value_set_string_passin(bvdup, c); - bvp = slapi_value_get_berval(bvdup); + bvp = slapi_value_get_berval(bvdup); /* leading */ if ( bvp->bv_len > SUBLEN - 2 ) { buf[0] = '^'; - for ( j = 0; j < SUBLEN - 1; j++ ) { - buf[j + 1] = bvp->bv_val[j]; + for ( i = 0; i < SUBLEN - 1; i++ ) { + buf[i + 1] = bvp->bv_val[i]; } (*ivals)[n] = slapi_value_new_string(buf); n++; @@ -414,8 +410,8 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals, for ( p = bvp->bv_val; p < (bvp->bv_val + bvp->bv_len - SUBLEN + 1); p++ ) { - for ( j = 0; j < SUBLEN; j++ ) { - buf[j] = p[j]; + for ( i = 0; i < SUBLEN; i++ ) { + buf[i] = p[i]; } buf[SUBLEN] = '\0'; (*ivals)[n] = slapi_value_new_string(buf); @@ -425,8 +421,8 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals, /* trailing */ if ( bvp->bv_len > SUBLEN - 2 ) { p = bvp->bv_val + bvp->bv_len - SUBLEN + 1; - for ( j = 0; j < SUBLEN - 1; j++ ) { - buf[j] = p[j]; + for ( i = 0; i < SUBLEN - 1; i++ ) { + buf[i] = p[i]; } buf[SUBLEN - 1] = '$'; (*ivals)[n] = slapi_value_new_string(buf); @@ -434,7 +430,6 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals, } } slapi_value_free(&bvdup); - (*ivals)[n] = NULL; } break; } diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index dab71e288..9da2389ca 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -628,9 +628,12 @@ index_add_mods( flags = BE_INDEX_DEL|BE_INDEX_PRESENCE|BE_INDEX_EQUALITY; } - /* Update the index */ - index_addordel_values_sv( be, mods[i]->mod_type, - deleted_valueArray, evals, id, flags, txn); + /* Update the index, if necessary */ + if (deleted_valueArray) { + index_addordel_values_sv( be, mods[i]->mod_type, + deleted_valueArray, evals, id, + flags, txn ); + } slapi_valueset_free(mod_vals); } else { @@ -645,18 +648,18 @@ index_add_mods( flags = BE_INDEX_DEL; } - /* If the same value doesn't exist in a subtype, set - * BE_INDEX_EQUALITY flag so the equality index is - * removed. - */ - slapi_entry_attr_find( olde->ep_entry, mods[i]->mod_type, &curr_attr); + /* If the same value doesn't exist in a subtype, set + * BE_INDEX_EQUALITY flag so the equality index is + * removed. 
+ */ + slapi_entry_attr_find( olde->ep_entry, mods[i]->mod_type, &curr_attr); for (j = 0; mods_valueArray[j] != NULL; j++ ) { - if ( valuearray_find(curr_attr, evals, mods_valueArray[j]) == -1 ) { + if ( valuearray_find(curr_attr, evals, mods_valueArray[j]) == -1 ) { if (!(flags & BE_INDEX_EQUALITY)) { - flags |= BE_INDEX_EQUALITY; + flags |= BE_INDEX_EQUALITY; } } - } + } rc = index_addordel_values_sv( be, basetype, mods_valueArray,
0
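A minimal, self-contained C sketch of the defensive iteration pattern string_values2keys switches to in this commit. The helper count_values and the sample values are hypothetical, not the syntax-plugin API: walking a NULL-terminated array with "vp && *vp" lets a NULL bvals array (e.g. an attribute whose values were already deleted) count as zero values instead of crashing on the first dereference.

#include <stdio.h>
#include <stddef.h>

static size_t count_values(const char **vals)
{
    size_t n = 0;
    /* "vp && *vp" makes both a NULL array and an empty array safe. */
    for (const char **vp = vals; vp && *vp; vp++)
        n++;
    return n;
}

int main(void)
{
    const char *some[] = { "cn=foo", "cn=bar", NULL };
    printf("%zu\n", count_values(some));   /* 2 */
    printf("%zu\n", count_values(NULL));   /* 0, no crash */
    return 0;
}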
d19eafcd211d89cffdac1b2c3432087443e7d122
389ds/389-ds-base
Added capability to validate syntax of values being added to the database. Also added numericstring syntax support. For more details, see the design doc at http://directory.fedoraproject.org/wiki/Syntax_Validation_Design
commit d19eafcd211d89cffdac1b2c3432087443e7d122 Author: Nathan Kinder <[email protected]> Date: Fri May 8 09:14:42 2009 -0700 Added capability to validate syntax of values being added to the database. Also added numericstring syntax support. For more details, see the design doc at http://directory.fedoraproject.org/wiki/Syntax_Validation_Design diff --git a/Makefile.am b/Makefile.am index ddfe01164..b9cdc1804 100644 --- a/Makefile.am +++ b/Makefile.am @@ -138,14 +138,22 @@ LIBBITWISE_PLUGIN = libbitwise-plugin.la enable_bitwise = 1 endif +if enable_presence +LIBPRESENCE_PLUGIN = libpresence-plugin.la +LIBPRESENCE_SCHEMA = $(srcdir)/ldap/schema/10presence.ldif +enable_presence = on +else +enable_presence = off +endif + serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la \ libback-ldbm.la libchainingdb-plugin.la libcos-plugin.la libdes-plugin.la \ libdistrib-plugin.la libhttp-client-plugin.la libcollation-plugin.la \ - libmemberof-plugin.la libpassthru-plugin.la libpresence-plugin.la \ - libpwdstorage-plugin.la libreferint-plugin.la libreplication-plugin.la \ - libretrocl-plugin.la libroles-plugin.la libstatechange-plugin.la \ - libsyntax-plugin.la libviews-plugin.la libschemareload-plugin.la \ - $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) + libmemberof-plugin.la libpassthru-plugin.la libpwdstorage-plugin.la \ + libreferint-plugin.la libreplication-plugin.la libretrocl-plugin.la \ + libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ + libviews-plugin.la libschemareload-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) \ + $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) nodist_property_DATA = ns-slapd.properties @@ -200,13 +208,13 @@ sampledata_DATA = $(srcdir)/ldap/ldif/Ace.ldif \ $(srcdir)/ldap/schema/60radius.ldif \ $(srcdir)/ldap/schema/60rfc4876.ldif \ $(srcdir)/ldap/schema/60samba.ldif \ - $(srcdir)/ldap/schema/60samba3.ldif + $(srcdir)/ldap/schema/60samba3.ldif \ + $(LIBPRESENCE_SCHEMA) schema_DATA = $(srcdir)/ldap/schema/00core.ldif \ $(srcdir)/ldap/schema/01common.ldif \ $(srcdir)/ldap/schema/05rfc2247.ldif \ $(srcdir)/ldap/schema/05rfc2927.ldif \ - $(srcdir)/ldap/schema/10presence.ldif \ $(srcdir)/ldap/schema/10rfc2307.ldif \ $(srcdir)/ldap/schema/20subscriber.ldif \ $(srcdir)/ldap/schema/25java-object.ldif \ @@ -295,6 +303,7 @@ task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \ ldap/admin/src/scripts/template-ns-inactivate.pl \ ldap/admin/src/scripts/template-ns-newpwpolicy.pl \ ldap/admin/src/scripts/template-schema-reload.pl \ + ldap/admin/src/scripts/template-syntax-validate.pl \ ldap/admin/src/scripts/template-verify-db.pl \ ldap/admin/src/scripts/template-dbverify @@ -894,10 +903,13 @@ libsyntax_plugin_la_SOURCES = ldap/servers/plugins/syntaxes/bin.c \ ldap/servers/plugins/syntaxes/debug.c \ ldap/servers/plugins/syntaxes/dn.c \ ldap/servers/plugins/syntaxes/int.c \ + ldap/servers/plugins/syntaxes/numericstring.c \ ldap/servers/plugins/syntaxes/phonetic.c \ ldap/servers/plugins/syntaxes/sicis.c \ ldap/servers/plugins/syntaxes/string.c \ ldap/servers/plugins/syntaxes/tel.c \ + ldap/servers/plugins/syntaxes/validate.c \ + ldap/servers/plugins/syntaxes/validate_task.c \ ldap/servers/plugins/syntaxes/value.c libsyntax_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @@ -1149,6 +1161,7 @@ fixupcmd = sed \ -e 's,@enable_dna\@,$(enable_dna),g' \ -e 's,@enable_autobind\@,$(enable_autobind),g' \ -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \ + -e 's,@enable_presence\@,$(enable_presence),g' \ -e 's,@ECHO_N\@,$(ECHO_N),g' \ -e 
's,@ECHO_C\@,$(ECHO_C),g' \ -e 's,@brand\@,$(brand),g' \ @@ -1199,6 +1212,7 @@ fixupcmd = sed \ -e 's,@enable_dna\@,$(enable_dna),g' \ -e 's,@enable_autobind\@,$(enable_autobind),g' \ -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \ + -e 's,@enable_presence\@,$(enable_presence),g' \ -e 's,@ECHO_N\@,$(ECHO_N),g' \ -e 's,@ECHO_C\@,$(ECHO_C),g' \ -e 's,@brand\@,$(brand),g' \ diff --git a/config.h.in b/config.h.in index e3175ca99..981e8157c 100644 --- a/config.h.in +++ b/config.h.in @@ -39,6 +39,9 @@ /* enable the pam passthru auth plugin */ #undef ENABLE_PAM_PASSTHRU +/* enable the presence plugin */ +#undef ENABLE_PRESENCE + /* Define to 1 if you have the <arpa/inet.h> header file. */ #undef HAVE_ARPA_INET_H diff --git a/configure.ac b/configure.ac index 962617297..57dd54d20 100644 --- a/configure.ac +++ b/configure.ac @@ -167,6 +167,21 @@ else fi AM_CONDITIONAL(enable_bitwise,test "$enable_bitwise" = "yes") +if test -z "$enable_presence" ; then + enable_presence=no # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-presence) +AC_ARG_ENABLE(presence, + AS_HELP_STRING([--enable-presence], + [enable the presence plugin (default: no)])) +if test "$enable_presence" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_PRESENCE], [1], [enable the presence plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_presence,test "$enable_presence" = "yes") + # the default prefix - override with --prefix or --with-fhs AC_PREFIX_DEFAULT([/opt/$PACKAGE_NAME]) diff --git a/ldap/admin/src/scripts/template-syntax-validate.pl.in b/ldap/admin/src/scripts/template-syntax-validate.pl.in new file mode 100644 index 000000000..4e4fa7417 --- /dev/null +++ b/ldap/admin/src/scripts/template-syntax-validate.pl.in @@ -0,0 +1,163 @@ +#{{PERL-EXEC}} +# +# BEGIN COPYRIGHT BLOCK +# This Program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation; version 2 of the License. +# +# This Program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# +# In addition, as a special exception, Red Hat, Inc. gives You the additional +# right to link the code of this Program with code not covered under the GNU +# General Public License ("Non-GPL Code") and to distribute linked combinations +# including the two, subject to the limitations in this paragraph. Non-GPL Code +# permitted under this exception must only link to the code of this Program +# through those well defined interfaces identified in the file named EXCEPTION +# found in the source code files (the "Approved Interfaces"). The files of +# Non-GPL Code may instantiate templates or use macros or inline functions from +# the Approved Interfaces without causing the resulting work to be covered by +# the GNU General Public License. Only Red Hat, Inc. may make changes or +# additions to the list of Approved Interfaces. You must obey the GNU General +# Public License in all respects for all of the Program code and other code used +# in conjunction with the Program except the Non-GPL Code covered by this +# exception. 
If you modify this file, you may extend this exception to your +# version of the file, but you are not obligated to do so. If you do not wish to +# provide this exception without modification, you must delete this exception +# statement from your version and license this file solely under the GPL without +# exception. +# +# +# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +# Copyright (C) 2009 Red Hat, Inc. +# All rights reserved. +# END COPYRIGHT BLOCK +# + +sub usage { + print(STDERR "Usage: $0 [-v] -D rootdn { -w password | -w - | -j filename } \n"); + print(STDERR " -b baseDN [-f filter]\n"); + print(STDERR " Opts: -D rootdn - Directory Manager.\n"); + print(STDERR " : -w password - Directory Manager's password.\n"); + print(STDERR " : -w - - Prompt for Directory Manager's password.\n"); + print(STDERR " : -j filename - Read Directory Manager's password from file.\n"); + print(STDERR " : -b baseDN - Base DN that contains entries to validate.\n"); + print(STDERR " : -f filter - Filter for entries to validate.\n"); + print(STDERR " If omitted, all entries under the specified\n"); + print(STDERR " base will have their attribute values\n"); + print(STDERR " validated.\n"); + print(STDERR " : -v - Verbose.\n"); +} + +$rootdn = ""; +$passwd = ""; +$passwdfile = ""; +$basedn_arg = ""; +$filter_arg = ""; +$filter = ""; +$verbose = 0; + +$prefix = "{{DS-ROOT}}"; + +$ENV{'PATH'} = "$prefix@ldapsdk_bindir@:$prefix/usr/bin:@ldapsdk_bindir@:/usr/bin"; +$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; +$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib"; + +$i = 0; +while ($i <= $#ARGV) +{ + if ("$ARGV[$i]" eq "-b") + { + # base DN + $i++; $basedn_arg = $ARGV[$i]; + } + elsif ("$ARGV[$i]" eq "-f") + { + # filter + $i++; $filter_arg = $ARGV[$i]; + } + elsif ("$ARGV[$i]" eq "-D") + { + # Directory Manager + $i++; $rootdn = $ARGV[$i]; + } + elsif ("$ARGV[$i]" eq "-w") + { + # Directory Manager's password + $i++; $passwd = $ARGV[$i]; + } + elsif ("$ARGV[$i]" eq "-j") + { + # Read Directory Manager's password from a file + $i++; $passwdfile = $ARGV[$i]; + } + elsif ("$ARGV[$i]" eq "-v") + { + # verbose + $verbose = 1; + } + else + { + &usage; exit(1); + } + $i++; +} + +if ($passwdfile ne ""){ +# Open file and get the password + unless (open (RPASS, $passwdfile)) { + die "Error, cannot open password file $passwdfile\n"; + } + $passwd = <RPASS>; + chomp($passwd); + close(RPASS); +} elsif ($passwd eq "-"){ +# Read the password from terminal + print "Bind Password: "; + # Disable console echo + system("stty -echo"); + # read the answer + $passwd = <STDIN>; + # Enable console echo + system("stty echo"); + print "\n"; + chop($passwd); # trim trailing newline +} + +if ( $rootdn eq "" || $passwd eq "" || $basedn_arg eq "" ) +{ + &usage; + exit(1); +} + +$vstr = ""; +if ($verbose != 0) +{ + $vstr = "-v"; +} + +# Use a timestamp as part of the task entry name +($s, $m, $h, $dy, $mn, $yr, $wdy, $ydy, $r) = localtime(time); +$mn++; $yr += 1900; +$taskname = "syntax_validate_${yr}_${mn}_${dy}_${h}_${m}_${s}"; + +# Build the task entry to add +$dn = "dn: cn=$taskname, cn=syntax validate, cn=tasks, cn=config\n"; +$misc = "changetype: add\nobjectclass: top\nobjectclass: extensibleObject\n"; +$cn = "cn: $taskname\n"; +$basedn = "basedn: $basedn_arg\n"; + +if ( $filter_arg ne "" ) +{ + $filter = "filter: $filter_arg\n"; +} + +$entry = "${dn}${misc}${cn}${basedn}${filter}"; +open(FOO, "| ldapmodify $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} 
-D \"$rootdn\" -w \"$passwd\" -a" ); +print(FOO "$entry"); +close(FOO); diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index 36a556637..232d9f2e7 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -24,6 +24,7 @@ nsslapd-accesslog: %log_dir%/access nsslapd-enquote-sup-oc: off nsslapd-localhost: %fqdn% nsslapd-schemacheck: on +nsslapd-syntaxcheck: on nsslapd-rewrite-rfc1274: off nsslapd-return-exact-case: on nsslapd-ssl-check-hostname: on @@ -181,6 +182,16 @@ nsslapd-pluginarg0: nsmultiplexorcredentials nsslapd-pluginarg1: nsds5ReplicaCredentials nsslapd-pluginid: des-storage-scheme +dn: cn=Syntax Validation Task,cn=plugins,cn=config +objectclass: top +objectclass: nsSlapdPlugin +objectclass: extensibleObject +cn: Syntax Validation Task +nsslapd-pluginpath: libsyntax-plugin +nsslapd-plugininitfunc: syntax_validate_task_init +nsslapd-plugintype: object +nsslapd-pluginenabled: on + dn: cn=Case Ignore String Syntax,cn=plugins,cn=config objectclass: top objectclass: nsSlapdPlugin @@ -219,7 +230,7 @@ cn: Space Insensitive String Syntax nsslapd-pluginpath: libsyntax-plugin nsslapd-plugininitfunc: sicis_init nsslapd-plugintype: syntax -nsslapd-pluginenabled: on +nsslapd-pluginenabled: @enable_presence@ dn: cn=Binary Syntax,cn=plugins,cn=config objectclass: top @@ -309,7 +320,7 @@ cn: URI Syntax nsslapd-pluginpath: libsyntax-plugin nsslapd-plugininitfunc: uri_init nsslapd-plugintype: syntax -nsslapd-pluginenabled: on +nsslapd-pluginenabled: off dn: cn=JPEG Syntax,cn=plugins,cn=config objectclass: top @@ -341,6 +352,16 @@ nsslapd-plugininitfunc: postal_init nsslapd-plugintype: syntax nsslapd-pluginenabled: on +dn: cn=Numeric String Syntax,cn=plugins,cn=config +objectclass: top +objectclass: nsSlapdPlugin +objectclass: extensibleObject +cn: Numeric String Syntax +nsslapd-pluginpath: libsyntax-plugin +nsslapd-plugininitfunc: numstr_init +nsslapd-plugintype: syntax +nsslapd-pluginenabled: on + dn: cn=State Change Plugin,cn=plugins,cn=config objectclass: top objectclass: nsSlapdPlugin diff --git a/ldap/schema/60mozilla.ldif b/ldap/schema/60mozilla.ldif index f01c3d676..e53e442e7 100644 --- a/ldap/schema/60mozilla.ldif +++ b/ldap/schema/60mozilla.ldif @@ -200,10 +200,10 @@ attributeTypes: ( ) # ################################################################################ -# nsAIMid is already defined in 10presence.ldif as 2.16.840.1.113730.3.1.2013 +# attributeTypes: ( 1.3.6.1.4.1.13769.2.4 - NAME ( 'nscpaimscreenname' ) + NAME ( 'nsAIMid' 'nscpaimscreenname' ) EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 diff --git a/ldap/servers/plugins/syntaxes/bin.c b/ldap/servers/plugins/syntaxes/bin.c index 6d6c76328..b7be0d1af 100644 --- a/ldap/servers/plugins/syntaxes/bin.c +++ b/ldap/servers/plugins/syntaxes/bin.c @@ -43,8 +43,7 @@ /* bin.c - bin syntax routines */ /* - * This file actually implements two syntax plugins: OctetString and Binary. - * We treat them identically for now. XXXmcs: check if that is correct. + * This file actually implements three syntax plugins: OctetString, JPEG, and Binary. */ #include <stdio.h> @@ -73,6 +72,9 @@ static char *octetstring_names[] = { "OctetString", OCTETSTRING_SYNTAX_OID, 0 }; static char *jpeg_names[] = { "JPEG", JPEG_SYNTAX_OID, 0 }; +/* This syntax has "gone away" in RFC 4517, however we still use it for + * a number of attributes in our default schema. We should try to eliminate + * it's use and remove support for it. 
*/ static Slapi_PluginDesc bin_pdesc = { "bin-syntax", PLUGIN_MAGIC_VENDOR_STR, PRODUCTTEXT, "binary attribute syntax plugin" diff --git a/ldap/servers/plugins/syntaxes/ces.c b/ldap/servers/plugins/syntaxes/ces.c index a7ffee5a5..68b642f0e 100644 --- a/ldap/servers/plugins/syntaxes/ces.c +++ b/ldap/servers/plugins/syntaxes/ces.c @@ -40,7 +40,9 @@ # include <config.h> #endif -/* ces.c - caseexactstring syntax routines */ +/* ces.c - caseexactstring syntax routines. Implements support for: + * - IA5String + * - URI (DEPRECATED - This is non-standard and isn't used in the default schema.) */ #include <stdio.h> #include <string.h> @@ -58,6 +60,7 @@ static int ces_assertion2keys_ava( Slapi_PBlock *pb, Slapi_Value *val, static int ces_assertion2keys_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, Slapi_Value ***ivals ); static int ces_compare(struct berval *v1, struct berval *v2); +static int ia5_validate(struct berval *val); /* the first name is the official one from RFC 2252 */ static char *ia5_names[] = { "IA5String", "ces", "caseexactstring", @@ -78,7 +81,7 @@ static Slapi_PluginDesc uri_pdesc = { "uri-syntax", PLUGIN_MAGIC_VENDOR_STR, */ static int register_ces_like_plugin( Slapi_PBlock *pb, Slapi_PluginDesc *pdescp, - char **names, char *oid ) + char **names, char *oid, void *validate_fn ) { int rc, flags; @@ -105,6 +108,10 @@ register_ces_like_plugin( Slapi_PBlock *pb, Slapi_PluginDesc *pdescp, (void *) oid ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_COMPARE, (void *) ces_compare ); + if (validate_fn != NULL) { + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALIDATE, + (void *)validate_fn ); + } return( rc ); } @@ -116,7 +123,7 @@ ces_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> ces_init\n", 0, 0, 0 ); - rc = register_ces_like_plugin(pb,&ia5_pdesc,ia5_names,IA5STRING_SYNTAX_OID); + rc = register_ces_like_plugin(pb,&ia5_pdesc,ia5_names,IA5STRING_SYNTAX_OID, ia5_validate); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= ces_init %d\n", rc, 0, 0 ); return( rc ); @@ -130,7 +137,7 @@ uri_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> uri_init\n", 0, 0, 0 ); rc = register_ces_like_plugin(pb,&uri_pdesc,uri_names, - "1.3.6.1.4.1.4401.1.1.1"); + "1.3.6.1.4.1.4401.1.1.1", NULL); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= uri_init %d\n", rc, 0, 0 ); return( rc ); @@ -203,3 +210,31 @@ static int ces_compare( { return value_cmp(v1,v2,SYNTAX_CES,3 /* Normalise both values */); } + +static int +ia5_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + int i = 0; + + if (val == NULL) { + rc = 1; + goto exit; + } + + /* Per RFC 4517: + * + * IA5String = *(%x00-7F) + */ + for (i=0; i < val->bv_len; i++) { + if (!IS_UTF1(val->bv_val[i])) { + rc = 1; + goto exit; + } + } + +exit: + return rc; +} diff --git a/ldap/servers/plugins/syntaxes/cis.c b/ldap/servers/plugins/syntaxes/cis.c index 20b990dc4..f20ae5eb6 100644 --- a/ldap/servers/plugins/syntaxes/cis.c +++ b/ldap/servers/plugins/syntaxes/cis.c @@ -43,13 +43,15 @@ /* cis.c - caseignorestring syntax routines */ /* - * This file actually implements three syntax plugins: - * DirectoryString + * This file actually implements numerous syntax plugins: + * * Boolean + * CountryString + * DirectoryString * GeneralizedTime + * OID + * PostalAddress * - * We treat them identically for now. XXXmcs: we could do some validation on - * Boolean and GeneralizedTime values (someday, maybe). 
*/ #include <stdio.h> @@ -68,6 +70,12 @@ static int cis_assertion2keys_ava( Slapi_PBlock *pb, Slapi_Value *val, static int cis_assertion2keys_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, Slapi_Value ***ivals ); static int cis_compare(struct berval *v1, struct berval *v2); +static int dirstring_validate(struct berval *val); +static int boolean_validate(struct berval *val); +static int time_validate(struct berval *val); +static int country_validate(struct berval *val); +static int postal_validate(struct berval *val); +static int oid_validate(struct berval *val); /* * Attribute syntaxes. We treat all of these the same for now, even though @@ -170,7 +178,7 @@ static Slapi_PluginDesc oid_pdesc = { "oid-syntax", */ static int register_cis_like_plugin( Slapi_PBlock *pb, Slapi_PluginDesc *pdescp, - char **names, char *oid ) + char **names, char *oid, void *validate_fn ) { int rc, flags; @@ -197,11 +205,14 @@ register_cis_like_plugin( Slapi_PBlock *pb, Slapi_PluginDesc *pdescp, (void *) oid ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_COMPARE, (void *) cis_compare ); + if (validate_fn != NULL) { + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALIDATE, + (void *)validate_fn ); + } return( rc ); } - int cis_init( Slapi_PBlock *pb ) { @@ -209,7 +220,7 @@ cis_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> cis_init\n", 0, 0, 0 ); rc = register_cis_like_plugin( pb, &dirstring_pdesc, dirstring_names, - DIRSTRING_SYNTAX_OID ); + DIRSTRING_SYNTAX_OID, dirstring_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= cis_init %d\n", rc, 0, 0 ); return( rc ); } @@ -222,12 +233,11 @@ boolean_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> boolean_init\n", 0, 0, 0 ); rc = register_cis_like_plugin( pb, &boolean_pdesc, boolean_names, - BOOLEAN_SYNTAX_OID ); + BOOLEAN_SYNTAX_OID, boolean_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= boolean_init %d\n", rc, 0, 0 ); return( rc ); } - int time_init( Slapi_PBlock *pb ) { @@ -235,7 +245,7 @@ time_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> time_init\n", 0, 0, 0 ); rc = register_cis_like_plugin( pb, &time_pdesc, time_names, - GENERALIZEDTIME_SYNTAX_OID ); + GENERALIZEDTIME_SYNTAX_OID, time_validate ); /* also register this plugin for matching rules */ rc |= slapi_matchingrule_register(&generalizedTimeMatch); rc |= slapi_matchingrule_register(&generalizedTimeOrderingMatch); @@ -250,7 +260,7 @@ country_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> country_init\n", 0, 0, 0 ); rc = register_cis_like_plugin( pb, &country_pdesc, country_names, - COUNTRYSTRING_SYNTAX_OID ); + COUNTRYSTRING_SYNTAX_OID, country_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= country_init %d\n", rc, 0, 0 ); return( rc ); } @@ -262,7 +272,7 @@ postal_init( Slapi_PBlock *pb ) LDAPDebug( LDAP_DEBUG_PLUGIN, "=> postal_init\n", 0, 0, 0 ); rc = register_cis_like_plugin( pb, &postal_pdesc, postal_names, - POSTALADDRESS_SYNTAX_OID ); + POSTALADDRESS_SYNTAX_OID, postal_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= postal_init %d\n", rc, 0, 0 ); return( rc ); } @@ -274,7 +284,7 @@ oid_init( Slapi_PBlock *pb ) int rc; LDAPDebug( LDAP_DEBUG_PLUGIN, "=> oid_init\n", 0, 0, 0 ); - rc = register_cis_like_plugin( pb, &oid_pdesc, oid_names, OID_SYNTAX_OID ); + rc = register_cis_like_plugin( pb, &oid_pdesc, oid_names, OID_SYNTAX_OID, oid_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= oid_init %d\n", rc, 0, 0 ); return( rc ); } @@ -349,3 +359,449 @@ static int cis_compare( { return value_cmp(v1,v2,SYNTAX_CIS,3 /* Normalise both values */); } + +static 
int dirstring_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + char *p = NULL; + char *end = NULL; + + /* Per RFC4517: + * + * DirectoryString = 1*UTF8 + */ + if ((val != NULL) && (val->bv_len > 0)) { + p = val->bv_val; + end = &(val->bv_val[val->bv_len - 1]); + rc = utf8string_validate(p, end, NULL); + } else { + rc = 1; + goto exit; + } + +exit: + return( rc ); +} + +static int boolean_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + + /* Per RFC4517: + * + * Boolean = "TRUE" / "FALSE" + */ + if (val != NULL) { + if (val->bv_len == 4) { + if (strncmp(val->bv_val, "TRUE", 4) != 0) { + rc = 1; + goto exit; + } + } else if (val->bv_len == 5) { + if (strncmp(val->bv_val, "FALSE", 5) != 0) { + rc = 1; + goto exit; + } + } else { + rc = 1; + goto exit; + } + } else { + rc = 1; + } + +exit: + return(rc); +} + +static int time_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + int i = 0; + const char *p = NULL; + char *end = NULL; + + /* Per RFC4517: + * + * GeneralizedTime = century year month day hour + * [ minute [ second / leap-second ] ] + * [ fraction ] + * g-time-zone + * + * century = 2(%x30-39) ; "00" to "99" + * year = 2(%x30-39) ; "00" to "99" + * month = ( %x30 %x31-39 ) ; "01" (January) to "09" + * / ( %x31 %x30-32 ) ; "10 to "12" + * day = ( %x30 %x31-39 ) ; "01" to "09" + * / ( %x31-x32 %x30-39 ) ; "10" to "29" + * / ( %x33 %x30-31 ) ; "30" to "31" + * hour = ( %x30-31 %x30-39 ) / ( %x32 %x30-33 ) ; "00" to "23" + * minute = %x30-35 %x30-39 ; "00" to "59" + * + * second = ( %x30-35 - %x30-39 ) ; "00" to "59" + * leap-second = ( %x36 %x30 ) ; "60" + * + * fraction = ( DOT / COMMA ) 1*(%x30-39) + * g-time-zone = %x5A ; "Z" + * / g-differential + * g-differential = ( MINUS / PLUS ) hour [ minute ] + */ + if (val != NULL) { + /* A valid GeneralizedTime should be at least 11 characters. There + * is no upper bound due to the variable length of "fraction". */ + if (val->bv_len < 11) { + rc = 1; + goto exit; + } + + /* We're guaranteed that the value is at least 11 characters, so we + * don't need to bother checking if we're at the end of the value + * until we start processing the "minute" part of the value. */ + p = val->bv_val; + end = &(val->bv_val[val->bv_len - 1]); + + /* Process "century year". First 4 characters can be any valid digit. */ + for (i=0; i<4; i++) { + if (!isdigit(*p)) { + rc = 1; + goto exit; + } + p++; + } + + /* Process "month". Next character can be "0" or "1". */ + if (*p == '0') { + p++; + /* any LDIGIT is valid now */ + if (!IS_LDIGIT(*p)) { + rc = 1; + goto exit; + } + p++; + } else if (*p == '1') { + p++; + /* only "0"-"2" are valid now */ + if ((*p < '0') || (*p > '2')) { + rc = 1; + goto exit; + } + p++; + } else { + rc = 1; + goto exit; + } + + /* Process "day". Next character can be "0"-"3". */ + if (*p == '0') { + p++; + /* any LDIGIT is valid now */ + if (!IS_LDIGIT(*p)) { + rc = 1; + goto exit; + } + p++; + } else if ((*p == '1') || (*p == '2')) { + p++; + /* any digit is valid now */ + if (!isdigit(*p)) { + rc = 1; + goto exit; + } + p++; + } else if (*p == '3') { + p++; + /* only "0"-"1" are valid now */ + if ((*p != '0') && (*p != '1')) { + rc = 1; + goto exit; + } + p++; + } else { + rc = 1; + goto exit; + } + + /* Process "hour". Next character can be "0"-"2". 
*/ + if ((*p == '0') || (*p == '1')) { + p++; + /* any digit is valid now */ + if (!isdigit(*p)) { + rc = 1; + goto exit; + } + p++; + } else if (*p == '2') { + p++; + /* only "0"-"3" are valid now */ + if ((*p < '0') || (*p > '3')) { + rc = 1; + goto exit; + } + p++; + } else { + rc = 1; + goto exit; + } + + /* Time for the optional stuff. We know we have at least one character here, but + * we need to start checking for the end of the string afterwards. + * + * See if a "minute" was specified. */ + if ((*p >= '0') && (*p <= '5')) { + p++; + /* any digit is valid for the second char of a minute */ + if ((p > end) || (!isdigit(*p))) { + rc = 1; + goto exit; + } + p++; + + /* At this point, there has to at least be a "g-time-zone" left. + * Make sure we're not at the end of the string. */ + if (p > end) { + rc = 1; + goto exit; + } + + /* See if a "second" or "leap-second" was specified. */ + if ((*p >= '0') && (*p <= '5')) { + p++; + /* any digit is valid now */ + if ((p > end) || (!isdigit(*p))) { + rc = 1; + goto exit; + } + p++; + } else if (*p == '6') { + p++; + /* only a '0' is valid now */ + if ((p > end) || (*p != '0')) { + rc = 1; + goto exit; + } + p++; + } + + /* At this point, there has to at least be a "g-time-zone" left. + * Make sure we're not at the end of the string. */ + if (p > end) { + rc = 1; + goto exit; + } + } + + /* See if a fraction was specified. */ + if ((*p == '.') || (*p == ',')) { + p++; + /* An arbitrary length string of digit chars is allowed here. + * Ensure we have at least one digit character. */ + if ((p >= end) || (!isdigit(*p))) { + rc = 1; + goto exit; + } + + /* Just loop through the rest of the fraction until we encounter a non-digit */ + p++; + while ((p < end) && (isdigit(*p))) { + p++; + } + } + + /* Process "g-time-zone". We either end with 'Z', or have a differential. */ + if (p == end) { + if (*p != 'Z') { + rc = 1; + goto exit; + } + } else if (p < end) { + if ((*p != '-') && (*p != '+')) { + rc = 1; + goto exit; + } else { + /* A "g-differential" was specified. An "hour" must be present now. */ + p++; + if ((*p == '0') || (*p == '1')) { + p++; + /* any digit is valid now */ + if ((p > end) || !isdigit(*p)) { + rc = 1; + goto exit; + } + p++; + } else if (*p == '2') { + p++; + /* only "0"-"3" are valid now */ + if ((p > end) || (*p < '0') || (*p > '3')) { + rc = 1; + goto exit; + } + p++; + } else { + rc = 1; + goto exit; + } + + /* See if an optional minute is present ("00"-"59"). */ + if (p <= end) { + /* "0"-"5" are valid now */ + if ((*p < '0') || (*p > '5')) { + rc = 1; + goto exit; + } + p++; + + /* We should be at the last character of the string + * now, which must be a valid digit. 
*/ + if ((p != end) || !isdigit(*p)) { + rc = 1; + goto exit; + } + } + } + } else { + /* Premature end of string */ + rc = 1; + goto exit; + } + } else { + rc = 1; + goto exit; + } + +exit: + return( rc ); +} + +static int country_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + + /* Per RFC4517: + * + * CountryString = 2(PrintableCharacter) + */ + if (val != NULL) { + if ((val->bv_len != 2) || !IS_PRINTABLE(val->bv_val[0]) || !IS_PRINTABLE(val->bv_val[1])) { + rc = 1; + goto exit; + } + + + } else { + rc = 1; + } + +exit: + return(rc); +} + +static int postal_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + const char *p = NULL; + const char *start = NULL; + char *end = NULL; + + /* Per RFC4517: + * PostalAddress = line *( DOLLAR line ) + * line = 1*line-char + * line-char = %x00-23 + * / (%x5C "24") ; escaped "$" + * / %x25-5B + * / (%x5C "5C") ; escaped "\" + * / %x5D-7F + * / UTFMB + */ + if (val != NULL) { + start = val->bv_val; + end = &(val->bv_val[val->bv_len - 1]); + for (p = start; p <= end; p++) { + /* look for a '\' and make sure it's only used to escape a '$' or a '\' */ + if (*p == '\\') { + p++; + /* ensure that we're not at the end of the value */ + if ((p > end) || (strncmp(p, "24", 2) != 0) && (strncasecmp(p, "5C", 2) != 0)) { + rc = 1; + goto exit; + } else { + /* advance the pointer to point to the end + * of the hex code for the escaped character */ + p++; + } + } else if (*p == '$') { + /* This signifies the end of a line. We need + * to ensure that the line is not empty. */ + if (p == start) { + rc = 1; + goto exit; + } + + /* make sure the value doesn't end with a '$' */ + if (p == end) { + rc = 1; + goto exit; + } + + /* Make sure the line (start to p) is valid UTF-8. 
*/ + if ((rc = utf8string_validate(start, p, NULL)) != 0) { + goto exit; + } + + /* make the start pointer point to the + * beginning of the next line */ + start = p + 1; + } + } + } else { + rc = 1; + } + +exit: + return(rc); +} + +static int oid_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + const char *p = NULL; + char *end = NULL; + + /* Per RFC4512: + * + * oid = descr / numericoid + * descr = keystring + */ + if ((val != NULL) && (val->bv_len > 0)) { + p = val->bv_val; + end = &(val->bv_val[val->bv_len - 1]); + + /* check if the value matches the descr form */ + if (IS_LEADKEYCHAR(*p)) { + rc = keystring_validate(p, end); + /* check if the value matches the numericoid form */ + } else if (isdigit(*p)) { + rc = numericoid_validate(p, end); + } else { + rc = 1; + goto exit; + } + } else { + rc = 1; + } + +exit: + return( rc ); +} + diff --git a/ldap/servers/plugins/syntaxes/dn.c b/ldap/servers/plugins/syntaxes/dn.c index c7d34758d..a6dcceda6 100644 --- a/ldap/servers/plugins/syntaxes/dn.c +++ b/ldap/servers/plugins/syntaxes/dn.c @@ -57,6 +57,8 @@ static int dn_assertion2keys_ava( Slapi_PBlock *pb, Slapi_Value *val, Slapi_Value ***ivals, int ftype ); static int dn_assertion2keys_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, Slapi_Value ***ivals ); +static int dn_validate( struct berval *val ); +static int rdn_validate( char *begin, char *end, char **last ); /* the first name is the official one from RFC 2252 */ static char *names[] = { "DN", DN_SYNTAX_OID, 0 }; @@ -89,6 +91,8 @@ dn_init( Slapi_PBlock *pb ) (void *) names ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_OID, (void *) DN_SYNTAX_OID ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALIDATE, + (void *) dn_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= dn_init %d\n", rc, 0, 0 ); return( rc ); @@ -133,3 +137,214 @@ dn_assertion2keys_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, return( string_assertion2keys_sub( pb, initial, any, final, ivals, SYNTAX_CIS | SYNTAX_DN ) ); } + +static int dn_validate( struct berval *val ) +{ + int rc = 0; /* Assume value is valid */ + + if (val != NULL) { + /* Per RFC 4514: + * + * distinguishedName = [ relativeDistinguishedName + * *( COMMA relativeDistinguishedName ) ] + * relativeDistinguishedName = attributeTypeAndValue + * *( PLUS attributeTypeAndValue ) + * attributeTypeAndValue = attribyteType EQUALS attributeValue + * attributeType = descr / numericoid + * attributeValue = string / hexstring + */ + if (val->bv_len > 0) { + char *p = val->bv_val; + char *end = &(val->bv_val[val->bv_len - 1]); + char *last = NULL; + + /* Validate one RDN at a time in a loop. */ + while (p <= end) { + if ((rc = rdn_validate(p, end, &last)) != 0) { + goto exit; + } + p = last + 1; + + /* p should be pointing at a comma, or one past + * the end of the entire dn value. If we have + * not reached the end, ensure that the next + * character is a comma and that there is at + * least another character after the comma. */ + if ((p <= end) && ((p == end) || (*p != ','))) { + rc = 1; + goto exit; + } + + /* Advance the pointer past the comma so it + * points at the beginning of the next RDN + * (if there is one). */ + p++; + } + } + } else { + rc = 1; + goto exit; + } +exit: + return rc; +} + +/* + * Helper function for validating a DN. This function will validate + * a single RDN. If the RDN is valid, 0 will be returned, otherwise + * non-zero will be returned. 
A pointer to the last character processed + * will be set in the "last parameter. This will be the end of the RDN + * in the valid case, and the illegal character in the invalid case. + */ +static int rdn_validate( char *begin, char *end, char **last ) +{ + int rc = 0; /* Assume RDN is valid */ + int numericform = 0; + char *separator = NULL; + char *p = begin; + + /* Find the '=', then use the helpers for descr and numericoid */ + if ((separator = PL_strnchr(p, '=', end - begin + 1)) == NULL) { + rc = 1; + goto exit; + } + + /* Process an attribute type. The 'descr' + * form must start with a 'leadkeychar'. */ + if (IS_LEADKEYCHAR(*p)) { + if (rc = keystring_validate(p, separator - 1)) { + goto exit; + } + /* See if the 'numericoid' form is being used */ + } else if (isdigit(*p)) { + numericform = 1; + if (rc = numericoid_validate(p, separator - 1)) { + goto exit; + } + } else { + rc = 1; + goto exit; + } + + /* Advance the pointer past the '=' and make sure + * we're not past the end of the string. */ + p = separator + 1; + if (p > end) { + rc = 1; + goto exit; + } + + /* The value must be a 'hexstring' if the 'numericoid' + * form of 'attributeType' is used. Per RFC 4514: + * + * hexstring = SHARP 1*hexpair + * hexpair = HEX HEX + */ + if (numericform) { + if ((p == end) || !IS_SHARP(*p)) { + rc = 1; + goto exit; + } + p++; + /* The value must be a 'string' when the 'descr' form + * of 'attributeType' is used. Per RFC 4514: + * + * string = [ ( leadchar / pair ) [ *( stringchar / pair ) + * ( trailchar / pair ) ] ] + * + * leadchar = LUTF1 / UTFMB + * trailchar = TUTF1 / UTFMB + * stringchar = SUTF1 / UTFMB + * + * pair = ESC (ESC / special / hexpair ) + * special = escaped / SPACE / SHARP / EQUALS + * escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE + * hexpair = HEX HEX + */ + } else { + /* Check the leadchar to see if anything illegal + * is there. We need to allow a 'pair' to get + * through, so we'll assume that a '\' is the + * start of a 'pair' for now. */ + if (IS_UTF1(*p) && !IS_ESC(*p) && !IS_LUTF1(*p)) { + rc = 1; + goto exit; + } + } + + /* Loop through string until we find the ',' separator, a '+' + * char indicating a multi-value RDN, or we reach the end. */ + while ((p <= end) && (*p != ',') && (*p != '+')) { + if (numericform) { + /* Process a single 'hexpair' */ + if ((p == end) || !isxdigit(*p) || !isxdigit(*p + 1)) { + rc = 1; + goto exit; + } + p = p + 2; + } else { + /* Check for a valid 'stringchar'. We handle + * multi-byte characters separately. */ + if (IS_UTF1(*p)) { + /* If we're at the end, check if we have + * a valid 'trailchar'. */ + if ((p == end) && !IS_TUTF1(*p)) { + rc = 1; + goto exit; + /* Check for a 'pair'. */ + } else if (IS_ESC(*p)) { + /* We're guaranteed to still have at + * least one more character, so lets + * take a look at it. */ + p++; + if (!IS_ESC(*p) && !IS_SPECIAL(*p)) { + /* The only thing valid now + * is a 'hexpair'. */ + if ((p == end) || !isxdigit(*p) ||!isxdigit(*p + 1)) { + rc = 1; + goto exit; + } + p++; + } + p++; + /* Only allow 'SUTF1' chars now. */ + } else if (!IS_SUTF1(*p)) { + rc = 1; + goto exit; + } + + p++; + } else { + /* Validate a single 'UTFMB' (multi-byte) character. */ + if (utf8char_validate(p, end, &p ) != 0) { + rc = 1; + goto exit; + } + + /* Advance the pointer past the multi-byte char. */ + p++; + } + } + } + + /* We'll end up either at the comma, a '+', or one past end. + * If we are processing a multi-valued RDN, we recurse to + * process the next 'attributeTypeAndValue'. 
*/ + if ((p <= end) && (*p == '+')) { + /* Make sure that there is something after the '+'. */ + if (p == end) { + rc = 1; + goto exit; + } + p++; + + /* Recurse to process the next value. We need to reset p to + * ensure that last is set correctly for the original caller. */ + rc = rdn_validate( p, end, last ); + p = *last + 1; + } + +exit: + *last = p - 1; + return rc; +} diff --git a/ldap/servers/plugins/syntaxes/int.c b/ldap/servers/plugins/syntaxes/int.c index 73c879a7b..0372d3a6f 100644 --- a/ldap/servers/plugins/syntaxes/int.c +++ b/ldap/servers/plugins/syntaxes/int.c @@ -54,6 +54,7 @@ static int int_values2keys( Slapi_PBlock *pb, Slapi_Value **val, static int int_assertion2keys( Slapi_PBlock *pb, Slapi_Value *val, Slapi_Value ***ivals, int ftype ); static int int_compare(struct berval *v1, struct berval *v2); +static int int_validate(struct berval *val); /* the first name is the official one from RFC 2252 */ static char *names[] = { "INTEGER", "int", INTEGER_SYNTAX_OID, 0 }; @@ -101,6 +102,8 @@ int_init( Slapi_PBlock *pb ) (void *) INTEGER_SYNTAX_OID ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_COMPARE, (void *) int_compare ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALIDATE, + (void *) int_validate ); /* also register this plugin for matching rules */ rc |= slapi_matchingrule_register(&integerMatch); @@ -139,3 +142,56 @@ static int int_compare( { return value_cmp(v1, v2, SYNTAX_INT|SYNTAX_CES, 3 /* Normalise both values */); } + +/* return 0 if valid, non-0 if invalid */ +static int int_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + char *p = NULL; + char *end = NULL; + + /* Per RFC4517: + * + * Integer = (HYPHEN LDIGIT *DIGIT) / number + * number = DIGIT / (LDIGIT 1*DIGIT) + */ + if ((val != NULL) && (val->bv_len > 0)) { + p = val->bv_val; + end = &(val->bv_val[val->bv_len - 1]); + + /* If the first character is HYPHEN, we need + * to make sure the next char is a LDIGIT. */ + if (*p == '-') { + p++; + if ((p > end) || !IS_LDIGIT(*p)) { + rc = 1; + goto exit; + } + p++; + } else if (*p == '0') { + /* 0 is allowed by itself, but not as + * a leading 0 before other digits */ + if (p != end) { + rc = 1; + } + + /* We're done here */ + goto exit; + } + + /* Now we can simply allow the rest to be DIGIT */ + for (; p <= end; p++) { + if (!isdigit(*p)) { + rc = 1; + goto exit; + } + } + } else { + rc = 1; + } + +exit: + return(rc); +} diff --git a/ldap/servers/plugins/syntaxes/numericstring.c b/ldap/servers/plugins/syntaxes/numericstring.c new file mode 100644 index 000000000..180f8f7db --- /dev/null +++ b/ldap/servers/plugins/syntaxes/numericstring.c @@ -0,0 +1,188 @@ +/** BEGIN COPYRIGHT BLOCK + * This Program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; version 2 of the License. + * + * This Program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place, Suite 330, Boston, MA 02111-1307 USA. + * + * In addition, as a special exception, Red Hat, Inc. 
gives You the additional + * right to link the code of this Program with code not covered under the GNU + * General Public License ("Non-GPL Code") and to distribute linked combinations + * including the two, subject to the limitations in this paragraph. Non-GPL Code + * permitted under this exception must only link to the code of this Program + * through those well defined interfaces identified in the file named EXCEPTION + * found in the source code files (the "Approved Interfaces"). The files of + * Non-GPL Code may instantiate templates or use macros or inline functions from + * the Approved Interfaces without causing the resulting work to be covered by + * the GNU General Public License. Only Red Hat, Inc. may make changes or + * additions to the list of Approved Interfaces. You must obey the GNU General + * Public License in all respects for all of the Program code and other code used + * in conjunction with the Program except the Non-GPL Code covered by this + * exception. If you modify this file, you may extend this exception to your + * version of the file, but you are not obligated to do so. If you do not wish to + * provide this exception without modification, you must delete this exception + * statement from your version and license this file solely under the GPL without + * exception. + * + * + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. + * Copyright (C) 2009 Red Hat, Inc. + * All rights reserved. + * END COPYRIGHT BLOCK **/ + +#ifdef HAVE_CONFIG_H +# include <config.h> +#endif + +/* numericstring.c - Numeric String syntax routines */ + +#include <stdio.h> +#include <string.h> +#include <sys/types.h> +#include "syntax.h" + +static int numstr_filter_ava( Slapi_PBlock *pb, struct berval *bvfilter, + Slapi_Value **bvals, int ftype, Slapi_Value **retVal ); +static int numstr_values2keys( Slapi_PBlock *pb, Slapi_Value **val, + Slapi_Value ***ivals, int ftype ); +static int numstr_assertion2keys( Slapi_PBlock *pb, Slapi_Value *val, + Slapi_Value ***ivals, int ftype ); +static int numstr_compare(struct berval *v1, struct berval *v2); +static int numstr_validate(struct berval *val); + +/* the first name is the official one from RFC 4517 */ +static char *names[] = { "Numeric String", "numstr", NUMERICSTRING_SYNTAX_OID, 0 }; + +#define NUMERICSTRINGMATCH_OID "2.5.13.8" +#define NUMERICSTRINGORDERINGMATCH_OID "2.5.13.9" +#define NUMERICSTRINGSUBSTRINGMATCH_OID "2.5.13.10" + +static Slapi_PluginDesc pdesc = { "numstr-syntax", PLUGIN_MAGIC_VENDOR_STR, + PRODUCTTEXT, "numeric string attribute syntax plugin" }; + +static Slapi_MatchingRuleEntry +numericStringMatch = { NUMERICSTRINGMATCH_OID, NULL /* no alias? */, + "numericStringMatch", "The rule evaluates to TRUE if and only if the prepared " + "attribute value character string and the prepared assertion value character " + "string have the same number of characters and corresponding characters have " + "the same code point.", + NUMERICSTRING_SYNTAX_OID, 0 /* not obsolete */ }; + +static Slapi_MatchingRuleEntry +numericStringOrderingMatch = { NUMERICSTRINGORDERINGMATCH_OID, NULL /* no alias? 
*/, + "numericStringOrderingMatch", "The rule evaluates to TRUE if and only if, " + "in the code point collation order, the prepared attribute value character " + "string appears earlier than the prepared assertion value character string; " + "i.e., the attribute value is less than the assertion value.", + NUMERICSTRING_SYNTAX_OID, 0 /* not obsolete */ }; + +static Slapi_MatchingRuleEntry +numericStringSubstringMatch = { NUMERICSTRINGSUBSTRINGMATCH_OID, NULL /* no alias? */, + "numericStringSubstringMatch", "The rule evaluates to TRUE if and only if (1) " + "the prepared substrings of the assertion value match disjoint portions of " + "the prepared attribute value, (2) an initial substring, if present, matches " + "the beginning of the prepared attribute value character string, and (3) a " + "final substring, if present, matches the end of the prepared attribute value " + "character string.", + NUMERICSTRING_SYNTAX_OID, 0 /* not obsolete */ }; + +int +numstr_init( Slapi_PBlock *pb ) +{ + int rc, flags; + + LDAPDebug( LDAP_DEBUG_PLUGIN, "=> numstr_init\n", 0, 0, 0 ); + + rc = slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, + (void *) SLAPI_PLUGIN_VERSION_01 ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, + (void *)&pdesc ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_FILTER_AVA, + (void *) numstr_filter_ava ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALUES2KEYS, + (void *) numstr_values2keys ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_AVA, + (void *) numstr_assertion2keys ); + flags = SLAPI_PLUGIN_SYNTAX_FLAG_ORDERING; + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_FLAGS, + (void *) &flags ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_NAMES, + (void *) names ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_OID, + (void *) INTEGER_SYNTAX_OID ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_COMPARE, + (void *) numstr_compare ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALIDATE, + (void *) numstr_validate ); + + /* also register this plugin for matching rules */ + rc |= slapi_matchingrule_register(&numericStringMatch); + rc |= slapi_matchingrule_register(&numericStringOrderingMatch); + rc |= slapi_matchingrule_register(&numericStringSubstringMatch); + + LDAPDebug( LDAP_DEBUG_PLUGIN, "<= numstr_init %d\n", rc, 0, 0 ); + return( rc ); +} + +static int +numstr_filter_ava( Slapi_PBlock *pb, struct berval *bvfilter, + Slapi_Value **bvals, int ftype, Slapi_Value **retVal ) +{ + return( string_filter_ava( bvfilter, bvals, SYNTAX_SI | SYNTAX_CES, + ftype, retVal ) ); +} + +static int +numstr_values2keys( Slapi_PBlock *pb, Slapi_Value **vals, Slapi_Value ***ivals, int ftype ) +{ + return( string_values2keys( pb, vals, ivals, SYNTAX_SI | SYNTAX_CES, + ftype ) ); +} + +static int +numstr_assertion2keys( Slapi_PBlock *pb, Slapi_Value *val, Slapi_Value ***ivals, int ftype ) +{ + return(string_assertion2keys_ava( pb, val, ivals, + SYNTAX_SI | SYNTAX_CES, ftype )); +} + +static int numstr_compare( + struct berval *v1, + struct berval *v2 +) +{ + return value_cmp(v1, v2, SYNTAX_SI | SYNTAX_CES, 3 /* Normalise both values */); +} + +/* return 0 if valid, non-0 if invalid */ +static int numstr_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + const char *p = NULL; + + /* Per RFC4517: + * + * NumericString = 1*(DIGIT / SPACE) + */ + if (val != NULL) { + for (p = val->bv_val; p < &(val->bv_val[val->bv_len]); p++) { + if (!isdigit(*p) && !IS_SPACE(*p)) { + rc = 1; + goto exit; + } + } + } else { + rc = 1; + 
} + +exit: + return(rc); +} diff --git a/ldap/servers/plugins/syntaxes/sicis.c b/ldap/servers/plugins/syntaxes/sicis.c index fe7188cd0..07fee069c 100644 --- a/ldap/servers/plugins/syntaxes/sicis.c +++ b/ldap/servers/plugins/syntaxes/sicis.c @@ -43,6 +43,9 @@ /* * sicis.c - space insensitive string syntax routines. * these strings are also case insensitive. + * + * This is a non-standard syntax. It is only used by the presence plug-in. + * It will be disabled by default unless the presence plug-in is compiled. */ #include <stdio.h> #include <string.h> diff --git a/ldap/servers/plugins/syntaxes/syntax.h b/ldap/servers/plugins/syntaxes/syntax.h index fc7a2db9a..b9a013700 100644 --- a/ldap/servers/plugins/syntaxes/syntax.h +++ b/ldap/servers/plugins/syntaxes/syntax.h @@ -66,6 +66,46 @@ #define MIN( a, b ) (a < b ? a : b ) #endif +#define SYNTAX_PLUGIN_SUBSYSTEM "syntax-plugin" + +/* The following are derived from RFC 4512, section 1.4. */ +#define IS_LEADKEYCHAR(c) ( isalpha(c) ) +#define IS_KEYCHAR(c) ( isalnum(c) || (c == '-') ) +#define IS_SPACE(c) ( (c == ' ') ) +#define IS_LDIGIT(c) ( (c != '0') && isdigit(c) ) +#define IS_SHARP(c) ( (c == '#') ) +#define IS_ESC(c) ( (c == '\\') ) +#define IS_UTF0(c) ( (c >= '\x80') && (c <= '\xBF') ) +#define IS_UTF1(c) ( !(c & 128) ) +/* These are only checking the first byte of the multibyte character. They + * do not verify that the entire multibyte character is correct. */ +#define IS_UTF2(c) ( (c >= '\xC2') && (c <= '\xDF') ) +#define IS_UTF3(c) ( (c >= '\xE0') && (c <= '\xEF') ) +#define IS_UTF4(c) ( (c >= '\xF0') && (c <= '\xF4') ) +#define IS_UTFMB(c) ( IS_UTF2(c) || IS_UTF3(c) || IS_UTF4(c) ) +#define IS_UTF8(c) ( IS_UTF1(c) || IS_UTFMB(c) ) + +/* The following are derived from RFC 4514, section 3. */ +#define IS_ESCAPED(c) ( (c == '"') || (c == '+') || (c == ',') || \ + (c == ';') || (c == '<') || (c == '>') ) +#define IS_SPECIAL(c) ( IS_ESCAPED(c) || IS_SPACE(c) || \ + IS_SHARP(c) || (c == '=') ) +#define IS_LUTF1(c) ( IS_UTF1(c) && !IS_ESCAPED(c) && !IS_SPACE(c) && \ + !IS_SHARP(c) && !IS_ESC(c) ) +#define IS_TUTF1(c) ( IS_UTF1(c) && !IS_ESCAPED(c) && !IS_SPACE(c) && \ + !IS_ESC(c) ) +#define IS_SUTF1(c) ( IS_UTF1(c) && !IS_ESCAPED(c) && !IS_ESC(c) ) + +/* Per RFC 4517: + * + * PrintableCharacter = ALPHA / DIGIT / SQUOTE / LPAREN / RPAREN / + * PLUS / COMMA / HYPHEN / DOT / EQUALS / + * SLASH / COLON / QUESTION / SPACE + */ +#define IS_PRINTABLE(c) ( isalnum(c) || (c == '\'') || (c == '(') || \ + (c == ')') || (c == '+') || (c == ',') || (c == '-') || (c == '.') || \ + (c == '=') || (c == '/') || (c == ':') || (c == '?') || IS_SPACE(c) ) + int string_filter_sub( Slapi_PBlock *pb, char *initial, char **any, char *final,Slapi_Value **bvals, int syntax ); int string_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int syntax,int ftype, Slapi_Value **retVal ); int string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals,Slapi_Value ***ivals, int syntax, int ftype ); @@ -78,5 +118,10 @@ char *first_word( char *s ); char *next_word( char *s ); char *phonetic( char *s ); +/* Validation helper functions */ +int keystring_validate( char *begin, char *end ); +int numericoid_validate( char *begin, char *end ); +int utf8char_validate( char *begin, char *end, char **last ); +int utf8string_validate( char *begin, char *end, char **last ); #endif diff --git a/ldap/servers/plugins/syntaxes/tel.c b/ldap/servers/plugins/syntaxes/tel.c index b67fb78b2..3a2edd688 100644 --- a/ldap/servers/plugins/syntaxes/tel.c +++ b/ldap/servers/plugins/syntaxes/tel.c 
@@ -58,6 +58,7 @@ static int tel_assertion2keys_ava( Slapi_PBlock *pb, Slapi_Value *val, static int tel_assertion2keys_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, Slapi_Value ***ivals ); static int tel_compare(struct berval *v1, struct berval *v2); +static int tel_validate(struct berval *val); /* the first name is the official one from RFC 2252 */ static char *names[] = { "TelephoneNumber", "tel", TELEPHONE_SYNTAX_OID, 0 }; @@ -95,6 +96,8 @@ tel_init( Slapi_PBlock *pb ) (void *) TELEPHONE_SYNTAX_OID ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_COMPARE, (void *) tel_compare ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_VALIDATE, + (void *) tel_validate ); LDAPDebug( LDAP_DEBUG_PLUGIN, "<= tel_init %d\n", rc, 0, 0 ); return( rc ); @@ -170,3 +173,35 @@ static int tel_compare( { return value_cmp(v1, v2, SYNTAX_TEL|SYNTAX_CIS, 3 /* Normalise both values */); } + +static int +tel_validate( + struct berval *val +) +{ + int rc = 0; /* assume the value is valid */ + int i = 0; + + /* Per RFC4517: + * + * TelephoneNumber = PrintableString + * PrintableString = 1*PrintableCharacter + */ + + /* Don't allow a 0 length string */ + if ((val == NULL) || (val->bv_len == 0)) { + rc = 1; + goto exit; + } + + /* Make sure all chars are a PrintableCharacter */ + for (i=0; i < val->bv_len; i++) { + if (!IS_PRINTABLE(val->bv_val[i])) { + rc = 1; + goto exit; + } + } + +exit: + return rc; +} diff --git a/ldap/servers/plugins/syntaxes/validate.c b/ldap/servers/plugins/syntaxes/validate.c new file mode 100644 index 000000000..8367e0839 --- /dev/null +++ b/ldap/servers/plugins/syntaxes/validate.c @@ -0,0 +1,352 @@ +/** BEGIN COPYRIGHT BLOCK + * This Program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; version 2 of the License. + * + * This Program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place, Suite 330, Boston, MA 02111-1307 USA. + * + * In addition, as a special exception, Red Hat, Inc. gives You the additional + * right to link the code of this Program with code not covered under the GNU + * General Public License ("Non-GPL Code") and to distribute linked combinations + * including the two, subject to the limitations in this paragraph. Non-GPL Code + * permitted under this exception must only link to the code of this Program + * through those well defined interfaces identified in the file named EXCEPTION + * found in the source code files (the "Approved Interfaces"). The files of + * Non-GPL Code may instantiate templates or use macros or inline functions from + * the Approved Interfaces without causing the resulting work to be covered by + * the GNU General Public License. Only Red Hat, Inc. may make changes or + * additions to the list of Approved Interfaces. You must obey the GNU General + * Public License in all respects for all of the Program code and other code used + * in conjunction with the Program except the Non-GPL Code covered by this + * exception. If you modify this file, you may extend this exception to your + * version of the file, but you are not obligated to do so. 
If you do not wish to + * provide this exception without modification, you must delete this exception + * statement from your version and license this file solely under the GPL without + * exception. + * + * + * Copyright (C) 2009 Red Hat, Inc. + * All rights reserved. + * END COPYRIGHT BLOCK **/ + +#ifdef HAVE_CONFIG_H +# include <config.h> +#endif + +/* validate.c - syntax validation helper functions */ + +#include <stdio.h> +#include <string.h> +#include <sys/types.h> +#include "syntax.h" + +/* Helper function for processing a 'keystring'. + * + * Returns 0 is the value between begin and end is a valid 'keystring'. + * Returns non-zero if the value is not a valide 'keystring'. + */ +int keystring_validate( + char *begin, + char *end +) +{ + int rc = 0; /* assume the value is valid */ + const char *p = begin; + + if ((begin == NULL) || (end == NULL)) { + rc = 1; + goto exit; + } + + /* Per RFC4512: + * + * keystring = leadkeychar *keychar + */ + if (IS_LEADKEYCHAR(*p)) { + for (p++; p <= end; p++) { + if (!IS_KEYCHAR(*p)) { + rc = 1; + goto exit; + } + } + } else { + rc = 1; + goto exit; + } + +exit: + return( rc ); +} + +/* Helper function for processing a 'numericoid'. + * + * Returns 0 is the value between begin and end is a valid 'numericoid'. + * Returns non-zero if the value is not a valide 'numericoid'. + */ +int numericoid_validate( + char *begin, + char *end +) +{ + int rc = 0; /* assume the value is valid */ + int found_separator = 0; + char *p = NULL; + + if ((begin == NULL) || (end == NULL)) { + rc = 1; + goto exit; + } + + /* Per RFC 4512: + * + * numericoid = number 1*( DOT number ) + */ + + /* one pass of this loop should process one element of the oid (number DOT) */ + for (p = begin; p <= end; p++) { + if (IS_LDIGIT(*p)) { + /* loop until we get to a separator char */ + while(*p != '.') { + p++; + if (p > end) { + /* ensure we got at least 2 elements */ + if (!found_separator) { + rc = 1; + goto exit; + } else { + /* looks like a valid numericoid */ + goto exit; + } + } else if (*p == '.') { + /* we can not end with a '.' */ + if (p == end) { + rc = 1; + goto exit; + } else { + found_separator = 1; + } + } else if (!isdigit(*p)) { + rc = 1; + goto exit; + } + } + } else if (*p == '0') { + p++; + if (p > end) { + /* ensure we got at least 2 elements */ + if (!found_separator) { + rc = 1; + goto exit; + } else { + /* looks like a valid numericoid */ + goto exit; + } + } else if (*p != '.') { + /* a leading 0 is not allowed unless the entire element is simply 0 */ + rc = 1; + goto exit; + } + + /* At this point, *p is '.'. We can not end with a '.' */ + if (p == end) { + rc = 1; + goto exit; + } else { + found_separator = 1; + } + } else { + rc = 1; + goto exit; + } + } + +exit: + return(rc); +} + +/* Helper to validate a single UTF-8 character. + * It is assumed that the first byte of the character + * is pointed to by begin. This function will not read + * past the byte pointed to by the end parameter. The + * last pointer will be filled in the the address of + * the last byte of the validated character if the + * character is valid, or the last byte processed + * in the invalid case. + * + * Returns 0 if it is valid and non-zero otherwise. 
*/ +int utf8char_validate( + char *begin, + char *end, + char **last +) +{ + int rc = 0; /* Assume char is valid */ + char *p = begin; + + if ((begin == NULL) || (end == NULL)) { + rc = 1; + goto exit; + } + + /* Per RFC 4512: + * + * UTF8 = UTF1 / UTFMB + * UTFMB = UTF2 / UTF3 / UTF4 + * UTF0 = %x80-BF + * UTF1 = %x00-7F + * UTF2 = %xC2-DF UTF0 + * UTF3 = %xE0 %xA0-BF UTF0 / %xE1-EC 2(UTF0) / + * %xED %x80-9F UTF0 / %xEE-EF 2(UTF0) + * UTF4 = %xF0 %x90-BF 2(UTF0) / %xF1-F3 3(UTF0) / + * %xF4 %x80-8F 2(UTF0) + */ + + /* If we have a single byte (ASCII) character, we + * don't really have any work to do. */ + if (IS_UTF1(*p)) { + goto exit; + } else if (IS_UTF2(*p)) { + /* Ensure that there is another byte + * and that is is 'UTF0'. */ + if ((p == end) || !IS_UTF0(*(p + 1))) { + rc = 1; + goto exit; + } + + /* Advance p so last is set correctly */ + p++; + } else if (IS_UTF3(*p)) { + /* Ensure that there are at least 2 more bytes. */ + if (end - p < 2) { + rc = 1; + goto exit; + } + + /* The first byte determines what is legal for + * the second byte. */ + if (*p == '\xE0') { + /* The next byte must be %xA0-BF. */ + p++; + if ((*p < '\xA0') || (*p > '\xBF')) { + rc = 1; + goto exit; + } + } else if (*p == '\xED') { + /* The next byte must be %x80-9F. */ + p++; + if ((*p < '\x80') || (*p > '\x9F')) { + rc = 1; + goto exit; + } + } else { + /* The next byte must each be 'UTF0'. */ + p++; + if (!IS_UTF0(*p)) { + rc = 1; + goto exit; + } + } + + /* The last byte must be 'UTF0'. */ + p++; + if (!IS_UTF0(*p)) { + rc = 1; + goto exit; + } + } else if (IS_UTF4(*p)) { + /* Ensure that there are at least 3 more bytes. */ + if (end - p < 3) { + rc = 1; + goto exit; + } + + /* The first byte determines what is legal for + * the second byte. */ + if (*p == '\xF0') { + /* The next byte must be %x90-BF. */ + if ((*p < '\x90') || (*p > '\xBF')) { + rc = 1; + goto exit; + } + } else if (*p == '\xF4') { + /* The next byte must be %x80-BF. */ + if ((*p < '\x80') || (*p > '\xBF')) { + rc = 1; + goto exit; + } + } else { + /* The next byte must each be 'UTF0'. */ + p++; + if (!IS_UTF0(*p)) { + rc = 1; + goto exit; + } + } + + /* The last 2 bytes must be 'UTF0'. */ + p++; + if (!IS_UTF0(*p) || !IS_UTF0(*(p + 1))) { + rc = 1; + goto exit; + } + + /* Advance the pointer so last is set correctly + * when we return. */ + p++; + } else { + /* We found an illegal first byte. */ + rc = 1; + goto exit; + } + +exit: + if (last) { + *last = p; + } + return(rc); +} + +/* Validates that a non '\0' terminated string is UTF8. This + * function will not read past the byte pointed to by the end + * parameter. The last pointer will be filled in to point to + * the address of the last byte of the last validated character + * if the string is valid, or the last byte processed in the + * invalid case. + * + * Returns 0 if it is valid and non-zero otherwise. */ +int utf8string_validate( + char *begin, + char *end, + char **last +) +{ + int rc = 0; /* Assume string is valid */ + char *p = NULL; + + if ((begin == NULL) || (end == NULL)) { + rc = 1; + goto exit; + } + + for (p = begin; p <= end; p++) { + if ((rc = utf8char_validate(p, end, &p)) != 0) { + goto exit; + } + } + + /* Adjust the pointer so last is set correctly for caller. 
*/ + p--; + +exit: + if (last) { + *last = p; + } + return(rc); +} + diff --git a/ldap/servers/plugins/syntaxes/validate_task.c b/ldap/servers/plugins/syntaxes/validate_task.c new file mode 100644 index 000000000..d469ccd61 --- /dev/null +++ b/ldap/servers/plugins/syntaxes/validate_task.c @@ -0,0 +1,303 @@ +/** BEGIN COPYRIGHT BLOCK + * This Program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; version 2 of the License. + * + * This Program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place, Suite 330, Boston, MA 02111-1307 USA. + * + * In addition, as a special exception, Red Hat, Inc. gives You the additional + * right to link the code of this Program with code not covered under the GNU + * General Public License ("Non-GPL Code") and to distribute linked combinations + * including the two, subject to the limitations in this paragraph. Non-GPL Code + * permitted under this exception must only link to the code of this Program + * through those well defined interfaces identified in the file named EXCEPTION + * found in the source code files (the "Approved Interfaces"). The files of + * Non-GPL Code may instantiate templates or use macros or inline functions from + * the Approved Interfaces without causing the resulting work to be covered by + * the GNU General Public License. Only Red Hat, Inc. may make changes or + * additions to the list of Approved Interfaces. You must obey the GNU General + * Public License in all respects for all of the Program code and other code used + * in conjunction with the Program except the Non-GPL Code covered by this + * exception. If you modify this file, you may extend this exception to your + * version of the file, but you are not obligated to do so. If you do not wish to + * provide this exception without modification, you must delete this exception + * statement from your version and license this file solely under the GPL without + * exception. + * + * + * Copyright (C) 2009 Red Hat, Inc. + * All rights reserved. 
+ * END COPYRIGHT BLOCK **/ + +#ifdef HAVE_CONFIG_H +# include <config.h> +#endif + +/* validate_task.c - syntax validation task */ + +#include <stdio.h> +#include <string.h> +#include <sys/types.h> +#include "syntax.h" + +/* + * Globals + */ +static Slapi_PluginDesc pdesc = { "syntax-validate-task", PLUGIN_MAGIC_VENDOR_STR, + PRODUCTTEXT, "syntax validation task plugin" }; +static void* _PluginID = NULL; + + +/* + * Data Structures + */ +typedef struct _task_data +{ + char *dn; + char *filter_str; + Slapi_Counter *invalid_entries; +} task_data; + + +/* + * Function Prototypes + */ +int syntax_validate_task_init(Slapi_PBlock *pb); +static int syntax_validate_task_start(Slapi_PBlock *pb); +static int syntax_validate_task_add(Slapi_PBlock *pb, Slapi_Entry *e, + Slapi_Entry *eAfter, int *returncode, + char *returntext, void *arg); +static void syntax_validate_task_destructor(Slapi_Task *task); +static void syntax_validate_task_thread(void *arg); +static int syntax_validate_task_callback(Slapi_Entry *e, void *callback_data); +static const char *fetch_attr(Slapi_Entry *e, const char *attrname, + const char *default_val); +static void syntax_validate_set_plugin_id(void * plugin_id); +static void *syntax_validate_get_plugin_id(); + + +/* + * Function Implementations + */ +int +syntax_validate_task_init(Slapi_PBlock *pb) +{ + int rc = 0; + char *syntax_validate_plugin_identity = NULL; + + /* Save plugin ID. */ + slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &syntax_validate_plugin_identity); + PR_ASSERT (syntax_validate_plugin_identity); + syntax_validate_set_plugin_id(syntax_validate_plugin_identity); + + /* Register task callback. */ + rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, + (void *) SLAPI_PLUGIN_VERSION_03 ); + rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_START_FN, + (void *) syntax_validate_task_start ); + + return rc; +} + +static int +syntax_validate_task_start(Slapi_PBlock *pb) +{ + int rc = slapi_task_register_handler("syntax validate", syntax_validate_task_add); + return rc; +} + +static int +syntax_validate_task_add(Slapi_PBlock *pb, Slapi_Entry *e, + Slapi_Entry *eAfter, int *returncode, + char *returntext, void *arg) +{ + PRThread *thread = NULL; + int rv = SLAPI_DSE_CALLBACK_OK; + task_data *mytaskdata = NULL; + Slapi_Task *task = NULL; + const char *filter; + const char *dn = 0; + + *returncode = LDAP_SUCCESS; + /* get arg(s) */ + if ((dn = fetch_attr(e, "basedn", 0)) == NULL) { + *returncode = LDAP_OBJECT_CLASS_VIOLATION; + rv = SLAPI_DSE_CALLBACK_ERROR; + goto out; + } + + if ((filter = fetch_attr(e, "filter", "(objectclass=*)")) == NULL) { + *returncode = LDAP_OBJECT_CLASS_VIOLATION; + rv = SLAPI_DSE_CALLBACK_ERROR; + goto out; + } + + /* setup our task data */ + mytaskdata = (task_data*)slapi_ch_malloc(sizeof(task_data)); + if (mytaskdata == NULL) { + *returncode = LDAP_OPERATIONS_ERROR; + rv = SLAPI_DSE_CALLBACK_ERROR; + goto out; + } + mytaskdata->dn = slapi_ch_strdup(dn); + mytaskdata->filter_str = slapi_ch_strdup(filter); + mytaskdata->invalid_entries = slapi_counter_new(); + + /* allocate new task now */ + task = slapi_new_task(slapi_entry_get_ndn(e)); + + /* register our destructor for cleaning up our private data */ + slapi_task_set_destructor_fn(task, syntax_validate_task_destructor); + + /* Stash a pointer to our data in the task */ + slapi_task_set_data(task, mytaskdata); + + /* start the sample task as a separate thread */ + thread = PR_CreateThread(PR_USER_THREAD, syntax_validate_task_thread, + (void *)task, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, + 
PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE); + if (thread == NULL) { + slapi_log_error( SLAPI_LOG_FATAL, SYNTAX_PLUGIN_SUBSYSTEM, + "unable to create task thread!\n"); + *returncode = LDAP_OPERATIONS_ERROR; + rv = SLAPI_DSE_CALLBACK_ERROR; + slapi_task_finish(task, *returncode); + } else { + rv = SLAPI_DSE_CALLBACK_OK; + } + +out: + return rv; +} + +static void +syntax_validate_task_destructor(Slapi_Task *task) +{ + if (task) { + task_data *mydata = (task_data *)slapi_task_get_data(task); + if (mydata) { + slapi_ch_free_string(&mydata->dn); + slapi_ch_free_string(&mydata->filter_str); + slapi_counter_destroy(&mydata->invalid_entries); + /* Need to cast to avoid a compiler warning */ + slapi_ch_free((void **)&mydata); + } + } +} + +static void +syntax_validate_task_thread(void *arg) +{ + int rc = 0; + Slapi_Task *task = (Slapi_Task *)arg; + task_data *td = NULL; + Slapi_PBlock *search_pb = slapi_pblock_new(); + + /* Fetch our task data from the task */ + td = (task_data *)slapi_task_get_data(task); + + /* Log started message. */ + slapi_task_begin(task, 1); + slapi_task_log_notice(task, "Syntax validation task starting (arg: %s) ...\n", + td->filter_str); + slapi_log_error(SLAPI_LOG_FATAL, SYNTAX_PLUGIN_SUBSYSTEM, + "Syntax validate task starting (base: \"%s\", filter: \"%s\") ...\n", + td->dn, td->filter_str); + + /* Perform the search and use a callback + * to validate each matching entry. */ + slapi_search_internal_set_pb(search_pb, td->dn, + LDAP_SCOPE_SUBTREE, td->filter_str, 0, 0, + 0, 0, syntax_validate_get_plugin_id(), 0); + + rc = slapi_search_internal_callback_pb(search_pb, + td, 0, syntax_validate_task_callback, 0); + + slapi_pblock_destroy(search_pb); + + /* Log finished message. */ + slapi_task_log_notice(task, "Syntax validate task complete. Found %" NSPRIu64 + " invalid entries.\n", slapi_counter_get_value(td->invalid_entries)); + slapi_task_log_status(task, "Syntax validate task complete. Found %" NSPRIu64 + " invalid entries.\n", slapi_counter_get_value(td->invalid_entries)); + slapi_log_error(SLAPI_LOG_FATAL, SYNTAX_PLUGIN_SUBSYSTEM, "Syntax validate task complete." + " Found %" NSPRIu64 " invalid entries.\n", + slapi_counter_get_value(td->invalid_entries)); + slapi_task_inc_progress(task); + + /* this will queue the destruction of the task */ + slapi_task_finish(task, rc); +} + +static int +syntax_validate_task_callback(Slapi_Entry *e, void *callback_data) +{ + int rc = 0; + char *dn = slapi_entry_get_dn(e); + task_data *td = (task_data *)callback_data; + Slapi_PBlock *pb = NULL; + + /* Override the syntax checking config to force syntax checking. */ + if (slapi_entry_syntax_check(NULL, e, 1) != 0) { + char *error_text = NULL; + + /* We need a pblock to get more details on the syntax violation, + * but we don't want to allocate a pblock unless we need it for + * performance reasons. This means that we will actually call + * slapi_entry_syntax_check() twice for entries that have a + * syntax violation. */ + pb = slapi_pblock_new(); + slapi_entry_syntax_check(pb, e, 1); + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &error_text); + slapi_log_error(SLAPI_LOG_FATAL, SYNTAX_PLUGIN_SUBSYSTEM, + "Entry \"%s\" violates syntax.\n%s", + dn, error_text); + slapi_pblock_destroy(pb); + + /* Keep a tally of the number of invalid entries found. */ + slapi_counter_increment(td->invalid_entries); + } + + return rc; +} + +/* extract a single value from the entry (as a string) -- if it's not in the + * entry, the default will be returned (which can be NULL). 
+ * you do not need to free anything returned by this. + */ +static const char * +fetch_attr(Slapi_Entry *e, const char *attrname, + const char *default_val) +{ +Slapi_Attr *attr; +Slapi_Value *val = NULL; + + if (slapi_entry_attr_find(e, attrname, &attr) != 0) { + return default_val; + } + + slapi_attr_first_value(attr, &val); + + return slapi_value_get_string(val); +} + +/* + * Plug-in identity management helper functions + */ +static void +syntax_validate_set_plugin_id(void * plugin_id) +{ + _PluginID=plugin_id; +} + +static void * +syntax_validate_get_plugin_id() +{ + return _PluginID; +} diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c index b69da2e57..6607eff98 100644 --- a/ldap/servers/slapd/add.c +++ b/ldap/servers/slapd/add.c @@ -800,6 +800,16 @@ static void handle_fast_add(Slapi_PBlock *pb, Slapi_Entry *entry) return; } + /* syntax check */ + if (slapi_entry_syntax_check(pb, entry, 0) != 0) { + char *errtext; + LDAPDebug(LDAP_DEBUG_TRACE, "entry failed syntax check\n", 0, 0, 0); + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &errtext); + send_ldap_result(pb, LDAP_INVALID_SYNTAX, NULL, errtext, 0, NULL); + slapi_entry_free(entry); + return; + } + /* Check if the entry being added is a Tombstone. Could be if we are * doing a replica init. */ if (slapi_entry_attr_hasvalue(entry, SLAPI_ATTR_OBJECTCLASS, diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index 7a1bcba1c..7cde2bfc5 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -534,9 +534,27 @@ void import_producer(void *param) "violates schema, ending line %d of file " "\"%s\"", escape_string(slapi_entry_get_dn(e), ebuf), curr_lineno, curr_filename); - if (e) + if (e) { + slapi_entry_free(e); + } + + job->skipped++; + continue; + } + + /* Check attribute syntax */ + if (slapi_entry_syntax_check(NULL, e, 0) != 0) + { + char ebuf[BUFSIZ]; + import_log_notice(job, "WARNING: skipping entry \"%s\" which " + "violates attribute syntax, ending line %d of " + "file \"%s\"", escape_string(slapi_entry_get_dn(e), ebuf), + curr_lineno, curr_filename); + if (e) { slapi_entry_free(e); - job->skipped++; + } + + job->skipped++; continue; } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index 764cff995..b9f573a72 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -305,6 +305,15 @@ ldbm_back_add( Slapi_PBlock *pb ) goto error_return; } + /* Check attribute syntax */ + if (slapi_entry_syntax_check(pb, e, 0) != 0) + { + LDAPDebug(LDAP_DEBUG_TRACE, "entry failed syntax check\n", 0, 0, 0); + ldap_result_code = LDAP_INVALID_SYNTAX; + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + goto error_return; + } + opcsn = operation_get_csn (operation); if(is_resurect_operation) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c index c169e9edb..1cbe92de1 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c @@ -188,6 +188,7 @@ ldbm_back_modify( Slapi_PBlock *pb ) struct backentry *e, *ec = NULL; Slapi_Entry *postentry = NULL; LDAPMod **mods; + Slapi_Mods smods; back_txn txn; back_txnid parent_txn; int retval = -1; @@ -279,11 +280,10 @@ ldbm_back_modify( Slapi_PBlock *pb ) slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); /* The Plugin may have messed about with some of the PBlock parameters... ie. 
mods */ slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &mods ); + slapi_mods_init_byref(&smods,mods); { - Slapi_Mods smods; CSN *csn = operation_get_csn(operation); - slapi_mods_init_byref(&smods,mods); if ( (change_entry = mods_have_effect (ec->ep_entry, &smods)) ) { ldap_result_code = entry_apply_mods_wsi(ec->ep_entry, &smods, csn, operation_is_flag_set(operation,OP_FLAG_REPLICATED)); /* @@ -301,7 +301,6 @@ ldbm_back_modify( Slapi_PBlock *pb ) slapi_pblock_set ( pb, SLAPI_ENTRY_POST_OP, postentry ); postentry = NULL; /* avoid removal/free in error_return code */ } - slapi_mods_done(&smods); if ( !change_entry || ldap_result_code != 0 ) { /* change_entry == 0 is not an error, but we need to free lock etc */ goto error_return; @@ -340,6 +339,14 @@ ldbm_back_modify( Slapi_PBlock *pb ) goto error_return; } + /* check attribute syntax for the new values */ + if (slapi_mods_syntax_check(pb, mods, 0) != 0) + { + ldap_result_code = LDAP_INVALID_SYNTAX; + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + goto error_return; + } + /* * make sure the entry contains all values in the RDN. * if not, the modification must have removed them. @@ -506,6 +513,7 @@ error_return: common_return: + slapi_mods_done(&smods); if (ec_in_cache) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c index 1cb35ab80..c71dd8eed 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c @@ -530,6 +530,17 @@ ldbm_back_modrdn( Slapi_PBlock *pb ) goto error_return; } + /* Check attribute syntax if any new values are being added for the new RDN */ + if (slapi_mods_get_num_mods(&smods_operation_wsi)>0) + { + if (slapi_mods_syntax_check(pb, smods_generated_wsi.mods, 0) != 0) + { + ldap_result_code = LDAP_INVALID_SYNTAX; + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + goto error_return; + } + } + /* * Update the DN CSN of the entry. 
*/ diff --git a/ldap/servers/slapd/back-ldif/add.c b/ldap/servers/slapd/back-ldif/add.c index 231f54837..277999733 100644 --- a/ldap/servers/slapd/back-ldif/add.c +++ b/ldap/servers/slapd/back-ldif/add.c @@ -92,6 +92,13 @@ ldif_back_add( Slapi_PBlock *pb ) return( -1 ); } + /* Check if the attribute values in the entry obey the syntaxes */ + if ( slapi_entry_syntax_check( pb, e, 0 ) != 0 ) { + LDAPDebug( LDAP_DEBUG_TRACE, "entry failed syntax_check\n", 0, 0, 0 ); + slapi_send_ldap_result( pb, LDAP_INVALID_SYNTAX, NULL, NULL, 0, NULL ); + return( -1 ); + } + prev = NULL; /*Lock the database*/ diff --git a/ldap/servers/slapd/back-ldif/modify.c b/ldap/servers/slapd/back-ldif/modify.c index 58229ecc5..7fff06707 100644 --- a/ldap/servers/slapd/back-ldif/modify.c +++ b/ldap/servers/slapd/back-ldif/modify.c @@ -140,6 +140,13 @@ ldif_back_modify( Slapi_PBlock *pb ) PR_Unlock( db->ldif_lock ); goto error_return; } + + /* Check if the attribute values in the mods obey the syntaxes */ + if ( slapi_mods_syntax_check( pb, mods, 0 ) != 0 ) { + slapi_send_ldap_result( pb, LDAP_INVALID_SYNTAX, NULL, NULL, 0, NULL ); + PR_Unlock( db->ldif_lock ); + goto error_return; + } /* Check for abandon again */ if ( slapi_op_abandoned( pb ) ) { diff --git a/ldap/servers/slapd/config.c b/ldap/servers/slapd/config.c index 9cf56ddd4..1af1b77b4 100644 --- a/ldap/servers/slapd/config.c +++ b/ldap/servers/slapd/config.c @@ -239,11 +239,13 @@ slapd_bootstrap_config(const char *configdir) char _localuser[BUFSIZ]; char logenabled[BUFSIZ]; char schemacheck[BUFSIZ]; + char syntaxcheck[BUFSIZ]; + char syntaxlogging[BUFSIZ]; Slapi_DN plug_dn; workpath[0] = loglevel[0] = maxdescriptors[0] = '\0'; - val[0] = logenabled[0] = schemacheck[0] = '\0'; - _localuser[0] = '\0'; + val[0] = logenabled[0] = schemacheck[0] = syntaxcheck[0] = '\0'; + syntaxlogging[0] = _localuser[0] = '\0'; /* Convert LDIF to entry structures */ slapi_sdn_init_dn_byref(&plug_dn, PLUGIN_BASE_DN); @@ -460,6 +462,34 @@ slapd_bootstrap_config(const char *configdir) } } + /* see if we need to enable syntax checking */ + if (!syntaxcheck[0] && + entry_has_attr_and_value(e, CONFIG_SYNTAXCHECK_ATTRIBUTE, + syntaxcheck, sizeof(syntaxcheck))) + { + if (config_set_syntaxcheck(CONFIG_SYNTAXCHECK_ATTRIBUTE, + syntaxcheck, errorbuf, CONFIG_APPLY) + != LDAP_SUCCESS) + { + LDAPDebug(LDAP_DEBUG_ANY, "%s: %s: %s\n", configfile, + CONFIG_SYNTAXCHECK_ATTRIBUTE, errorbuf); + } + } + + /* see if we need to enable syntax warnings */ + if (!syntaxlogging[0] && + entry_has_attr_and_value(e, CONFIG_SYNTAXLOGGING_ATTRIBUTE, + syntaxlogging, sizeof(syntaxlogging))) + { + if (config_set_syntaxlogging(CONFIG_SYNTAXLOGGING_ATTRIBUTE, + syntaxlogging, errorbuf, CONFIG_APPLY) + != LDAP_SUCCESS) + { + LDAPDebug(LDAP_DEBUG_ANY, "%s: %s: %s\n", configfile, + CONFIG_SYNTAXLOGGING_ATTRIBUTE, errorbuf); + } + } + /* see if we need to expect quoted schema values */ if (entry_has_attr_and_value(e, CONFIG_ENQUOTE_SUP_OC_ATTRIBUTE, val, sizeof(val))) diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c index 4d5935006..956c29d46 100644 --- a/ldap/servers/slapd/dse.c +++ b/ldap/servers/slapd/dse.c @@ -1864,6 +1864,17 @@ dse_modify(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi return dse_modify_return( -1, ec, ecc ); } + /* Check if the attribute values in the mods obey the syntaxes */ + if ( slapi_mods_syntax_check( pb, mods, 0 ) != 0 ) + { + char *errtext; + + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &errtext); + slapi_send_ldap_result( pb, LDAP_INVALID_SYNTAX, 
NULL, errtext, 0, NULL ); + slapi_sdn_done(&sdn); + return dse_modify_return( -1, ec, ecc ); + } + /* Change the entry itself both on disk and in the AVL tree */ /* dse_replace_entry free's the existing entry. */ if (dse_replace_entry( pdse, ecc, !dont_write_file, DSE_USE_LOCK )!=0 ) @@ -1941,6 +1952,18 @@ dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this f return error; } + /* Check if the attribute values in the entry obey the syntaxes */ + if ( slapi_entry_syntax_check( pb, e, 0 ) != 0 ) + { + char *errtext; + LDAPDebug( SLAPI_DSE_TRACELEVEL, + "dse_add: entry failed syntax check\n", 0, 0, 0 ); + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &errtext); + slapi_send_ldap_result( pb, LDAP_INVALID_SYNTAX, NULL, errtext, 0, NULL ); + slapi_sdn_done(&sdn); + return error; + } + /* * Attempt to find this dn. */ diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c index f71b7fdd4..beec7d5c6 100644 --- a/ldap/servers/slapd/fedse.c +++ b/ldap/servers/slapd/fedse.c @@ -143,14 +143,7 @@ static const char *internal_entries[] = "objectclass:top\n" "objectclass:nsSNMP\n" "cn:SNMP\n" - "nsSNMPEnabled:on\n" - "nsSNMPName:\n" - "nsSNMPOrganization:\n" - "nsSNMPLocation:\n" - "nsSNMPContact:\n" - "nsSNMPDescription:\n" - "nsSNMPMasterHost:\n" - "nsSNMPMasterPort:\n" + "nsSNMPEnabled: on\n" "aci:(target=\"ldap:///cn=SNMP,cn=config\")(targetattr !=\"aci\")(version 3.0;acl \"snmp\";allow (read, search, compare)(userdn = \"ldap:///anyone\");)\n", }; @@ -161,7 +154,7 @@ static char *easter_egg_entry= "1E14405A150F47341F0E09191B0A1F5A3E13081F190E1508035A2E1F1B1756191447171514" "130E1508701518101F190E39161B0909405A0E150A701518101F190E39161B0909405A1508" "1D1B1413001B0E1315141B162F14130E701518101F190E39161B0909405A1E13081F190E15" -"0803040E1F1B17041F020E1F14091318161F041518101F190E70150F405A341F0E09191B0A" +"0803570E1F1B17571F020E1F14091318161F571518101F190E70150F405A341F0E09191B0A" "1F5A291F190F08130E035A2915160F0E1315140970150F405A341F0E09191B0A1F5A3E1308" "1F190E1508035A2E1F1B17701E1F091908130A0E131514405A3E1B0C131E5A3815081F121B" "17565A301B190B0F1F1613141F5A3815081F121B17565A3B140E121514035A3C15020D1508" diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index e473663c5..30ad5f3a6 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -321,6 +321,12 @@ static struct config_get_and_set { {CONFIG_SCHEMACHECK_ATTRIBUTE, config_set_schemacheck, NULL, 0, (void**)&global_slapdFrontendConfig.schemacheck, CONFIG_ON_OFF, NULL}, + {CONFIG_SYNTAXCHECK_ATTRIBUTE, config_set_syntaxcheck, + NULL, 0, + (void**)&global_slapdFrontendConfig.syntaxcheck, CONFIG_ON_OFF, NULL}, + {CONFIG_SYNTAXLOGGING_ATTRIBUTE, config_set_syntaxlogging, + NULL, 0, + (void**)&global_slapdFrontendConfig.syntaxlogging, CONFIG_ON_OFF, NULL}, {CONFIG_DS4_COMPATIBLE_SCHEMA_ATTRIBUTE, config_set_ds4_compatible_schema, NULL, 0, (void**)&global_slapdFrontendConfig.ds4_compatible_schema, @@ -891,6 +897,8 @@ FrontendConfig_init () { cfg->sizelimit = SLAPD_DEFAULT_SIZELIMIT; cfg->timelimit = SLAPD_DEFAULT_TIMELIMIT; cfg->schemacheck = LDAP_ON; + cfg->syntaxcheck = LDAP_OFF; + cfg->syntaxlogging = LDAP_OFF; cfg->ds4_compatible_schema = LDAP_OFF; cfg->enquote_sup_oc = LDAP_OFF; cfg->lastmod = LDAP_ON; @@ -2422,6 +2430,33 @@ config_set_schemacheck( const char *attrname, char *value, char *errorbuf, int a return retVal; } +int +config_set_syntaxcheck( const char *attrname, char *value, char *errorbuf, int apply ) { + int retVal = LDAP_SUCCESS; + slapdFrontendConfig_t 
*slapdFrontendConfig = getFrontendConfig(); + + retVal = config_set_onoff ( attrname, + value, + &(slapdFrontendConfig->syntaxcheck), + errorbuf, + apply); + + return retVal; +} + +int +config_set_syntaxlogging( const char *attrname, char *value, char *errorbuf, int apply ) { + int retVal = LDAP_SUCCESS; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + retVal = config_set_onoff ( attrname, + value, + &(slapdFrontendConfig->syntaxlogging), + errorbuf, + apply); + + return retVal; +} int config_set_ds4_compatible_schema( const char *attrname, char *value, char *errorbuf, int apply ) { @@ -4033,6 +4068,30 @@ config_get_schemacheck() { return retVal; } +int +config_get_syntaxcheck() { + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal; + + CFG_LOCK_READ(slapdFrontendConfig); + retVal = slapdFrontendConfig->syntaxcheck; + CFG_UNLOCK_READ(slapdFrontendConfig); + + return retVal; +} + +int +config_get_syntaxlogging() { + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal; + + CFG_LOCK_READ(slapdFrontendConfig); + retVal = slapdFrontendConfig->syntaxlogging; + CFG_UNLOCK_READ(slapdFrontendConfig); + + return retVal; +} + int config_get_ds4_compatible_schema() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c index 6ac6aa8e6..062a87f82 100644 --- a/ldap/servers/slapd/pblock.c +++ b/ldap/servers/slapd/pblock.c @@ -1072,6 +1072,12 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value ) case SLAPI_SYNTAX_SUBSTRLENS: (*(int **)value) = pblock->pb_substrlens; break; + case SLAPI_PLUGIN_SYNTAX_VALIDATE: + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX ) { + return( -1 ); + } + (*(int *)value) = pblock->pb_plugin->plg_syntax_validate; + break; /* controls we know about */ case SLAPI_MANAGEDSAIT: @@ -2314,6 +2320,12 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value ) case SLAPI_SYNTAX_SUBSTRLENS: pblock->pb_substrlens = (int *) value; break; + case SLAPI_PLUGIN_SYNTAX_VALIDATE: + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX ) { + return( -1 ); + } + pblock->pb_plugin->plg_syntax_validate = (IFP) value; + break; case SLAPI_ENTRY_PRE_OP: pblock->pb_pre_op_entry = (Slapi_Entry *) value; break; diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index a7ad7dffa..5ae63564e 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -1878,35 +1878,37 @@ plugin_add_descriptive_attributes( Slapi_Entry *e, struct slapdplugin *plugin ) if ( NULL == plugin ) { + /* This can happen for things such as disabled syntax plug-ins. We + * just treat this as a warning to allow the description attributes + * to be set to a default value to avoid an objectclass violation. */ LDAPDebug(LDAP_DEBUG_PLUGIN, - "Error: failed to add descriptive values for plugin %s" - " (could not find plugin entry)\n", + "Warning: couldn't find plugin %s in global list. " + "Adding default descriptive values.\n", slapi_entry_get_dn_const(e), 0, 0 ); - return 1; /* failure */ } } if (add_plugin_description(e, ATTR_PLUGIN_PLUGINID, - plugin->plg_desc.spd_id)) + plugin ? plugin->plg_desc.spd_id : NULL)) { status = 1; } if (add_plugin_description(e, ATTR_PLUGIN_VERSION, - plugin->plg_desc.spd_version)) + plugin ? plugin->plg_desc.spd_version : NULL)) { status = 1; } if (add_plugin_description(e, ATTR_PLUGIN_VENDOR, - plugin->plg_desc.spd_vendor)) + plugin ? 
plugin->plg_desc.spd_vendor: NULL)) { status = 1; } if (add_plugin_description(e, ATTR_PLUGIN_DESC, - plugin->plg_desc.spd_description)) + plugin ? plugin->plg_desc.spd_description : NULL)) { status = 1; } diff --git a/ldap/servers/slapd/plugin_syntax.c b/ldap/servers/slapd/plugin_syntax.c index cb3cde9f4..3290a9547 100644 --- a/ldap/servers/slapd/plugin_syntax.c +++ b/ldap/servers/slapd/plugin_syntax.c @@ -261,6 +261,183 @@ plugin_call_syntax_filter_sub_sv( return( rc ); } +/* Checks if the values of all attributes in an entry are valid for the + * syntax specified for the attribute in question. Setting override to + * 1 will force syntax checking to be performed, even if syntax checking + * is disabled in the config. Setting override to 0 will obey the config + * settings. + * + * Returns 1 if there is a syntax violation and sets the error message + * appropriately. Returns 0 if everything checks out fine. + */ +int +slapi_entry_syntax_check( + Slapi_PBlock *pb, Slapi_Entry *e, int override +) +{ + int ret = 0; + int i = 0; + int is_replicated_operation = 0; + int badval = 0; + int syntaxcheck = config_get_syntaxcheck(); + int syntaxlogging = config_get_syntaxlogging(); + Slapi_Attr *prevattr = NULL; + Slapi_Attr *a = NULL; + char errtext[ BUFSIZ ]; + char *errp = &errtext[0]; + size_t err_remaining = sizeof(errtext); + + if (pb != NULL) { + slapi_pblock_get(pb, SLAPI_IS_REPLICATED_OPERATION, &is_replicated_operation); + } + + /* If syntax checking and logging are off, or if this is a + * replicated operation, just return that the syntax is OK. */ + if (((syntaxcheck == 0) && (syntaxlogging == 0) && (override == 0)) || + is_replicated_operation) { + goto exit; + } + + i = slapi_entry_first_attr(e, &a); + + while ((-1 != i) && a && (a->a_plugin != NULL)) { + /* If no validate function is available for this type, just + * assume that the value is valid. */ + if ( a->a_plugin->plg_syntax_validate != NULL ) { + int numvals = 0; + + slapi_attr_get_numvalues(a, &numvals); + if ( numvals > 0 ) { + Slapi_Value *val = NULL; + const struct berval *bval = NULL; + int hint = slapi_attr_first_value(a, &val); + + /* iterate through each value to check if it's valid */ + while (val != NULL) { + bval = slapi_value_get_berval(val); + if ((a->a_plugin->plg_syntax_validate( bval )) != 0) { + if (syntaxlogging) { + slapi_log_error( SLAPI_LOG_FATAL, "Syntax Check", + "\"%s\": (%s) value #%d invalid per syntax\n", + slapi_entry_get_dn(e), a->a_type, hint ); + } + + if (syntaxcheck || override) { + if (pb) { + /* Append new text to any existing text. */ + errp += PR_snprintf( errp, err_remaining, + "%s: value #%d invalid per syntax\n", a->a_type, hint ); + err_remaining -= errp - &errtext[0]; + } + ret = 1; + } + } + + hint = slapi_attr_next_value(a, hint, &val); + } + } + } + + prevattr = a; + i = slapi_entry_next_attr(e, prevattr, &a); + } + + /* See if we need to set the error text in the pblock. */ + if (errp != &errtext[0]) { + slapi_pblock_set( pb, SLAPI_PB_RESULT_TEXT, errtext ); + } + +exit: + return( ret ); +} + +/* Checks if the values of all attributes being added in a Slapi_Mods + * are valid for the syntax specified for the attribute in question. + * The new values in an add or replace modify operation and the newrdn + * value for a modrdn operation will be checked. + * Returns 1 if there is a syntax violation and sets the error message + * appropriately. Returns 0 if everything checks out fine. 
+ */ +int +slapi_mods_syntax_check( + Slapi_PBlock *pb, LDAPMod **mods, int override +) +{ + int ret = 0; + int i, j = 0; + int is_replicated_operation = 0; + int badval = 0; + int syntaxcheck = config_get_syntaxcheck(); + int syntaxlogging = config_get_syntaxlogging(); + char errtext[ BUFSIZ ]; + char *errp = &errtext[0]; + size_t err_remaining = sizeof(errtext); + char *dn = NULL; + LDAPMod *mod = NULL; + + if (mods == NULL) { + ret = 1; + goto exit; + } + + if (pb != NULL) { + slapi_pblock_get(pb, SLAPI_IS_REPLICATED_OPERATION, &is_replicated_operation); + slapi_pblock_get(pb, SLAPI_TARGET_DN, &dn); + } + + /* If syntax checking and logging are off, or if this is a + * replicated operation, just return that the syntax is OK. */ + if (((syntaxcheck == 0) && (syntaxlogging == 0) && (override == 0)) || + is_replicated_operation) { + goto exit; + } + + /* Loop through mods */ + for (i = 0; mods[i] != NULL; i++) { + mod = mods[i]; + + /* We only care about replace and add modify operations that + * are truly adding new values to the entry. */ + if ((SLAPI_IS_MOD_REPLACE(mod->mod_op) || SLAPI_IS_MOD_ADD(mod->mod_op)) && + (mod->mod_bvalues != NULL)) { + struct slapdplugin *syntax_plugin = NULL; + + /* Find the plug-in for this type, then call it's + * validate function.*/ + slapi_attr_type2plugin(mod->mod_type, (void **)&syntax_plugin); + if ((syntax_plugin != NULL) && (syntax_plugin->plg_syntax_validate != NULL)) { + /* Loop through the values and validate each one */ + for (j = 0; mod->mod_bvalues[j] != NULL; j++) { + if (syntax_plugin->plg_syntax_validate(mod->mod_bvalues[j]) != 0) { + if (syntaxlogging) { + slapi_log_error( SLAPI_LOG_FATAL, "Syntax Check", "\"%s\": (%s) value #%d invalid per syntax\n", + dn ? dn : "NULL", mod->mod_type, j ); + } + + if (syntaxcheck || override) { + if (pb) { + /* Append new text to any existing text. */ + errp += PR_snprintf( errp, err_remaining, + "%s: value #%d invalid per syntax\n", mod->mod_type, j ); + err_remaining -= errp - &errtext[0]; + } + ret = 1; + } + } + } + } + } + } + + /* See if we need to set the error text in the pblock. 
*/ + if (errp != &errtext[0]) { + slapi_pblock_set( pb, SLAPI_PB_RESULT_TEXT, errtext ); + } + +exit: + return( ret ); +} + SLAPI_DEPRECATED int slapi_call_syntax_values2keys( /* JCM SLOW FUNCTION */ void *vpi, diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 7c25b18d7..c561196df 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -264,6 +264,8 @@ int config_set_accesscontrol( const char *attrname, char *value, char *errorbuf, int config_set_security( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_readonly( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_schemacheck( const char *attrname, char *value, char *errorbuf, int apply ); +int config_set_syntaxcheck( const char *attrname, char *value, char *errorbuf, int apply ); +int config_set_syntaxlogging( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_ds4_compatible_schema( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_schema_ignore_trailing_spaces( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_rootdn( const char *attrname, char *value, char *errorbuf, int apply ); @@ -406,6 +408,8 @@ int config_get_return_exact_case(); int config_get_result_tweak(); int config_get_security(); int config_get_schemacheck(); +int config_get_syntaxcheck(); +int config_get_syntaxlogging(); int config_get_ds4_compatible_schema(); int config_get_schema_ignore_trailing_spaces(); char *config_get_rootdn(); diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 04b13d09d..e331a946e 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -3415,7 +3415,9 @@ read_at_ldif(const char *input, struct asyntaxinfo **asipp, char *errorbuf, schema_errprefix_at, first_attr_name, "Missing parent attribute syntax OID"); status = invalid_syntax_error; - } else { + /* We only want to use the parent syntax if a SYNTAX + * wasn't explicitly specified for this attribute. 
*/ + } else if (NULL == pSyntax) { char *pso = plugin_syntax2oid(asi_parent->asi_plugin); if (pso) { diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index ceeb11e9a..cec186f96 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -287,8 +287,8 @@ typedef void (*VFP0)(); #define SLAPD_SCHEMA_DN "cn=schema" #define SLAPD_CONFIG_DN "cn=config" -#define EGG_OBJECT_CLASS "directory~team~extensible~object" -#define EGG_FILTER "(objectclass=directory~team~extensible~object)" +#define EGG_OBJECT_CLASS "directory-team-extensible-object" +#define EGG_FILTER "(objectclass=directory-team-extensible-object)" #define BE_LIST_SIZE 100 /* used by mapping tree code to hold be_list stuff */ @@ -501,16 +501,17 @@ typedef int (*SyntaxEnumFunc)(char **names, Slapi_PluginDesc *plugindesc, /* OIDs for some commonly used syntaxes */ #define BINARY_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.5" -#define BOOLEAN_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.7" +#define BOOLEAN_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.7" #define COUNTRYSTRING_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.11" #define DN_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.12" #define DIRSTRING_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.15" #define GENERALIZEDTIME_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.24" #define IA5STRING_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.26" #define INTEGER_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.27" -#define JPEG_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.28" +#define JPEG_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.28" +#define NUMERICSTRING_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.36" +#define OID_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.38" #define OCTETSTRING_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.40" -#define OID_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.38" #define POSTALADDRESS_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.41" #define TELEPHONE_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.50" #define SPACE_INSENSITIVE_STRING_SYNTAX_OID "2.16.840.1.113730.3.7.1" @@ -967,6 +968,7 @@ struct slapdplugin { char **plg_un_syntax_names; char *plg_un_syntax_oid; IFP plg_un_syntax_compare; + IFP plg_un_syntax_validate; } plg_un_syntax; #define plg_syntax_filter_ava plg_un.plg_un_syntax.plg_un_syntax_filter_ava #define plg_syntax_filter_sub plg_un.plg_un_syntax.plg_un_syntax_filter_sub @@ -976,7 +978,8 @@ struct slapdplugin { #define plg_syntax_flags plg_un.plg_un_syntax.plg_un_syntax_flags #define plg_syntax_names plg_un.plg_un_syntax.plg_un_syntax_names #define plg_syntax_oid plg_un.plg_un_syntax.plg_un_syntax_oid -#define plg_syntax_compare plg_un.plg_un_syntax.plg_un_syntax_compare +#define plg_syntax_compare plg_un.plg_un_syntax.plg_un_syntax_compare +#define plg_syntax_validate plg_un.plg_un_syntax.plg_un_syntax_validate struct plg_un_acl_struct { IFP plg_un_acl_init; @@ -1519,6 +1522,9 @@ typedef struct daemon_ports_s { /* Definition for plugin syntax compare routine */ typedef int (*value_compare_fn_type)(const struct berval *,const struct berval *); +/* Definition for plugin syntax validate routine */ +typedef int (*value_validate_fn_type)(const struct berval *); + #include "pw.h" #include "proto-slap.h" @@ -1631,6 +1637,8 @@ typedef struct _slapdEntryPoints { #define CONFIG_OBJECTCLASS_ATTRIBUTE "nsslapd-objectclass" #define CONFIG_ATTRIBUTE_ATTRIBUTE "nsslapd-attribute" #define CONFIG_SCHEMACHECK_ATTRIBUTE "nsslapd-schemacheck" +#define CONFIG_SYNTAXCHECK_ATTRIBUTE "nsslapd-syntaxcheck" +#define CONFIG_SYNTAXLOGGING_ATTRIBUTE "nsslapd-syntaxlogging" #define CONFIG_DS4_COMPATIBLE_SCHEMA_ATTRIBUTE "nsslapd-ds4-compatible-schema" 
#define CONFIG_SCHEMA_IGNORE_TRAILING_SPACES "nsslapd-schema-ignore-trailing-spaces" #define CONFIG_SCHEMAREPLACE_ATTRIBUTE "nsslapd-schemareplace" @@ -1846,6 +1854,8 @@ typedef struct _slapdFrontendConfig { int readonly; int reservedescriptors; int schemacheck; + int syntaxcheck; + int syntaxlogging; int ds4_compatible_schema; int schema_ignore_trailing_spaces; int secureport; diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 3c0cf72df..70556e980 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -280,6 +280,8 @@ int slapi_entry_next_attr( const Slapi_Entry *e, Slapi_Attr *prevattr, Slapi_Att const char *slapi_entry_get_uniqueid( const Slapi_Entry *e ); void slapi_entry_set_uniqueid( Slapi_Entry *e, char *uniqueid ); int slapi_entry_schema_check( Slapi_PBlock *pb, Slapi_Entry *e ); +int slapi_entry_syntax_check( Slapi_PBlock *pb, Slapi_Entry *e, int override ); +int slapi_mods_syntax_check( Slapi_PBlock *pb, LDAPMod **mods, int override ); int slapi_entry_rdn_values_present( const Slapi_Entry *e ); int slapi_entry_add_rdn_values( Slapi_Entry *e ); int slapi_entry_attr_delete( Slapi_Entry *e, const char *type ); @@ -1702,9 +1704,9 @@ typedef struct slapi_plugindesc { #define SLAPI_PLUGIN_SYNTAX_OID 706 #define SLAPI_PLUGIN_SYNTAX_FLAGS 707 #define SLAPI_PLUGIN_SYNTAX_COMPARE 708 - /* user defined substrlen; not stored in slapdplugin, but pblock itself */ -#define SLAPI_SYNTAX_SUBSTRLENS 709 +#define SLAPI_SYNTAX_SUBSTRLENS 709 +#define SLAPI_PLUGIN_SYNTAX_VALIDATE 710 /* ACL plugin functions and arguments */ #define SLAPI_PLUGIN_ACL_INIT 730
0
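For readers skimming the validator code in the record above, the RFC 4517 value rules it enforces (Integer, NumericString, and the PrintableString used by TelephoneNumber) can be restated in a few lines of Python. This is only an illustrative sketch for clarity; the regex constants and helper functions here are hypothetical and are not part of the commit.

# Illustrative only: a Python restatement of the RFC 4517 rules that the
# C validators in the diff above (int_validate, numstr_validate, tel_validate)
# implement. Names below are hypothetical, not part of the commit.
import re

# Integer = ( HYPHEN LDIGIT *DIGIT ) / number ; number = DIGIT / ( LDIGIT 1*DIGIT )
INTEGER_RE = re.compile(r'^(-[1-9][0-9]*|0|[1-9][0-9]*)$')

# NumericString = 1*( DIGIT / SPACE )
NUMERIC_STRING_RE = re.compile(r'^[0-9 ]+$')

# PrintableString = 1*PrintableCharacter (TelephoneNumber is a PrintableString)
PRINTABLE_STRING_RE = re.compile(r"^[A-Za-z0-9'()+,\-.=/:? ]+$")

def is_valid_integer(value):
    return bool(INTEGER_RE.match(value))

def is_valid_numeric_string(value):
    return bool(NUMERIC_STRING_RE.match(value))

def is_valid_printable_string(value):
    return bool(PRINTABLE_STRING_RE.match(value))

if __name__ == '__main__':
    # A leading zero or "-0" is rejected, exactly as int_validate does.
    assert is_valid_integer('-123') and is_valid_integer('0')
    assert not is_valid_integer('0123') and not is_valid_integer('-0')
    assert is_valid_numeric_string('123 456')
    assert not is_valid_numeric_string('12a')
    assert is_valid_printable_string('+1 (555) 123-4567')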
cd55c8f7504fdfb59e0d08541671f58f02d2702a
389ds/389-ds-base
Ticket 48906 Allow nsslapd-db-locks to be configurable online Bug Description: Currently an online update of nsslapd-db-locks fails with: "nsslapd-db-locks can't be modified while the server is running." Fix Description: The reason is that nsslapd-db-locks is missing CONFIG_FLAG_ALLOW_RUNNING_CHANGE in the config array. Two fields (li_dblock and li_new_dblock) are added to ldbminfo. li_dblock stores the current dblock config and is used to initialize the dblayer_private field 'dblayer_lock_config'. li_new_dblock stores the future dblock config, so the online update is applied to this field instead of to li_dblock. At shutdown, dse.ldif will contain li_new_dblock while the guardian file keeps the current value (li_dblock), so at startup the DB environment will be recreated. https://fedorahosted.org/389/ticket/48906 Reviewed by: Mark Reynolds, William Brown, Noriko Hosoi Platforms tested: F23 Flag Day: no Doc impact: no
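A rough sketch of the update flow described above, in the python-ldap style of the ticket48906 test added in the diff that follows. It is only an illustration: the connect/restart helpers, the literal DNs (the expansion of lib389's DN_LDBM), and the string attribute values are assumptions for this sketch, not part of the commit.

# Minimal sketch, assuming `connect()` returns a bound python-ldap connection
# to the instance and `restart_instance()` restarts the server out of band.
import ldap

LDBM_CONFIG = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
LDBM_MONITOR = 'cn=database,cn=monitor,cn=ldbm database,cn=plugins,cn=config'

def update_db_locks(connect, restart_instance, new_value='20000'):
    # Online update: accepted now that the attribute allows running changes.
    conn = connect()
    conn.modify_s(LDBM_CONFIG,
                  [(ldap.MOD_REPLACE, 'nsslapd-db-locks', new_value)])
    conn.unbind_s()

    # The monitored value only changes once the DB environment is recreated,
    # i.e. after a restart; until then the guardian file keeps the old value.
    restart_instance()
    conn = connect()
    entry = conn.search_s(LDBM_MONITOR, ldap.SCOPE_BASE, '(objectclass=*)',
                          ['nsslapd-db-configured-locks'])[0]
    assert entry[1]['nsslapd-db-configured-locks'][0] == new_value
    conn.unbind_s()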
commit cd55c8f7504fdfb59e0d08541671f58f02d2702a Author: Thierry Bordaz <[email protected]> Date: Thu Sep 8 19:29:25 2016 +0200 Ticket 48906 Allow nsslapd-db-locks to be configurable online Bug Description: Currently online update of nsslapd-db-locks is failing because: "nsslapd-db-locks can't be modified while the server is running." Fix Description: The reason is that nsslapd-db-locks is missing CONFIG_FLAG_ALLOW_RUNNING_CHANGE in the config array. two fields (li_dblock and li_new_dblock) are added to ldbminfo. li_dblock is used to store the current dblock config and is used to initialize the dblayer_private field 'dblayer_lock_config'. li_new_dblock is used to store the future dblock config. So online update will be done on this field instead on the li_dblock and at shutdown, the dse.ldif will contain li_new_dblock but the guardian file will keep the current value (li_dblock). So at startup the env will be recreated https://fedorahosted.org/389/ticket/48906 Reviewed by: Mark Reynolds, William Brown, Noriko Hosoi Platforms tested: F23 Flag Day: no Doc impact: no diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py new file mode 100644 index 000000000..b201b1f6f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48906_test.py @@ -0,0 +1,367 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import shutil +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl +from ldap.controls.simple import GetEffectiveRightsControl +import fnmatch + +log = logging.getLogger(__name__) + +installation_prefix = None + +CONFIG_DN = 'cn=config' +RDN_VAL_SUFFIX = 'ticket48906.org' +MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX +MYSUFFIXBE = 'ticket48906' + +SEARCHFILTER = '(objectclass=person)' + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 +DBLOCK_DEFAULT="10000" +DBLOCK_LDAP_UPDATE="20000" +DBLOCK_EDIT_UPDATE="40000" +DBLOCK_MIN_UPDATE=DBLOCK_DEFAULT +DBLOCK_ATTR_CONFIG="nsslapd-db-locks" +DBLOCK_ATTR_MONITOR="nsslapd-db-configured-locks" +DBLOCK_ATTR_GUARDIAN="locks" + +DBCACHE_DEFAULT="10000000" +DBCACHE_LDAP_UPDATE="20000000" +DBCACHE_EDIT_UPDATE="40000000" +DBCACHE_ATTR_CONFIG="nsslapd-dbcachesize" +DBCACHE_ATTR_GUARDIAN="cachesize" + +ldbm_config = "cn=config,%s" % (DN_LDBM) +ldbm_monitor = "cn=database,cn=monitor,%s" % (DN_LDBM) + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=True) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket48906_setup(topology): + """ + Check there is no core + Create a second backend + stop DS (that should trigger the core) + check there is no core + """ + log.info('Testing Ticket 48906 - ns-slapd crashes during the shutdown after adding attribute with a matching rule') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + # check there is no core + entry = topology.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)",['nsslapd-workingdir']) + assert entry + assert entry[0] + assert entry[0].hasAttr('nsslapd-workingdir') + path = entry[0].getValue('nsslapd-workingdir') + cores = fnmatch.filter(os.listdir(path), 'core.*') + assert len(cores) == 0 + + + # add dummy entries on backend + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology.standalone.log.info("Returned %d entries.\n", len(entries)) + + assert MAX_OTHERS == len(entries) + + topology.standalone.log.info('%d person entries are successfully created under %s.' 
% (len(entries), SUFFIX)) + +def _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False): + entries = topology.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config') + if required: + assert(entries[0].hasValue(attr)) + elif entries[0].hasValue(attr): + assert(entries[0].getValue(attr) == expected_value) + +def _check_monitored_value(topology, expected_value): + entries = topology.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)') + assert(entries[0].hasValue(DBLOCK_ATTR_MONITOR) and entries[0].getValue(DBLOCK_ATTR_MONITOR) == expected_value) + +def _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE): + dse_ref_ldif = topology.standalone.confdir + '/dse.ldif' + dse_ref = open(dse_ref_ldif, "r") + + # Check the DBLOCK in dse.ldif + value=None + while True: + line = dse_ref.readline() + if (line == ''): + break + elif attr in line.lower(): + value = line.split()[1] + assert(value == expected_value) + break + assert(value) + +def _check_guardian_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=None): + guardian_file = topology.standalone.dbdir + '/db/guardian' + assert(os.path.exists(guardian_file)) + guardian = open(guardian_file, "r") + + value=None + while True: + line = guardian.readline() + if (line == ''): + break + elif attr in line.lower(): + value = line.split(':')[1].replace("\n", "") + print "line" + print line + print "expected_value" + print expected_value + print "value" + print value + assert(str(value) == str(expected_value)) + break + assert(value) + +def test_ticket48906_dblock_default(topology): + topology.standalone.log.info('###################################') + topology.standalone.log.info('###') + topology.standalone.log.info('### Check that before any change config/monitor') + topology.standalone.log.info('### contains the default value') + topology.standalone.log.info('###') + topology.standalone.log.info('###################################') + _check_monitored_value(topology, DBLOCK_DEFAULT) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_DEFAULT, required=False) + _check_configured_value(topology, attr=DBCACHE_ATTR_CONFIG, expected_value=DBCACHE_DEFAULT, required=False) + + +def test_ticket48906_dblock_ldap_update(topology): + topology.standalone.log.info('###################################') + topology.standalone.log.info('###') + topology.standalone.log.info('### Check that after ldap update') + topology.standalone.log.info('### - monitor contains DEFAULT') + topology.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') + topology.standalone.log.info('### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE') + topology.standalone.log.info('### - After stop guardian contains DEFAULT') + topology.standalone.log.info('### In fact guardian should differ from config to recreate the env') + topology.standalone.log.info('### Check that after restart (DBenv recreated)') + topology.standalone.log.info('### - monitor contains DBLOCK_LDAP_UPDATE ') + topology.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') + topology.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') + topology.standalone.log.info('###') + topology.standalone.log.info('###################################') + + topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, DBLOCK_LDAP_UPDATE)]) + _check_monitored_value(topology, DBLOCK_DEFAULT) + _check_configured_value(topology, 
attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + topology.standalone.stop(timeout=10) + _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_DEFAULT) + + # Check that the value is the same after restart and recreate + topology.standalone.start(timeout=10) + _check_monitored_value(topology, DBLOCK_LDAP_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + +def test_ticket48906_dblock_edit_update(topology): + topology.standalone.log.info('###################################') + topology.standalone.log.info('###') + topology.standalone.log.info('### Check that after stop') + topology.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') + topology.standalone.log.info('### - guardian contains DBLOCK_LDAP_UPDATE') + topology.standalone.log.info('### Check that edit dse+restart') + topology.standalone.log.info('### - monitor contains DBLOCK_EDIT_UPDATE') + topology.standalone.log.info('### - configured contains DBLOCK_EDIT_UPDATE') + topology.standalone.log.info('### Check that after stop') + topology.standalone.log.info('### - dse.ldif contains DBLOCK_EDIT_UPDATE') + topology.standalone.log.info('### - guardian contains DBLOCK_EDIT_UPDATE') + topology.standalone.log.info('###') + topology.standalone.log.info('###################################') + + topology.standalone.stop(timeout=10) + _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_LDAP_UPDATE) + + dse_ref_ldif = topology.standalone.confdir + '/dse.ldif' + dse_new_ldif = topology.standalone.confdir + '/dse.ldif.new' + dse_ref = open(dse_ref_ldif, "r") + dse_new = open(dse_new_ldif, "w") + + # Change the DBLOCK in dse.ldif + value=None + while True: + line = dse_ref.readline() + if (line == ''): + break + elif DBLOCK_ATTR_CONFIG in line.lower(): + value = line.split()[1] + assert(value == DBLOCK_LDAP_UPDATE) + new_value = [line.split()[0], DBLOCK_EDIT_UPDATE, ] + new_line = "%s\n" % " ".join(new_value) + else: + new_line = line + dse_new.write(new_line) + + assert(value) + dse_ref.close() + dse_new.close() + shutil.move(dse_new_ldif, dse_ref_ldif) + + # Check that the value is the same after restart + topology.standalone.start(timeout=10) + _check_monitored_value(topology, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) + + topology.standalone.stop(timeout=10) + _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE) + _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) + +def test_ticket48906_dblock_robust(topology): + topology.standalone.log.info('###################################') + topology.standalone.log.info('###') + topology.standalone.log.info('### Check that the following values are rejected') + topology.standalone.log.info('### - negative value') + topology.standalone.log.info('### - insuffisant value') + topology.standalone.log.info('### - invalid value') + topology.standalone.log.info('### Check that minimum value is accepted') + topology.standalone.log.info('###') + 
topology.standalone.log.info('###################################') + + topology.standalone.start(timeout=10) + _check_monitored_value(topology, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) + + # Check negative value + try: + topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, "-1")]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # Check insuffisant value + too_small = int(DBLOCK_MIN_UPDATE) - 1 + try: + topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, str(too_small))]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # Check invalid value + try: + topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, "dummy")]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + #now check the minimal value + topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, DBLOCK_MIN_UPDATE)]) + _check_monitored_value(topology, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) + + topology.standalone.stop(timeout=10) + _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE) + _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) + + topology.standalone.start(timeout=10) + _check_monitored_value(topology, DBLOCK_MIN_UPDATE) + _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) + +def text_ticket48906_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48906_setup(topo) + test_ticket48906_dblock_default(topo) + test_ticket48906_dblock_ldap_update(topo) + test_ticket48906_dblock_edit_update(topo) + test_ticket48906_dblock_robust(topo) + test_ticket48906_final(topo) + + +if __name__ == '__main__': + run_isolated() + + diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index 2d77a8aec..fe9e345c5 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -537,6 +537,7 @@ struct ldbminfo { char *li_directory; int li_reslimit_lookthrough_handle; size_t li_dbcachesize; + int li_dblock; int li_dbncache; int li_import_cache_autosize; /* % of free memory to use * for the import caches @@ -566,11 +567,12 @@ struct ldbminfo { int li_noparentcheck; /* check if parent exists on * add */ - /* the next 2 fields are for the params that don't get changed until + /* the next 3 fields are for the params that don't get changed until * the server is restarted (used by the admin console) */ char *li_new_directory; size_t li_new_dbcachesize; + int li_new_dblock; int li_new_dbncache; diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 3c07316fe..95b37dd1a 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -1347,6 +1347,7 @@ dblayer_start(struct ldbminfo *li, int dbmode) * li_directory needs to live beyond dblayer. */ priv->dblayer_home_directory = slapi_ch_strdup(li->li_directory); priv->dblayer_cachesize = li->li_dbcachesize; + priv->dblayer_lock_config = li->li_dblock; priv->dblayer_file_mode = li->li_mode; priv->dblayer_ncache = li->li_dbncache; PR_Unlock(li->li_config_mutex); @@ -1482,13 +1483,13 @@ dblayer_start(struct ldbminfo *li, int dbmode) } if (priv->dblayer_lock_config != priv->dblayer_previous_lock_config) { /* - * The default value of nsslapd-db-locks is 10000. + * The default value of nsslapd-db-locks is BDB_LOCKS_MIN. * We don't allow lower value than that. */ - if (priv->dblayer_lock_config <= 10000) { - LDAPDebug0Args(LDAP_DEBUG_ANY, LOG_ERR, "New max db lock count is too small. " - "Resetting it to the default value 10000.\n"); - priv->dblayer_lock_config = 10000; + if (priv->dblayer_lock_config <= BDB_LOCK_NB_MIN) { + LDAPDebug1Arg(LDAP_DEBUG_ANY, LOG_ERR, "New max db lock count is too small. 
" + "Resetting it to the default value %d.\n", BDB_LOCK_NB_MIN); + priv->dblayer_lock_config = BDB_LOCK_NB_MIN; } if (priv->dblayer_lock_config != priv->dblayer_previous_lock_config) { LDAPDebug(LDAP_DEBUG_ANY, LOG_ERR, "resizing max db lock count: %d -> %d\n", @@ -6658,6 +6659,7 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char * slapi_ch_free_string(&priv->dblayer_home_directory); priv->dblayer_home_directory = slapi_ch_strdup(li->li_directory); priv->dblayer_cachesize = li->li_dbcachesize; + priv->dblayer_lock_config = li->li_dblock; priv->dblayer_ncache = li->li_dbncache; priv->dblayer_file_mode = li->li_mode; PR_Unlock(li->li_config_mutex); diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h index fd1e04388..e02e6e0b0 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.h +++ b/ldap/servers/slapd/back-ldbm/dblayer.h @@ -148,6 +148,7 @@ struct dblayer_private PRLock *thread_count_lock; /* lock for thread_count_cv */ PRCondVar *thread_count_cv; /* condition variable for housekeeping thread shutdown */ int dblayer_lockdown; /* use DB_LOCKDOWN */ +#define BDB_LOCK_NB_MIN 10000 int dblayer_lock_config; int dblayer_previous_lock_config;/* Max lock count when we last shut down-- * used to determine if we delete the mpool */ diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c index 351dccec6..e843aaef9 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c @@ -1009,7 +1009,7 @@ static void *ldbm_config_db_lock_get(void *arg) { struct ldbminfo *li = (struct ldbminfo *) arg; - return (void *) ((uintptr_t)li->li_dblayer_private->dblayer_lock_config); + return (void *) ((uintptr_t)li->li_new_dblock); } @@ -1019,12 +1019,20 @@ static int ldbm_config_db_lock_set(void *arg, void *value, char *errorbuf, int p int retval = LDAP_SUCCESS; size_t val = (size_t) value; + if (val < BDB_LOCK_NB_MIN) { + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: Invalid value for %s (%d). 
Must be greater than %d\n", + CONFIG_DB_LOCK, val, BDB_LOCK_NB_MIN); + LDAPDebug2Args(LDAP_DEBUG_ANY, LOG_ERR, "Error: Invalid value for %s (%d)\n", + CONFIG_DB_LOCK, val); + return LDAP_UNWILLING_TO_PERFORM; + } if (apply) { if (CONFIG_PHASE_RUNNING == phase) { - li->li_dblayer_private->dblayer_lock_config = val; + li->li_new_dblock = val; LDAPDebug(LDAP_DEBUG_ANY, LOG_ERR, "New db max lock count will not take affect until the server is restarted\n", 0, 0, 0); } else { - li->li_dblayer_private->dblayer_lock_config = val; + li->li_new_dblock = val; + li->li_dblock = val; } } @@ -1508,7 +1516,7 @@ static config_info ldbm_config[] = { {CONFIG_DB_VERBOSE, CONFIG_TYPE_ONOFF, "off", &ldbm_config_db_verbose_get, &ldbm_config_db_verbose_set, 0}, {CONFIG_DB_DEBUG, CONFIG_TYPE_ONOFF, "on", &ldbm_config_db_debug_get, &ldbm_config_db_debug_set, 0}, {CONFIG_DB_NAMED_REGIONS, CONFIG_TYPE_ONOFF, "off", &ldbm_config_db_named_regions_get, &ldbm_config_db_named_regions_set, 0}, - {CONFIG_DB_LOCK, CONFIG_TYPE_INT, "10000", &ldbm_config_db_lock_get, &ldbm_config_db_lock_set, CONFIG_FLAG_ALWAYS_SHOW}, + {CONFIG_DB_LOCK, CONFIG_TYPE_INT, "10000", &ldbm_config_db_lock_get, &ldbm_config_db_lock_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, {CONFIG_DB_PRIVATE_MEM, CONFIG_TYPE_ONOFF, "off", &ldbm_config_db_private_mem_get, &ldbm_config_db_private_mem_set, 0}, {CONFIG_DB_PRIVATE_IMPORT_MEM, CONFIG_TYPE_ONOFF, "on", &ldbm_config_db_private_import_mem_get, &ldbm_config_db_private_import_mem_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, {CONDIF_DB_ONLINE_IMPORT_ENCRYPT, CONFIG_TYPE_ONOFF, "on", &ldbm_config_db_online_import_encrypt_get, &ldbm_config_db_online_import_encrypt_set, 0},
0
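The _check_guardian_value() helper in the test above still uses Python 2 print statements and leaves its debug output in place. A cleaner Python 3 sketch of the same check is shown here; the guardian file location under dbdir and the "key:value" line format are assumptions taken from that test, and the helper name is hypothetical.

import os

def read_guardian_value(dbdir, attr='locks'):
    # The guardian file is written at clean shutdown as simple 'key:value'
    # lines; the test relies on 'locks' holding the lock count the BDB
    # environment was created with. Returns the value as a string, or None.
    guardian_file = os.path.join(dbdir, 'db', 'guardian')
    if not os.path.exists(guardian_file):
        return None
    with open(guardian_file, 'r') as guardian:
        for line in guardian:
            key, _, value = line.partition(':')
            if key.strip().lower() == attr:
                return value.strip()
    return None

# Example (hypothetical path):
# assert read_guardian_value('/var/lib/dirsrv/slapd-standalone') == '10000'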
b74780e4dbbc9c3ef894accc3c9696a1a2a8bc4b
389ds/389-ds-base
Ticket 50649 - lib389 without defaults.inf Bug Description: When deploying 389-ds-portal to a container, I noticed that lib389 would fail with "unable to locate defaults.inf". This means we need some way to proceed sanely when defaults.inf cannot be found. Fix Description: Re-arrange parts of the init functions so that sub-objects are initialized later, and allow Paths to read the version from the online server instead. Paths now also records whether the instance is local or remote. https://pagure.io/389-ds-base/issue/50649 Author: William Brown <[email protected]> Review by: spichugi (Thanks!)
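The "access versions online" part of the fix comes down to reading vendorVersion from the root DSE and trimming it to a bare version string. A small sketch of that post-processing follows; the sample input string is copied from the patch below, while the standalone helper name is hypothetical (in the patch this happens inline in Paths.__getattr__).

def version_from_vendor_string(vendor_version):
    # vendorVersion looks like:
    #   '389-Directory/1.4.2.2.20191031git8166d8345 B2019.304.19'
    # and callers such as ds_is_older() only want the part after the slash
    # and before the build tag: '1.4.2.2.20191031git8166d8345'.
    return vendor_version.split('/')[1].split()[0]

# Example:
# version_from_vendor_string('389-Directory/1.4.2.2.20191031git8166d8345 B2019.304.19')
# returns '1.4.2.2.20191031git8166d8345'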
commit b74780e4dbbc9c3ef894accc3c9696a1a2a8bc4b Author: William Brown <[email protected]> Date: Fri Nov 1 09:17:26 2019 +1000 Ticket 50649 - lib389 without defaults.inf Bug Description: In deploying 389-ds-portal to a container, I notice that lib389 would fail with "unable to locate defaults.inf". This means we need some way to "proceed" sanely when we can't find defaults.inf. Fix Description: Re-arrange some parts of the init functions to make subobjects init later, and have paths able to access versions online instead. This also flags in paths if we are local or remote. https://pagure.io/389-ds-base/issue/50649 Author: William Brown <[email protected]> Review by: spichugi (Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 0d1ab5747..4f4705e91 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -402,10 +402,11 @@ class DirSrv(SimpleLDAPObject, object): self.confdir = None - self.ds_paths = Paths(instance=self) + # We can't assume the paths state yet ... + self.ds_paths = Paths(instance=self, local=False) # Set the default systemd status. This MAY be overidden in the setup utils # as required. - self.systemd = self.ds_paths.with_systemd + # self.systemd = self.ds_paths.with_systemd # Reset the args (py.test reuses the args_instance for each test case) # We allocate a "default" prefix here which allows an un-allocate or @@ -416,7 +417,6 @@ class DirSrv(SimpleLDAPObject, object): # self.ds_paths.prefix = args_instance[SER_DEPLOYED_DIR] self.__wrapmethods() - self.__add_brookers__() def __str__(self): """XXX and in SSL case?""" @@ -441,7 +441,7 @@ class DirSrv(SimpleLDAPObject, object): # The lack of this value basically rules it out in most cases self.isLocal = True - self.ds_paths = Paths(serverid, instance=self) + self.ds_paths = Paths(serverid, instance=self, local=self.isLocal) self.serverid = serverid # Do we have ldapi settings? @@ -487,7 +487,7 @@ class DirSrv(SimpleLDAPObject, object): self.log.debug('SER_SERVERID_PROP not provided, assuming non-local instance') # The lack of this value basically rules it out in most cases self.isLocal = False - self.ds_paths = Paths(instance=self) + self.ds_paths = Paths(instance=self, local=self.isLocal) # Do we have ldapi settings? # Do we really need .strip() on this? @@ -532,13 +532,13 @@ class DirSrv(SimpleLDAPObject, object): raise ValueError("invalid state for calling allocate: %s" % self.state) - self.isLocal = False + self.isLocal = True if SER_SERVERID_PROP not in args: self.log.debug('SER_SERVERID_PROP not provided, assuming non-local instance') # The lack of this value basically rules it out in most cases - self.ds_paths = Paths(instance=self) + self.ds_paths = Paths(instance=self, local=self.isLocal) else: - self.ds_paths = Paths(serverid=args[SER_SERVERID_PROP], instance=self) + self.ds_paths = Paths(serverid=args[SER_SERVERID_PROP], instance=self, local=self.isLocal) # Settings from args of server attributes self.serverid = args.get(SER_SERVERID_PROP, None) # Probably local? @@ -1082,9 +1082,11 @@ class DirSrv(SimpleLDAPObject, object): Authenticated, now finish the initialization """ self.log.debug("open(): bound as %s", self.binddn) - if not connOnly: + if not connOnly and self.isLocal: self.__initPart2() self.state = DIRSRV_STATE_ONLINE + # Now that we're online, some of our methods may try to query the version online. 
+ self.__add_brookers__() def close(self): ''' @@ -1710,7 +1712,7 @@ class DirSrv(SimpleLDAPObject, object): return self.ds_paths.asan_enabled def with_systemd(self): - return self.systemd + return self.ds_paths.with_systemd def get_server_tls_subject(self): """ Get the servers TLS subject line for enrollment purposes. diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py index 4d3acf8e8..9922feda7 100644 --- a/src/lib389/lib389/idm/user.py +++ b/src/lib389/lib389/idm/user.py @@ -53,7 +53,7 @@ class nsUserAccount(Account): :type dn: str """ def __init__(self, instance, dn=None): - if ds_is_older('1.4.0'): + if ds_is_older('1.4.0', instance=instance): raise Exception("Not supported") super(nsUserAccount, self).__init__(instance, dn) self._rdn_attribute = RDN @@ -154,11 +154,11 @@ class UserAccount(Account): 'inetOrgPerson', 'organizationalPerson', ] - if ds_is_older('1.3.7'): + if ds_is_older('1.3.7', instance=instance): self._create_objectclasses.append('inetUser') else: self._create_objectclasses.append('nsMemberOf') - if not ds_is_older('1.4.0'): + if not ds_is_older('1.4.0', instance=instance): self._create_objectclasses.append('nsAccount') user_compare_exclude = [ 'nsUniqueId', diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index 073c7c79d..c1a85e595 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -807,8 +807,8 @@ class SetupDs(object): # Should I move this import? I think this prevents some recursion from lib389 import DirSrv ds_instance = DirSrv(self.verbose) - if self.containerised: - ds_instance.systemd = general['systemd'] + # if self.containerised: + # ds_instance.systemd = general['systemd'] args = { SER_PORT: slapd['port'], SER_SERVERID_PROP: slapd['instance_name'], diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py index 290cad5e2..283c9fbe8 100644 --- a/src/lib389/lib389/monitor.py +++ b/src/lib389/lib389/monitor.py @@ -118,7 +118,7 @@ class MonitorLDBM(DSLdapObject): 'dbcacheroevict', 'dbcacherwevict', ] - if not ds_is_older("1.4.0"): + if not ds_is_older("1.4.0", instance=instance): self._backend_keys.extend([ 'normalizeddncachetries', 'normalizeddncachehits', diff --git a/src/lib389/lib389/paths.py b/src/lib389/lib389/paths.py index b1734ed22..ebada88b4 100644 --- a/src/lib389/lib389/paths.py +++ b/src/lib389/lib389/paths.py @@ -82,13 +82,14 @@ CONFIG_MAP = { 'access_log' : ('cn=config', 'nsslapd-accesslog'), 'audit_log' : ('cn=config', 'nsslapd-auditlog'), 'ldapi': ('cn=config', 'nsslapd-ldapifilepath'), + 'version': ('', 'vendorVersion'), } SECTION = 'slapd' class Paths(object): - def __init__(self, serverid=None, instance=None): + def __init__(self, serverid=None, instance=None, local=True): """ Parses and uses a set of default paths from wellknown locations. The list of keys available is from the MUST attribute in this module. @@ -113,6 +114,7 @@ class Paths(object): self._config = None self._serverid = serverid self._instance = instance + self._islocal = local def _get_defaults_loc(self, search_paths): ## THIS IS HOW WE HANDLE A PREFIX INSTALL @@ -148,7 +150,7 @@ class Paths(object): def __getattr__(self, name): from lib389.utils import ensure_str - if self._defaults_cached is False: + if self._defaults_cached is False and self._islocal: self._read_defaults() self._validate_defaults() # Are we online? Is our key in the config map? 
@@ -158,8 +160,14 @@ class Paths(object): ent = self._instance.getEntry(dn, attrlist=[attr,]) # If the server doesn't have it, fall back to our configuration. if attr is not None: - return ensure_str(ent.getValue(attr)) - + v = ensure_str(ent.getValue(attr)) + # Do we need to post-process the value? + if name == 'version': + # We need to post process this - it's 389-Directory/1.4.2.2.20191031git8166d8345 B2019.304.19 + # but we need a string like: 1.4.2.2.20191031git8166d8345 + v = v.split('/')[1].split()[0] + return v + # Else get from the config if self._serverid is not None: return ensure_str(self._config.get(SECTION, name).format(instance_name=self._serverid)) else: @@ -167,7 +175,7 @@ class Paths(object): @property def asan_enabled(self): - if self._defaults_cached is False: + if self._defaults_cached is False and self._islocal: self._read_defaults() self._validate_defaults() if self._config.has_option(SECTION, 'asan_enabled'): @@ -177,7 +185,7 @@ class Paths(object): @property def with_systemd(self): - if self._defaults_cached is False: + if self._defaults_cached is False and self._islocal: self._read_defaults() self._validate_defaults() if self._is_container: @@ -190,10 +198,10 @@ class Paths(object): @property def perl_enabled(self): - if self._defaults_cached is False: + if self._defaults_cached is False and self._islocal: self._read_defaults() self._validate_defaults() if self._config.has_option(SECTION, 'enable_perl'): - if self._config.get(SECTION, 'enable_perl') == 'no': - return False - return True + if self._config.get(SECTION, 'enable_perl') == 'yes': + return True + return False diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py index 36422dd2c..f0c94a29a 100644 --- a/src/lib389/lib389/utils.py +++ b/src/lib389/lib389/utils.py @@ -1060,7 +1060,7 @@ def generate_ds_params(inst_num, role=ReplicaRole.STANDALONE): return instance_data -def get_ds_version(): +def get_ds_version(paths=None): """ Return version of ns-slapd installed on this system. This is determined by the defaults.inf file, so is correct for the built and installed ns-slapd binary. This function works without @@ -1068,16 +1068,21 @@ def get_ds_version(): returns a string of the form: 1.3.4.8 """ - p = Paths() - return p.version + if paths is None: + paths = Paths() + return paths.version -def ds_is_related(relation, ds_ver, *ver): +def ds_is_related(relation, *ver, instance=None): """ Return a result of a comparison between the current version of ns-slapd and a provided version. """ ops = {'older': operator.lt, 'newer': operator.ge} + if instance is None: + ds_ver = get_ds_version() + else: + ds_ver = get_ds_version(instance.ds_paths) if len(ver) > 1: for cmp_ver in ver: if cmp_ver.startswith(ds_ver[:3]): @@ -1086,20 +1091,18 @@ def ds_is_related(relation, ds_ver, *ver): return ops[relation](LegacyVersion(ds_ver), LegacyVersion(ver[0])) -def ds_is_older(*ver): +def ds_is_older(*ver, instance=None): """ Return True if the current version of ns-slapd is older than a provided version """ - ds_ver = get_ds_version() - return ds_is_related('older', ds_ver, *ver) + return ds_is_related('older', *ver, instance=instance) -def ds_is_newer(*ver): +def ds_is_newer(*ver, instance=None): """ Return True if the current version of ns-slapd is newer than a provided version """ - ds_ver = get_ds_version() - return ds_is_related('newer', ds_ver, *ver) + return ds_is_related('newer', *ver, instance=instance) def gentime_to_datetime(gentime):
0
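In practice the change above means version checks can be pointed at a connected instance instead of the local defaults.inf. A hedged usage sketch, with the instance setup elided and the behaviour inferred from the patch:

from lib389.utils import ds_is_older, ds_is_newer

# Without an instance, the version still comes from the local defaults.inf
# via Paths(), exactly as before the patch.
if ds_is_older('1.4.0'):
    print('local ns-slapd predates 1.4.0')

# With an instance, Paths reads vendorVersion from the connected server's
# root DSE, so the check works even when no defaults.inf is installed
# (for example lib389 in a container talking to a remote Directory Server).
# Here `inst` is assumed to be an already-opened DirSrv object:
# if ds_is_newer('1.3.7', instance=inst):
#     ...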
5ed5f873dc21e6c358ac70b6e21d710fdb391ce2
389ds/389-ds-base
Ticket 50056 - Fix CLI/UI bugs Description: Fix several issues discovered during QE testing https://bugzilla.redhat.com/show_bug.cgi?id=1654101 - dscreate issues https://bugzilla.redhat.com/show_bug.cgi?id=1654585 - dsidm sys ext error https://bugzilla.redhat.com/show_bug.cgi?id=1654105 - dsconf related issues https://bugzilla.redhat.com/show_bug.cgi?id=1654116 - dsctl remove: confirm with "Yes" https://bugzilla.redhat.com/show_bug.cgi?id=1654134 - backups fixed https://bugzilla.redhat.com/show_bug.cgi?id=1654451 - dscreate permissions and selinux issues https://bugzilla.redhat.com/show_bug.cgi?id=1654566 - dbtasks no attr _instance https://bugzilla.redhat.com/show_bug.cgi?id=1631461 - selinux reserved ports https://bugzilla.redhat.com/show_bug.cgi?id=1654518 - issues with selinux ports https://bugzilla.redhat.com/show_bug.cgi?id=1654581 - dsidm sys ext error https://bugzilla.redhat.com/show_bug.cgi?id=1654577 - check if backup already exists https://bugzilla.redhat.com/show_bug.cgi?id=1654693 - add password option for dsconf tools https://pagure.io/389-ds-base/issue/50056 - dscreate defaults for instance name https://pagure.io/389-ds-base/issue/50056 Reviewed by: spichugi (Thanks!)
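The new -w/-W/-y options all feed the same password-resolution logic in connect_instance(). A simplified sketch of that precedence is shown here; the ordering (password file, then explicit password, then interactive prompt) is taken from the patch below, while the standalone helper and its default arguments are hypothetical.

from getpass import getpass

def resolve_bind_password(bindpw=None, prompt=False, pwdfile=None,
                          binddn='cn=Directory Manager', uri='ldap://localhost'):
    # Mirrors the order used by dsconf/dsidm after this commit:
    # -y (password file) wins over -w, and -W falls back to a prompt.
    if pwdfile is not None:
        try:
            with open(pwdfile, 'r') as f:
                return f.readline().rstrip()
        except EnvironmentError as e:
            raise ValueError("Failed to open password file: " + str(e))
    if bindpw is not None:
        return bindpw
    if prompt:
        return getpass("Enter password for {} on {}: ".format(binddn, uri))
    return None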
commit 5ed5f873dc21e6c358ac70b6e21d710fdb391ce2 Author: Mark Reynolds <[email protected]> Date: Wed Dec 5 15:35:59 2018 -0500 Ticket 50056 - Fix CLI/UI bugs Description: Fix several issues discovered during QE testing https://bugzilla.redhat.com/show_bug.cgi?id=1654101 - dscreate issues https://bugzilla.redhat.com/show_bug.cgi?id=1654585 - dsidm sys ext error https://bugzilla.redhat.com/show_bug.cgi?id=1654105 - dsconf related issues https://bugzilla.redhat.com/show_bug.cgi?id=1654116 - dsctl remove "confirm with "Yes" https://bugzilla.redhat.com/show_bug.cgi?id=1654134 - backups fixed https://bugzilla.redhat.com/show_bug.cgi?id=1654451 - dscreate permissions and selinux issues https://bugzilla.redhat.com/show_bug.cgi?id=1654566 - dbtasks no attr _instance https://bugzilla.redhat.com/show_bug.cgi?id=1631461 - selinux reserved ports https://bugzilla.redhat.com/show_bug.cgi?id=1654518 - issues with selinux ports https://bugzilla.redhat.com/show_bug.cgi?id=1654581 - dsidm sys ext error https://bugzilla.redhat.com/show_bug.cgi?id=1654577 - check if backup already exists https://bugzilla.redhat.com/show_bug.cgi?id=1654693 - add password option for dsconf tools https://pagure.io/389-ds-base/issue/50056 - dscreate defaults for instance name https://pagure.io/389-ds-base/issue/50056 Reviewed by: spichugi(Thanks!) diff --git a/src/cockpit/389-console/src/servers.js b/src/cockpit/389-console/src/servers.js index 8dbd1bff5..10c471582 100644 --- a/src/cockpit/389-console/src/servers.js +++ b/src/cockpit/389-console/src/servers.js @@ -404,6 +404,39 @@ function save_config() { } } +function do_backup(server_inst, backup_name) { + var cmd = ['status-dirsrv', server_inst]; + $("#backup-spinner").show(); + cockpit.spawn(cmd, { superuser: true}). + done(function() { + var cmd = [DSCONF, server_inst, 'backup', 'create', backup_name]; + log_cmd('#ds-backup-btn (click)', 'Backup server instance', cmd); + cockpit.spawn(cmd, { superuser: true, "err": "message", "environ": [ENV]}). + done(function(data) { + $("#backup-spinner").hide(); + popup_success("Backup has been created"); + $("#backup-form").modal('toggle'); + }). + fail(function(data) { + $("#backup-spinner").hide(); + popup_err("Failed to backup the server", data.message); + }) + }). + fail(function() { + var cmd = [DSCTL, server_inst, 'db2bak', backup_name]; + log_cmd('#ds-backup-btn (click)', 'Backup server instance (offline)', cmd); + cockpit.spawn(cmd, { superuser: true, "err": "message", "environ": [ENV]}). + done(function(data) { + $("#backup-spinner").hide(); + popup_success("Backup has been created"); + $("#backup-form").modal('toggle'); + }). + fail(function(data) { + $("#backup-spinner").hide(); + popup_err("Failed to backup the server", data.message); + }); + }); +} /* * load the server config pages @@ -1253,36 +1286,30 @@ $(document).ready( function() { "Backups are written to the server's backup directory (nsslapd-bakdir)"); return; } - var cmd = ['status-dirsrv', server_inst]; - $("#backup-spinner").show(); - cockpit.spawn(cmd, { superuser: true}). - done(function() { - var cmd = [DSCONF, server_inst, 'backup', 'create', backup_name]; - log_cmd('#ds-backup-btn (click)', 'Backup server instance', cmd); - cockpit.spawn(cmd, { superuser: true, "err": "message", "environ": [ENV]}). - done(function(data) { - $("#backup-spinner").hide(); - popup_success("Backup has been created"); - $("#backup-form").modal('toggle'); - }). - fail(function(data) { - $("#backup-spinner").hide(); - popup_err("Failed to backup the server", data.message); - }) - }). 
- fail(function() { - var cmd = [DSCTL, server_inst, 'db2bak', backup_name]; - log_cmd('#ds-backup-btn (click)', 'Backup server instance (offline)', cmd); - cockpit.spawn(cmd, { superuser: true, "err": "message", "environ": [ENV]}). - done(function(data) { - $("#backup-spinner").hide(); - popup_success("Backup has been created"); - $("#backup-form").modal('toggle'); - }). - fail(function(data) { - $("#backup-spinner").hide(); - popup_err("Failed to backup the server", data.message); - }); + + // First check if backup name is already used + var check_cmd = [DSCTL, '-j', server_id, 'backups']; + log_cmd('#restore-server-btn (click)', 'Restore server instance', check_cmd); + cockpit.spawn(check_cmd, { superuser: true, "err": "message", "environ": [ENV]}).done(function(data) { + var obj = JSON.parse(data); + var found_backup = false; + for (var i = 0; i < obj.items.length; i++) { + if (obj.items[i][0] == backup_name) { + found_backup = true; + break; + } + } + if (found_backup) { + popup_confirm("A backup already exists with this name, replace it?", "Confirmation", function (yes) { + if (yes) { + do_backup(server_inst, backup_name); + } else { + return; + } + }); + } else { + do_backup(server_inst, backup_name); + } }); }); @@ -1292,7 +1319,7 @@ $(document).ready( function() { /* Restore. load restore table with current backups */ $("#restore-server-btn").on('click', function () { - var cmd = [DSCTL, server_id, '-j', 'backups']; + var cmd = [DSCTL, '-j', server_id, 'backups']; log_cmd('#restore-server-btn (click)', 'Restore server instance', cmd); cockpit.spawn(cmd, { superuser: true, "err": "message", "environ": [ENV]}).done(function(data) { var backup_btn = "<button class=\"btn btn-default restore-btn\" type=\"button\">Restore</button>"; diff --git a/src/lib389/cli/dsconf b/src/lib389/cli/dsconf index c96346724..33b3b5a4b 100755 --- a/src/lib389/cli/dsconf +++ b/src/lib389/cli/dsconf @@ -51,6 +51,18 @@ parser.add_argument('-D', '--binddn', help="The account to bind as for executing operations", default=None ) +parser.add_argument('-w', '--bindpw', + help="Password for binddn", + default=None + ) +parser.add_argument('-W', '--prompt', + action='store_true', default=False, + help="Prompt for password for the bind DN" + ) +parser.add_argument('-y', '--pwdfile', + help="Specifies a file containing the password for the binddn", + default=None + ) parser.add_argument('-b', '--basedn', help="Basedn (root naming context) of the instance to manage", default=None @@ -126,7 +138,7 @@ if __name__ == '__main__': inst = None result = False try: - inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose) + inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args) result = args.func(inst, None, log, args) if args.verbose: log.info("Command successful.") diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm index e67a08152..386bc11ca 100755 --- a/src/lib389/cli/dsidm +++ b/src/lib389/cli/dsidm @@ -43,6 +43,18 @@ parser.add_argument('-D', '--binddn', help="The account to bind as for executing operations", default=None ) +parser.add_argument('-w', '--bindpw', + help="Password for binddn", + default=None + ) +parser.add_argument('-W', '--prompt', + action='store_true', default=False, + help="Prompt for password for binddn" + ) +parser.add_argument('-y', '--pwdfile', + help="Specifies a file containing the password for the bind DN", + default=None + ) parser.add_argument('-Z', '--starttls', help="Connect with StartTLS", default=False, action='store_true' @@ -98,7 +110,7 @@ if 
__name__ == '__main__': if dsrc_inst['basedn'] is None: log.error("Must provide a basedn!") - sys.ext(1) + sys.exit(1) if not args.verbose: signal.signal(signal.SIGINT, signal_handler) @@ -109,7 +121,7 @@ if __name__ == '__main__': inst = None result = False try: - inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose) + inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args) result = args.func(inst, dsrc_inst['basedn'], log, args) if args.verbose: log.info("Command successful.") diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index ad09fc458..62fc3243a 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -2856,9 +2856,9 @@ class DirSrv(SimpleLDAPObject, object): cmd.append('-a') tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") if bename: - ldifname = os.path.join(self._instance.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self._instance.serverid, be_name, tnow)) + ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, be_name, tnow)) else: - ldifname = os.path.join(self._instance.ds_paths.ldif_dir, "%s-%s.ldif" % (self._instance.serverid, tnow)) + ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s.ldif" % (self.serverid, tnow)) cmd.append(ldifname) try: result = subprocess.check_output(cmd, encoding='utf-8') diff --git a/src/lib389/lib389/cli_base/__init__.py b/src/lib389/lib389/cli_base/__init__.py index 664a9beb9..b4acb499a 100644 --- a/src/lib389/lib389/cli_base/__init__.py +++ b/src/lib389/lib389/cli_base/__init__.py @@ -79,7 +79,7 @@ def _warn(data, msg=None): return data -def connect_instance(dsrc_inst, verbose): +def connect_instance(dsrc_inst, verbose, args): dsargs = dsrc_inst['args'] if '//' not in dsargs['ldapurl']: # Connecting to the local instance @@ -99,10 +99,25 @@ def connect_instance(dsrc_inst, verbose): raise ValueError("Could not find configuration for instance: " + dsargs['ldapurl']) ds = DirSrv(verbose=verbose) ds.allocate(dsargs) - if not ds.can_autobind() and dsrc_inst['binddn'] is not None: + + if args.pwdfile is not None or args.bindpw is not None or args.prompt is True: + if args.pwdfile is not None: + # Read password from file + try: + with open(args.pwdfile, "r") as f: + dsargs[SER_ROOT_PW] = f.readline().rstrip() + except EnvironmentError as e: + raise ValueError("Failed to open password file: " + str(e)) + elif args.bindpw is not None: + # Password provided + dsargs[SER_ROOT_PW] = args.bindpw + else: + # No password or we chose to prompt + dsargs[SER_ROOT_PW] = getpass("Enter password for {} on {}: ".format(dsrc_inst['binddn'], dsrc_inst['uri'])) + elif not ds.can_autobind(): + # No LDAPI, prompt for password dsargs[SER_ROOT_PW] = getpass("Enter password for {} on {}: ".format(dsrc_inst['binddn'], dsrc_inst['uri'])) - elif not ds.can_autobind() and dsrc_inst['binddn'] is None: - raise Exception("Must provide a binddn to connect with") + ds.allocate(dsargs) ds.open(saslmethod=dsrc_inst['saslmech'], certdir=dsrc_inst['tls_cacertdir'], diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py index 8f5943393..39a9feda6 100644 --- a/src/lib389/lib389/cli_conf/plugin.py +++ b/src/lib389/lib389/cli_conf/plugin.py @@ -102,9 +102,11 @@ def plugin_enable(inst, basedn, log, args): dn = _get_arg(args.dn, msg="Enter plugin dn to enable") mc = MANY(inst, basedn) o = mc.get(dn=dn) - o.enable() - o_str = o.display() - print('Enabled %s', o_str) + if o.status(): + print('Plugin already enabled') + else: + o.enable() + 
print('Enabled plugin') # Plugin disable @@ -114,9 +116,11 @@ def plugin_disable(inst, basedn, log, args, warn=True): _warn(dn, msg="Disabling %s %s" % (SINGULAR.__name__, dn)) mc = MANY(inst, basedn) o = mc.get(dn=dn) - o.disable() - o_str = o.display() - print('Disabled %s', o_str) + if not o.state(): + print("Plugin already disabled") + else: + o.disable() + print('Disabled plugin') # Plugin configure? @@ -132,22 +136,28 @@ def generic_show(inst, basedn, log, args): def generic_enable(inst, basedn, log, args): plugin = args.plugin_cls(inst) - plugin.enable() - print("Enabled %s", plugin.rdn) + if plugin.status(): + print("Plugin '%s' already enabled" % plugin.rdn) + else: + plugin.enable() + print("Enabled plugin '%s'" % plugin.rdn) def generic_disable(inst, basedn, log, args): plugin = args.plugin_cls(inst) - plugin.disable() - print("Disabled %s", plugin.rdn) + if not plugin.status(): + print("Plugin '%s' already disabled " % plugin.rdn) + else: + plugin.disable() + print("Disabled plugin '%s'" % plugin.rdn) def generic_status(inst, basedn, log, args): plugin = args.plugin_cls(inst) if plugin.status() is True: - print("%s is enabled", plugin.rdn) + print("Plugin '%s' is enabled" % plugin.rdn) else: - print("%s is disabled", plugin.rdn) + print("Plugin '%s' is disabled" % plugin.rdn) def add_generic_plugin_parsers(subparser, plugin_cls): diff --git a/src/lib389/lib389/cli_ctl/instance.py b/src/lib389/lib389/cli_ctl/instance.py index 4825a6160..ae173071d 100644 --- a/src/lib389/lib389/cli_ctl/instance.py +++ b/src/lib389/lib389/cli_ctl/instance.py @@ -135,7 +135,7 @@ def instance_remove_all(log, args): inst_names = get_instance_list(args.remove_all) if len(inst_names) > 0: - answer = input("Are you sure you want to remove all the Directory Server instances? (Yes/no): ") + answer = input("Are you sure you want to remove all the Directory Server instances? Enter \"Yes\" to continue: ") if answer != 'Yes': print("Aborted removal of all instances") return diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py index 21da083e7..70ebfcc11 100644 --- a/src/lib389/lib389/instance/remove.py +++ b/src/lib389/lib389/instance/remove.py @@ -12,7 +12,7 @@ import subprocess from lib389.utils import selinux_label_port -def remove_ds_instance(dirsrv): +def remove_ds_instance(dirsrv, force=False): """ This will delete the instance as it is define. This must be a local instance. """ @@ -37,6 +37,7 @@ def remove_ds_instance(dirsrv): remove_paths['log_dir'] = dirsrv.ds_paths.log_dir # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir remove_paths['tmpfiles_d'] = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf" + remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir marker_path = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid) @@ -66,8 +67,9 @@ def remove_ds_instance(dirsrv): for path_k in remove_paths: _log.debug("Removing %s" % remove_paths[path_k]) shutil.rmtree(remove_paths[path_k], ignore_errors=True) + # Remove parent (/var/lib/dirsrv/slapd-INST) - shutil.rmtree(remove_paths['db_dir'].replace('db', '')) + shutil.rmtree(remove_paths['db_dir'].replace('db', ''), ignore_errors=True) # Finally remove the sysconfig marker. 
os.remove(marker_path) @@ -78,8 +80,16 @@ def remove_ds_instance(dirsrv): subprocess.check_call(["systemctl", "disable", "dirsrv@{}".format(dirsrv.serverid)]) # Remove selinux port label - if dirsrv.port is not None: + _log.debug("Removing the port label") + try: selinux_label_port(dirsrv.port, remove_label=True) + except ValueError as e: + if force: + pass + else: + _log.error(str(e)) + raise e + if dirsrv.sslport is not None: selinux_label_port(dirsrv.sslport, remove_label=True) diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index 2033f5cfb..1a25a4ee6 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -16,7 +16,8 @@ import socket import subprocess import getpass import configparser -from lib389 import _ds_shutil_copytree +import selinux +from lib389 import _ds_shutil_copytree, DirSrv from lib389._constants import * from lib389.properties import * from lib389.passwd import password_hash, password_generate @@ -26,6 +27,7 @@ from lib389.configurations.sample import create_base_domain from lib389.instance.options import General2Base, Slapd2Base, Backend2Base from lib389.paths import Paths from lib389.saslmap import SaslMappings +from lib389.instance.remove import remove_ds_instance from lib389.utils import ( assert_c, is_a_dn, @@ -202,6 +204,36 @@ class SetupDs(object): else: assert_c(False, "Unsupported config_version in section [general]") + def _remove_failed_install(self, serverid): + """The install failed, remove the scraps + :param serverid - The server ID of the instance + """ + inst = DirSrv() + + # Allocate the instance based on name + insts = [] + insts = inst.list(serverid=serverid) + + if len(insts) != 1: + log.error("No such instance to remove {}".format(serverid)) + return + inst.allocate(insts[0]) + remove_ds_instance(inst, force=True) + + def _server_id_taken(self, serverid, prefix='/usr'): + """Check if instance name is already taken + :param serverid - name of the server instance + :param prefix - name of prefix build location + :return True - if the serfver id is already in use + False - if the server id is available + """ + if prefix != "/usr": + inst_dir = prefix + "/etc/dirsrv/slapd-" + serverid + else: + inst_dir = "/etc/dirsrv/slapd-" + serverid + + return os.path.isdir(inst_dir) + def create_from_cli(self): # Ask questions to generate general, slapd, and backends print('Install Directory Server (interactive mode)') @@ -268,8 +300,16 @@ class SetupDs(object): # Instance name - adjust defaults once set while 1: slapd['instance_name'] = general['full_machine_name'].split('.', 1)[0] + + # Check if default server id is taken + if self._server_id_taken(slapd['instance_name'], prefix=slapd['prefix']): + slapd['instance_name'] = "" + val = input('\nEnter the instance name [{}]: '.format(slapd['instance_name'])) if val != "": + if not all(ord(c) < 128 for c in val): + print("Server identifier can not contain non ascii characters") + continue if ' ' in val: print("Server identifier can not contain a space") continue @@ -284,21 +324,18 @@ class SetupDs(object): continue # Check if server id is taken - inst_dir = slapd['config_dir'] + "/" + val - if os.path.isdir(inst_dir): + if self._server_id_taken(val, prefix=slapd['prefix']): print("Server identifier \"{}\" is already taken, please choose a new name".format(val)) continue # instance name is good slapd['instance_name'] = val break + elif slapd['instance_name'] == "": + continue else: # Check if default server id is taken - if slapd['prefix'] != 
"/usr": - inst_dir = slapd['prefix'] + slapd['config_dir'] + "/" + slapd['instance_name'] - else: - inst_dir = slapd['config_dir'] + "/" + slapd['instance_name'] - if os.path.isdir(inst_dir): + if self._server_id_taken(slapd['instance_name'], prefix=slapd['prefix']): print("Server identifier \"{}\" is already taken, please choose a new name".format(slapd['instance_name'])) continue break @@ -404,21 +441,22 @@ class SetupDs(object): break # Add sample entries? - while 1: - val = input("\nCreate sample entries in the suffix [no]: ".format(suffix)) - if val != "": - if val.lower() == "no" or val.lower() == "n": - break - if val.lower() == "yes" or val.lower() == "y": - backend['sample_entries'] = INSTALL_LATEST_CONFIG + if len(backends) > 0: + while 1: + val = input("\nCreate sample entries in the suffix [no]: ".format(suffix)) + if val != "": + if val.lower() == "no" or val.lower() == "n": + break + if val.lower() == "yes" or val.lower() == "y": + backend['sample_entries'] = INSTALL_LATEST_CONFIG + break + + # Unknown value + print ("Value \"{}\" is invalid, please use \"yes\" or \"no\"".format(val)) + continue + else: break - # Unknown value - print ("Value \"{}\" is invalid, please use \"yes\" or \"no\"".format(val)) - continue - else: - break - # Are you ready? while 1: val = input('\nAre you ready to install? [no]: ') @@ -574,7 +612,13 @@ class SetupDs(object): self.log.info("NOOP: Dry run requested") else: # Actually trigger the installation. - self._install_ds(general, slapd, backends) + try: + self._install_ds(general, slapd, backends) + except ValueError as e: + self.log.fatal("Error: " + str(e) + ", removing incomplete installation...") + self._remove_failed_install(slapd['instance_name']) + raise ValueError("Instance creation failed!") + # Call the child api to do anything it needs. self._install(extra) if self.verbose: @@ -596,10 +640,12 @@ class SetupDs(object): for line in template_init.readlines(): initconfig += line.replace('{{', '{', 1).replace('}}', '}', 1).replace('-', '_') try: - os.makedirs("%s" % slapd['initconfig_dir'], mode=0o775) + # /etc/sysconfig + os.makedirs("%s" % slapd['initconfig_dir'], mode=0o770) except FileExistsError: pass - with open("%s/dirsrv-%s" % (slapd['initconfig_dir'], slapd['instance_name']), 'w') as f: + sysconfig_filename = "%s/dirsrv-%s" % (slapd['initconfig_dir'], slapd['instance_name']) + with open(sysconfig_filename, 'w') as f: f.write(initconfig.format( SERVER_DIR=slapd['lib_dir'], SERVERBIN_DIR=slapd['sbin_dir'], @@ -609,17 +655,25 @@ class SetupDs(object): DS_ROOT='', PRODUCT_NAME='slapd', )) + os.chmod(sysconfig_filename, 0o440) + os.chown(sysconfig_filename, slapd['user_uid'], slapd['group_gid']) # Create all the needed paths - # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir? schema_dir, + # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir? for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): if self.verbose: self.log.info("ACTION: creating %s", slapd[path]) try: - os.makedirs(slapd[path], mode=0o775) + os.umask(0o007) # For parent dirs that get created -> sets 770 for perms + os.makedirs(slapd[path], mode=0o770) except OSError: pass os.chown(slapd[path], slapd['user_uid'], slapd['group_gid']) + + # /var/lock/dirsrv needs special attention... 
+ parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir)) + os.chown(parentdir, slapd['user_uid'], slapd['group_gid']) + ### Warning! We need to down the directory under db too for .restore to work. # See dblayer.c for more! db_parent = os.path.join(slapd['db_dir'], '..') @@ -632,18 +686,21 @@ class SetupDs(object): _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir']) os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid']) + os.chmod(slapd['schema_dir'], 0o770) # Copy in the collation srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf') dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf') shutil.copy2(srcfile, dstfile) os.chown(dstfile, slapd['user_uid'], slapd['group_gid']) + os.chmod(dstfile, 0o440) # Copy in the certmap configuration srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf') dstfile = os.path.join(slapd['config_dir'], 'certmap.conf') shutil.copy2(srcfile, dstfile) os.chown(dstfile, slapd['user_uid'], slapd['group_gid']) + os.chmod(dstfile, 0o440) # If we are on the correct platform settings, systemd if general['systemd'] and not self.containerised: @@ -758,10 +815,17 @@ class SetupDs(object): # Set selinux port label selinux_label_port(slapd['secure_port']) - ## LAST CHANCE, FIX PERMISSIONS. - # Selinux fixups? - # Restorecon of paths? - if not self.containerised and general['selinux']: + # Do selinux fixups + if not self.containerised and general['selinux'] and selinux.is_selinux_enabled(): + selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', + 'lock_dir', 'log_dir', 'run_dir', 'schema_dir', 'tmp_dir') + for path in selinux_paths: + try: + selinux.restorecon(slapd[path], recursive=True) + except: + self.log.debug("Failed to run restorecon on: " + slapd[path]) + pass + selinux_label_port(slapd['port']) # Start the server diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py index 0c71e9ccc..ccd15d00b 100644 --- a/src/lib389/lib389/nss_ssl.py +++ b/src/lib389/lib389/nss_ssl.py @@ -106,6 +106,7 @@ class NssSsl(object): noise = password_generate(256) with open(fpath, 'w') as f: f.write(noise) + os.chmod(fpath, 0o660) def reinit(self): """ @@ -122,12 +123,17 @@ class NssSsl(object): # In the future we may add the needed option to avoid writing the pin # files. # Write the pin.txt, and the pwdfile.txt - if not os.path.exists('%s/%s' % (self._certdb, PIN_TXT)): - with open('%s/%s' % (self._certdb, PIN_TXT), 'w') as f: + pin_file = '%s/%s' % (self._certdb, PIN_TXT) + if not os.path.exists(pin_file): + with open(pin_file, 'w') as f: f.write('Internal (Software) Token:%s' % self.dbpassword) - if not os.path.exists('%s/%s' % (self._certdb, PWD_TXT)): - with open('%s/%s' % (self._certdb, PWD_TXT), 'w') as f: + os.chmod(pin_file, 0o660) + + pwd_text_file = '%s/%s' % (self._certdb, PWD_TXT) + if not os.path.exists(pwd_text_file): + with open(pwd_text_file, 'w') as f: f.write('%s' % self.dbpassword) + os.chmod(pwd_text_file, 0o660) # Init the db. # 48886; This needs to be sql format ... 
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py index bfa5616de..55ef7c794 100644 --- a/src/lib389/lib389/utils.py +++ b/src/lib389/lib389/utils.py @@ -184,7 +184,8 @@ def selinux_label_port(port, remove_label=False): :raises: ValueError: Error message """ - if not selinux.is_selinux_enabled() or port == 389 or port == 636: + selinux_default_ports = [389, 636, 3268, 3269, 7389] + if not selinux.is_selinux_enabled() or port in selinux_default_ports: return label_set = False @@ -204,12 +205,14 @@ def selinux_label_port(port, remove_label=False): # The port is within the range, just return return break - else: + elif not remove_label: # Port belongs to someone else (bad) - raise ValueError("Port " + port + " was already labelled with: " + policy['type']) + # This is only an issue during setting a label, not removing a label + raise ValueError("Port {} was already labelled with: ({}) Please choose a different port number".format(port, policy['type'])) if (remove_label and label_set) or (not remove_label and not label_set): for i in range(3): + try: subprocess.check_call(["semanage", "port", "-d" if remove_label else "-a",
0
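The cockpit UI change above guards do_backup() with a "does this backup name already exist?" check built on dsctl -j <instance> backups. The same check from Python might look like the sketch below; the JSON shape (an "items" list whose entries start with the backup name) is inferred from the JavaScript, dsctl is assumed to be on PATH, and the function name is hypothetical.

import json
import subprocess

def backup_exists(server_id, backup_name):
    # Ask dsctl for the existing backups in JSON form, as the UI does.
    out = subprocess.check_output(['dsctl', '-j', server_id, 'backups'],
                                  encoding='utf-8')
    data = json.loads(out)
    return any(item[0] == backup_name for item in data.get('items', []))

# Example (hypothetical instance and backup names):
# if backup_exists('localhost', 'nightly'):
#     print('A backup with this name already exists, replace it?')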