instance_id
stringlengths 10
57
| base_commit
stringlengths 40
40
| created_at
stringdate 2014-04-30 14:58:36
2025-04-30 20:14:11
| environment_setup_commit
stringlengths 40
40
| hints_text
stringlengths 0
273k
| patch
stringlengths 251
7.06M
| problem_statement
stringlengths 11
52.5k
| repo
stringlengths 7
53
| test_patch
stringlengths 231
997k
| meta
dict | version
stringclasses 851
values | install_config
dict | requirements
stringlengths 93
34.2k
⌀ | environment
stringlengths 760
20.5k
⌀ | FAIL_TO_PASS
listlengths 1
9.39k
| FAIL_TO_FAIL
listlengths 0
2.69k
| PASS_TO_PASS
listlengths 0
7.87k
| PASS_TO_FAIL
listlengths 0
192
| license_name
stringclasses 55
values | __index_level_0__
int64 0
21.4k
| before_filepaths
listlengths 1
105
| after_filepaths
listlengths 1
105
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
pre-commit__pre-commit-hooks-274 | e80813e7e9bceeb263a4329341f2681074fa725a | 2018-03-19 16:29:06 | e80813e7e9bceeb263a4329341f2681074fa725a | diff --git a/README.md b/README.md
index 8d21a68..41e9103 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,11 @@ Add this to your `.pre-commit-config.yaml`
- `check-yaml` - Attempts to load all yaml files to verify syntax.
- `--allow-multiple-documents` - allow yaml files which use the
[multi-document syntax](http://www.yaml.org/spec/1.2/spec.html#YAML)
+ - `--unsafe` - Instaed of loading the files, simply parse them for syntax.
+ A syntax-only check enables extensions and unsafe constructs which would
+ otherwise be forbidden. Using this option removes all guarantees of
+ portability to other yaml implementations.
+ Implies `--allow-multiple-documents`.
- `debug-statements` - Check for pdb / ipdb / pudb statements in code.
- `detect-aws-credentials` - Checks for the existence of AWS secrets that you
have set up with the AWS CLI.
diff --git a/pre_commit_hooks/check_yaml.py b/pre_commit_hooks/check_yaml.py
index e9bb8f0..9fbbd88 100644
--- a/pre_commit_hooks/check_yaml.py
+++ b/pre_commit_hooks/check_yaml.py
@@ -1,6 +1,7 @@
from __future__ import print_function
import argparse
+import collections
import sys
import yaml
@@ -11,24 +12,52 @@ except ImportError: # pragma: no cover (no libyaml-dev / pypy)
Loader = yaml.SafeLoader
+def _exhaust(gen):
+ for _ in gen:
+ pass
+
+
+def _parse_unsafe(*args, **kwargs):
+ _exhaust(yaml.parse(*args, **kwargs))
+
+
def _load_all(*args, **kwargs):
- # need to exhaust the generator
- return tuple(yaml.load_all(*args, **kwargs))
+ _exhaust(yaml.load_all(*args, **kwargs))
+
+
+Key = collections.namedtuple('Key', ('multi', 'unsafe'))
+LOAD_FNS = {
+ Key(multi=False, unsafe=False): yaml.load,
+ Key(multi=False, unsafe=True): _parse_unsafe,
+ Key(multi=True, unsafe=False): _load_all,
+ Key(multi=True, unsafe=True): _parse_unsafe,
+}
def check_yaml(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
- '-m', '--allow-multiple-documents', dest='yaml_load_fn',
- action='store_const', const=_load_all, default=yaml.load,
+ '-m', '--multi', '--allow-multiple-documents', action='store_true',
+ )
+ parser.add_argument(
+ '--unsafe', action='store_true',
+ help=(
+ 'Instead of loading the files, simply parse them for syntax. '
+ 'A syntax-only check enables extensions and unsafe contstructs '
+ 'which would otherwise be forbidden. Using this option removes '
+ 'all guarantees of portability to other yaml implementations. '
+ 'Implies --allow-multiple-documents'
+ ),
)
parser.add_argument('filenames', nargs='*', help='Yaml filenames to check.')
args = parser.parse_args(argv)
+ load_fn = LOAD_FNS[Key(multi=args.multi, unsafe=args.unsafe)]
+
retval = 0
for filename in args.filenames:
try:
- args.yaml_load_fn(open(filename), Loader=Loader)
+ load_fn(open(filename), Loader=Loader)
except yaml.YAMLError as exc:
print(exc)
retval = 1
| Check-yaml exception for Ansible Vault
Hi,
I've recently used the [embed encrypt variables](https://docs.ansible.com/ansible/latest/vault.html#encrypt-string-for-use-in-yaml) in my Ansible repos, and the check-yaml hook is failing because of the specific `!vault` usage.
```
Check Yaml...............................................................Failed
hookid: check-yaml
could not determine a constructor for the tag '!vault'
in "inventories/group_vars/all.yml", line 62, column 13
```
Do you think it's possible to add this as an exception inside the hook (and make it pass for everyone who's using encypted variables in their playbooks) ?
Or should I add explicit exception inside my `.pre-commit-config.yaml`, for each files where I use those encrypted variables ? | pre-commit/pre-commit-hooks | diff --git a/tests/check_yaml_test.py b/tests/check_yaml_test.py
index de3b383..aa357f1 100644
--- a/tests/check_yaml_test.py
+++ b/tests/check_yaml_test.py
@@ -22,7 +22,7 @@ def test_check_yaml_allow_multiple_documents(tmpdir):
f = tmpdir.join('test.yaml')
f.write('---\nfoo\n---\nbar\n')
- # should failw without the setting
+ # should fail without the setting
assert check_yaml((f.strpath,))
# should pass when we allow multiple documents
@@ -33,3 +33,22 @@ def test_fails_even_with_allow_multiple_documents(tmpdir):
f = tmpdir.join('test.yaml')
f.write('[')
assert check_yaml(('--allow-multiple-documents', f.strpath))
+
+
+def test_check_yaml_unsafe(tmpdir):
+ f = tmpdir.join('test.yaml')
+ f.write(
+ 'some_foo: !vault |\n'
+ ' $ANSIBLE_VAULT;1.1;AES256\n'
+ ' deadbeefdeadbeefdeadbeef\n',
+ )
+ # should fail "safe" check
+ assert check_yaml((f.strpath,))
+ # should pass when we allow unsafe documents
+ assert not check_yaml(('--unsafe', f.strpath))
+
+
+def test_check_yaml_unsafe_still_fails_on_syntax_errors(tmpdir):
+ f = tmpdir.join('test.yaml')
+ f.write('[')
+ assert check_yaml(('--unsafe', f.strpath))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
autopep8==2.0.4
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==2.5.5
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.4.0
mock==5.2.0
nodeenv==1.6.0
packaging==21.3
pep8==1.7.1
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
-e git+https://github.com/pre-commit/pre-commit-hooks.git@e80813e7e9bceeb263a4329341f2681074fa725a#egg=pre_commit_hooks
py==1.11.0
pycodestyle==2.10.0
pyflakes==1.0.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: pre-commit-hooks
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- autopep8==2.0.4
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==2.5.5
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.4.0
- mock==5.2.0
- nodeenv==1.6.0
- packaging==21.3
- pep8==1.7.1
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.10.0
- pyflakes==1.0.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit-hooks
| [
"tests/check_yaml_test.py::test_check_yaml_unsafe",
"tests/check_yaml_test.py::test_check_yaml_unsafe_still_fails_on_syntax_errors"
]
| []
| [
"tests/check_yaml_test.py::test_check_yaml[bad_yaml.notyaml-1]",
"tests/check_yaml_test.py::test_check_yaml[ok_yaml.yaml-0]",
"tests/check_yaml_test.py::test_check_yaml_allow_multiple_documents",
"tests/check_yaml_test.py::test_fails_even_with_allow_multiple_documents"
]
| []
| MIT License | 2,309 | [
"pre_commit_hooks/check_yaml.py",
"README.md"
]
| [
"pre_commit_hooks/check_yaml.py",
"README.md"
]
|
|
mozilla__bleach-361 | a65f5c8ea664abbd54b4c711ebd0ca26c3509b7e | 2018-03-19 21:48:55 | c27512d20b48b7901687b62d15c91be1de856f89 | diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index 12225ef..faf8fd7 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -668,8 +668,20 @@ class BleachSanitizerFilter(sanitizer.Filter):
assert token_type in ("StartTag", "EmptyTag")
attrs = []
for (ns, name), v in token["data"].items():
+ # If we end up with a namespace, but no name, switch them so we
+ # have a valid name to use.
+ if ns and not name:
+ ns, name = name, ns
+
+ # Figure out namespaced name if the namespace is appropriate
+ # and exists; if the ns isn't in prefixes, then drop it.
+ if ns is None or ns not in prefixes:
+ namespaced_name = name
+ else:
+ namespaced_name = '%s:%s' % (prefixes[ns], name)
+
attrs.append(' %s="%s"' % (
- name if ns is None else "%s:%s" % (prefixes[ns], name),
+ namespaced_name,
# NOTE(willkg): HTMLSerializer escapes attribute values
# already, so if we do it here (like HTMLSerializer does),
# then we end up double-escaping.
| Exceptions on strange Html
Hello,
This small pattern raises an exception :
import bleach
bleach.clean("<d {c}>")
| mozilla/bleach | diff --git a/tests/test_clean.py b/tests/test_clean.py
index 1f3cbfc..9547d63 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -759,6 +759,19 @@ def test_convert_entities(data, expected):
assert convert_entities(data) == expected
+def test_nonexistent_namespace():
+ """Verify if the namespace doesn't exist, it doesn't fail with a KeyError
+
+ The tokenizer creates "c" as a namespace and that doesn't exist in the map
+ of namespaces, so then it fails with a KeyError. I don't understand why the
+ tokenizer makes "c" into a namespace in this string.
+
+ Issue #352.
+
+ """
+ assert clean('<d {c}>') == '<d c=""></d>'
+
+
def get_tests():
"""Retrieves regression tests from data/ directory
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install -U pip setuptools>=18.5"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/mozilla/bleach.git@a65f5c8ea664abbd54b4c711ebd0ca26c3509b7e#egg=bleach
exceptiongroup==1.2.2
html5lib==1.1
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
webencodings==0.5.1
| name: bleach
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- html5lib==1.1
- iniconfig==2.1.0
- packaging==24.2
- pip==25.0.1
- pluggy==1.5.0
- pytest==8.3.5
- setuptools==78.1.0
- six==1.17.0
- tomli==2.2.1
- webencodings==0.5.1
prefix: /opt/conda/envs/bleach
| [
"tests/test_clean.py::test_nonexistent_namespace"
]
| [
"tests/test_clean.py::test_uri_value_allowed_protocols[<a"
]
| [
"tests/test_clean.py::test_clean_idempotent",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_disallowed_tags[<img",
"tests/test_clean.py::test_disallowed_tags[a",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_allowed_styles",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_weird_strings",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_convert_entities[-]",
"tests/test_clean.py::test_convert_entities[abc-abc]",
"tests/test_clean.py::test_convert_entities[ -\\xa0]",
"tests/test_clean.py::test_convert_entities[ -",
"tests/test_clean.py::test_convert_entities[ -",
"tests/test_clean.py::test_convert_entities[&xx;-&xx;]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/1.test->\"><script>alert(\"XSS\")</script>&\\n--\\n>\"><script>alert(\"XSS\")</script>&\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/2.test-\"><STYLE>@import\"javascript:alert('XSS')\";</STYLE>\\n--\\n\"><style>@import\"javascript:alert('XSS')\";</style>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/3.test->\"'><img%20src%3D%26%23x6a;%26%23x61;%26%23x76;%26%23x61;%26%23x73;%26%23x63;%26%23x72;%26%23x69;%26%23x70;%26%23x74;%26%23x3a;alert(%26quot;%26%23x20;XSS%26%23x20;Test%26%23x20;Successful%26quot;)>\\n--\\n>\"'><img%20src%3d%26%23x6a;%26%23x61;%26%23x76;%26%23x61;%26%23x73;%26%23x63;%26%23x72;%26%23x69;%26%23x70;%26%23x74;%26%23x3a;alert(%26quot;%26%23x20;xss%26%23x20;test%26%23x20;successful%26quot;)></img%20src%3d%26%23x6a;%26%23x61;%26%23x76;%26%23x61;%26%23x73;%26%23x63;%26%23x72;%26%23x69;%26%23x70;%26%23x74;%26%23x3a;alert(%26quot;%26%23x20;xss%26%23x20;test%26%23x20;successful%26quot;)>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/4.test-<scr<script></script>ipt",
"tests/test_clean.py::test_regressions[/bleach/tests/data/5.test->%22%27><img%20src%3d%22javascript:alert(%27%20XSS%27)%22>\\n--\\n>%22%27><img%20src%3d%22javascript:alert(%27%20xss%27)%22></img%20src%3d%22javascript:alert(%27%20xss%27)%22>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/6.test-<a",
"tests/test_clean.py::test_regressions[/bleach/tests/data/7.test-\">\\n--\\n\">\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/8.test->\"\\n--\\n>\"\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/9.test-'';!--\"<XSS>=&{()}\\n--\\n'';!--\"<xss>=&{()}</xss>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/10.test-<IMG",
"tests/test_clean.py::test_regressions[/bleach/tests/data/11.test-<IMG",
"tests/test_clean.py::test_regressions[/bleach/tests/data/12.test-<IMG",
"tests/test_clean.py::test_regressions[/bleach/tests/data/13.test-<IMG",
"tests/test_clean.py::test_regressions[/bleach/tests/data/14.test-<IMGSRC=java&<WBR>#115;crip&<WBR>#116;:a\\n--\\n<imgsrc=&#106;&#97;&#118;&#97;&<wbr>#115;crip&<wbr></wbr>#116;:a</imgsrc=&#106;&#97;&#118;&#97;&<wbr>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/15.test-le&<WBR>#114;t('XS<WBR>;S')>\\n--\\nle&<wbr></wbr>#114;t('X&#83<wbr></wbr>;S'&#41>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/16.test-<IMGSRC=ja&<WBR>#0000118as&<WBR>#0000099ri&<WBR>#0000112t:&<WBR>#0000097le&<WBR>#0000114t(&<WBR>#0000039XS&<WBR>#0000083')>\\n--\\n<imgsrc=&#0000106&#0000097&<wbr>#0000118&#0000097&#0000115&<wbr></wbr>#0000099&#0000114&#0000105&<wbr></wbr>#0000112&#0000116&#0000058&<wbr></wbr>#0000097&#0000108&#0000101&<wbr></wbr>#0000114&#0000116&#0000040&<wbr></wbr>#0000039&#0000088&#0000083&<wbr></wbr>#0000083&#0000039&#0000041></imgsrc=&#0000106&#0000097&<wbr>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/17.test-<IMGSRC=javas&<WBR>#x63ript:&<WBR>#x61lert(&<WBR>#x27XSS')>\\n--\\n<imgsrc=&#x6a&#x61&#x76&#x61&#x73&<wbr>#x63&#x72&#x69&#x70&#x74&#x3A&<wbr></wbr>#x61&#x6C&#x65&#x72&#x74&#x28&<wbr></wbr>#x27&#x58&#x53&#x53&#x27&#x29></imgsrc=&#x6a&#x61&#x76&#x61&#x73&<wbr>\\n]",
"tests/test_clean.py::test_regressions[/bleach/tests/data/18.test-<IMG",
"tests/test_clean.py::test_regressions[/bleach/tests/data/19.test-<IMG",
"tests/test_clean.py::test_regressions[/bleach/tests/data/20.test-<IMG",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters"
]
| []
| Apache License 2.0 | 2,310 | [
"bleach/sanitizer.py"
]
| [
"bleach/sanitizer.py"
]
|
|
conan-io__conan-2639 | 9d07bc26337c3282a57e59aa6a5c7b8afe9c282c | 2018-03-19 23:38:37 | 0f8b143c43d0354c6a75da94a1374d5ce39b7f96 | diff --git a/conans/client/command.py b/conans/client/command.py
index 506c0debe..df9034e9a 100644
--- a/conans/client/command.py
+++ b/conans/client/command.py
@@ -35,10 +35,13 @@ class Extender(argparse.Action):
# share this destination.
parser.set_defaults(**{self.dest: None})
- try:
- dest.extend(values)
- except ValueError:
+ if isinstance(values, str):
dest.append(values)
+ elif values:
+ try:
+ dest.extend(values)
+ except ValueError:
+ dest.append(values)
class OnceArgument(argparse.Action):
@@ -232,10 +235,13 @@ class Command(object):
help='Force install specified package ID (ignore settings/options)')
parser.add_argument("-r", "--remote", help='look in the specified remote server',
action=OnceArgument)
+ parser.add_argument("-re", "--recipe", help='Downloads only the recipe', default=False,
+ action="store_true")
args = parser.parse_args(*args)
- return self._conan.download(reference=args.reference, package=args.package, remote=args.remote)
+ return self._conan.download(reference=args.reference, package=args.package,
+ remote=args.remote, recipe=args.recipe)
def install(self, *args):
"""Installs the requirements specified in a conanfile (.py or .txt).
@@ -1232,7 +1238,7 @@ def _add_common_install_arguments(parser, build_help):
'-e CXX=/usr/bin/clang++',
nargs=1, action=Extender)
if build_help:
- parser.add_argument("-b", "--build", action=Extender, nargs="*", help=build_help)
+ parser.add_argument("-b", "--build", action=Extender, nargs="?", help=build_help)
_help_build_policies = '''Optional, use it to choose if you want to build from sources:
diff --git a/conans/client/conan_api.py b/conans/client/conan_api.py
index 98704dc90..630b8131a 100644
--- a/conans/client/conan_api.py
+++ b/conans/client/conan_api.py
@@ -371,10 +371,12 @@ class ConanAPIV1(object):
profile=profile, force=force)
@api_method
- def download(self, reference, remote=None, package=None):
+ def download(self, reference, remote=None, package=None, recipe=False):
+ if package and recipe:
+ raise ConanException("recipe parameter cannot be used together with package")
# Install packages without settings (fixed ids or all)
conan_ref = ConanFileReference.loads(reference)
- self._manager.download(conan_ref, package, remote=remote)
+ self._manager.download(conan_ref, package, remote=remote, recipe=recipe)
@api_method
def install_reference(self, reference, settings=None, options=None, env=None,
diff --git a/conans/client/installer.py b/conans/client/installer.py
index d59599727..8caafd2c0 100644
--- a/conans/client/installer.py
+++ b/conans/client/installer.py
@@ -385,14 +385,22 @@ class ConanInstaller(object):
if not os.path.exists(package_folder):
self._out.info("Retrieving package %s" % package_reference.package_id)
- if self._remote_proxy.get_package(package_reference,
- short_paths=conan_file.short_paths):
- _handle_system_requirements(conan_file, package_reference,
- self._client_cache, output)
- if get_env("CONAN_READ_ONLY_CACHE", False):
- make_read_only(package_folder)
- return True
-
+ try:
+ if self._remote_proxy.get_package(package_reference,
+ short_paths=conan_file.short_paths):
+ _handle_system_requirements(conan_file, package_reference,
+ self._client_cache, output)
+ if get_env("CONAN_READ_ONLY_CACHE", False):
+ make_read_only(package_folder)
+ return True
+ except Exception:
+ if os.path.exists(package_folder):
+ try:
+ rmdir(package_folder)
+ except OSError as e:
+ raise ConanException("%s\n\nCouldn't remove folder '%s', might be busy or open. Close any app "
+ "using it, and retry" % (str(e), package_folder))
+ raise
_raise_package_not_found_error(conan_file, package_reference.conan,
package_reference.package_id, output)
diff --git a/conans/client/manager.py b/conans/client/manager.py
index c0ad9fc0a..d5415ace3 100644
--- a/conans/client/manager.py
+++ b/conans/client/manager.py
@@ -206,11 +206,12 @@ class ConanManager(object):
packager.create_package(conanfile, source_folder, build_folder, dest_package_folder,
install_folder, package_output, local=True)
- def download(self, reference, package_ids, remote):
+ def download(self, reference, package_ids, remote, recipe):
""" Download conanfile and specified packages to local repository
@param reference: ConanFileReference
@param package_ids: Package ids or empty for download all
@param remote: install only from that remote
+ @param only_recipe: download only the recipe
"""
assert(isinstance(reference, ConanFileReference))
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
@@ -221,6 +222,10 @@ class ConanManager(object):
# First of all download package recipe
remote_proxy.get_recipe(reference)
+
+ if recipe:
+ return
+
# Download the sources too, don't be lazy
conan_file_path = self._client_cache.conanfile(reference)
conanfile = load_conanfile_class(conan_file_path)
@@ -533,7 +538,9 @@ class ConanManager(object):
if not remote:
remote = remote_proxy.registry.default_remote.name
name, password = self._user_io.request_login(remote_name=remote, username=name)
- return remote_proxy.authenticate(name, password)
+
+ all_remotes = True if remote is None else False
+ return remote_proxy.authenticate(name, password, all_remotes=all_remotes)
def get_path(self, reference, package_id=None, path=None, remote=None):
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
diff --git a/conans/client/printer.py b/conans/client/printer.py
index bcd4b650e..f36dba0f8 100644
--- a/conans/client/printer.py
+++ b/conans/client/printer.py
@@ -202,7 +202,11 @@ class Printer(object):
warn_msg = "There are no packages for reference '%s' matching the query '%s'" % (str(reference),
packages_query)
else:
- warn_msg = "There are no packages for pattern '%s'" % str(reference)
+ warn_msg = "There are no packages for reference '%s'" % str(reference)
+
+ if recipe_hash:
+ warn_msg += ", but package recipe found."
+
self._out.info(warn_msg)
return
diff --git a/conans/client/proxy.py b/conans/client/proxy.py
index 19f114f3e..bdfd72041 100644
--- a/conans/client/proxy.py
+++ b/conans/client/proxy.py
@@ -395,13 +395,22 @@ class ConanProxy(object):
output.warn('Binary for %s not in remote: %s' % (package_id, str(e)))
return False
- def authenticate(self, name, password):
- if not name: # List all users, from all remotes
- remotes = self._registry.remotes
- if not remotes:
- self._out.error("No remotes defined")
- for remote in remotes:
- self._remote_manager.authenticate(remote, None, None)
- return
- remote, _ = self._get_remote()
- return self._remote_manager.authenticate(remote, name, password)
+ def authenticate(self, name, password, all_remotes=False):
+ """
+ Manage user auth against remote.
+ Also displays a list of authenticated users against remote(s) if user name is evaluated to False.
+
+ :param name: user name string
+ :param password: password string
+ :param all_remotes: True/False to use all available remotes to display a list of authenticated users if
+ user name is evaluated to False.
+ """
+ current_remote, _ = self._get_remote()
+
+ if name:
+ return self._remote_manager.authenticate(current_remote, name, password)
+
+ # List all users from required remotes
+ remotes = self._registry.remotes if all_remotes else [current_remote]
+ for remote in remotes:
+ self._remote_manager.authenticate(remote, None, None)
diff --git a/conans/client/rest/cacert.py b/conans/client/rest/cacert.py
index 349c7c28c..9e8738aac 100644
--- a/conans/client/rest/cacert.py
+++ b/conans/client/rest/cacert.py
@@ -6,3345 +6,5706 @@ import logging
# TODO: Fix this security warning
logging.captureWarnings(True)
-# Got from: https://curl.haxx.se/docs/caextract.html
-# Updated at 12 March 2018
-
-cacert = """##
-## Bundle of CA Root Certificates
-##
-## Certificate data from Mozilla as of: Wed Mar 7 04:12:06 2018 GMT
-##
-## This is a bundle of X.509 certificates of public Certificate Authorities
-## (CA). These were automatically extracted from Mozilla's root certificates
-## file (certdata.txt). This file can be found in the mozilla source tree:
-## https://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt
-##
-## It contains the certificates in PEM format and therefore
-## can be directly used with curl / libcurl / php_curl, or with
-## an Apache+mod_ssl webserver for SSL client authentication.
-## Just configure this file as the SSLCACertificateFile.
-##
-## Conversion done with mk-ca-bundle.pl version 1.27.
-## SHA256: 704f02707ec6b4c4a7597a8c6039b020def11e64f3ef0605a9c3543d48038a57
-##
-
-
-GlobalSign Root CA
-==================
------BEGIN CERTIFICATE-----
-MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx
-GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds
-b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV
-BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD
-VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa
-DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc
-THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb
-Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP
-c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX
-gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
-HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF
-AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj
-Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG
-j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH
-hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC
-X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
------END CERTIFICATE-----
-
-GlobalSign Root CA - R2
-=======================
------BEGIN CERTIFICATE-----
-MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv
-YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh
-bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT
-aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln
-bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6
-ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp
-s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN
-S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL
-TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C
-ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
-FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i
-YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN
-BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp
-9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu
-01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7
-9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+
+cacert = """
+# Issuer: O=Equifax OU=Equifax Secure Certificate Authority
+# Subject: O=Equifax OU=Equifax Secure Certificate Authority
+# Label: "Equifax Secure CA"
+# Serial: 903804111
+# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4
+# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a
+# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
+UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy
+dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1
+MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx
+dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B
+AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f
+BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A
+cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC
+AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ
+MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm
+aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw
+ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj
+IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF
+MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
+A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y
+7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh
+1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Label: "GlobalSign Root CA - R2"
+# Serial: 4835703278459682885658125
+# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
+# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
+# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----
-Verisign Class 3 Public Primary Certification Authority - G3
-============================================================
------BEGIN CERTIFICATE-----
-MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV
-UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv
-cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
-IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy
-dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv
-cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg
-Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1
-EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc
-cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw
-EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj
-055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA
-ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f
-j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
-/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0
-xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa
-t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
------END CERTIFICATE-----
-
-Entrust.net Premium 2048 Secure Server CA
-=========================================
------BEGIN CERTIFICATE-----
-MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u
-ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp
-bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV
-BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx
-NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3
-d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl
-MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u
-ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL
-Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr
-hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW
-nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi
-VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E
-BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ
-KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy
-T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
-zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT
-J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e
-nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE=
------END CERTIFICATE-----
-
-Baltimore CyberTrust Root
-=========================
------BEGIN CERTIFICATE-----
-MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE
-ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li
-ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC
-SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs
-dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME
-uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB
-UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C
-G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9
-XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr
-l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI
-VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB
-BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh
-cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5
-hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa
-Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H
-RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
------END CERTIFICATE-----
-
-AddTrust External Root
-======================
------BEGIN CERTIFICATE-----
-MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML
-QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD
-VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw
-NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU
-cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg
-Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821
-+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw
-Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo
-aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy
-2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7
-7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P
-BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL
-VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk
-VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB
-IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl
-j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
-6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355
-e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u
-G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
------END CERTIFICATE-----
-
-Entrust Root Certification Authority
-====================================
------BEGIN CERTIFICATE-----
-MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV
-BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw
-b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG
-A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0
-MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu
-MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu
-Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v
-dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz
-A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww
-Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68
-j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN
-rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw
-DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1
-MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH
-hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
-A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM
-Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa
-v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS
-W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0
-tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8
------END CERTIFICATE-----
-
-GeoTrust Global CA
-==================
------BEGIN CERTIFICATE-----
-MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
-Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw
-MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j
-LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo
-BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet
-8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc
-T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU
-vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk
-DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q
-zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4
-d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2
-mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p
-XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm
-Mw==
------END CERTIFICATE-----
-
-GeoTrust Universal CA
-=====================
------BEGIN CERTIFICATE-----
-MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN
-R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1
-MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu
-Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
-ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t
-JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e
-RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs
-7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d
-8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V
-qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga
-Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB
-Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu
-KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08
-ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0
-XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB
-hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
-aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2
-qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL
-oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK
-xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF
-KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2
-DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK
-xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU
-p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI
-P/rmMuGNG2+k5o7Y+SlIis5z/iw=
------END CERTIFICATE-----
-
-GeoTrust Universal CA 2
-=======================
------BEGIN CERTIFICATE-----
-MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN
-R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0
-MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg
-SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA
-A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0
-DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17
-j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q
-JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a
-QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2
-WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP
-20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn
-ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC
-SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG
-8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2
-+/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E
-BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
-dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ
-4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+
-mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq
-A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg
-Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP
-pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d
-FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp
-gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm
-X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
------END CERTIFICATE-----
-
-Visa eCommerce Root
-===================
------BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG
-EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug
-QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2
-WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm
-VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
-bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL
-F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b
-RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0
-TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI
-/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs
-GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG
-MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc
-CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW
-YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz
-zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu
-YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
+# Serial: 206684696279472310254277870180966723415
+# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
+# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
+# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
+N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
+KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
+CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
+Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
+imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
+2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
+DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
+TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 4 Public Primary Certification Authority - G3"
+# Serial: 314531972711909413743075096039378935511
+# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df
+# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d
+# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1
+GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ
++mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd
+U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm
+NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY
+ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/
+ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1
+CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq
+g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm
+fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c
+2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/
+bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Label: "AddTrust Low-Value Services Root"
+# Serial: 1
+# MD5 Fingerprint: 1e:42:95:02:33:92:6b:b9:5f:c0:7f:da:d6:b2:4b:fc
+# SHA1 Fingerprint: cc:ab:0e:a0:4c:23:01:d6:69:7b:dd:37:9f:cd:12:eb:24:e3:94:9d
+# SHA256 Fingerprint: 8c:72:09:27:9a:c0:4e:27:5e:16:d0:7f:d3:b7:75:e8:01:54:b5:96:80:46:e3:1f:52:dd:25:76:63:24:e9:a7
+-----BEGIN CERTIFICATE-----
+MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw
+MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML
+QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD
+VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul
+CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n
+tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl
+dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch
+PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC
++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O
+BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl
+MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk
+ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB
+IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X
+7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz
+43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY
+eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl
+pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA
+WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Label: "AddTrust External Root"
+# Serial: 1
+# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
+# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
+# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Subject: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Label: "AddTrust Public Services Root"
+# Serial: 1
+# MD5 Fingerprint: c1:62:3e:23:c5:82:73:9c:03:59:4b:2b:e9:77:49:7f
+# SHA1 Fingerprint: 2a:b6:28:48:5e:78:fb:f3:ad:9e:79:10:dd:6b:df:99:72:2c:96:e5
+# SHA256 Fingerprint: 07:91:ca:07:49:b2:07:82:aa:d3:c7:d7:bd:0c:df:c9:48:58:35:84:3e:b2:d7:99:60:09:ce:43:ab:6c:69:27
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx
+MDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB
+ZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV
+BAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV
+6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX
+GCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP
+dzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH
+1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF
+62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW
+BBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL
+MAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU
+cnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv
+b3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6
+IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/
+iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao
+GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh
+4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm
+XiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Subject: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Label: "AddTrust Qualified Certificates Root"
+# Serial: 1
+# MD5 Fingerprint: 27:ec:39:47:cd:da:5a:af:e2:9a:01:65:21:a9:4c:bb
+# SHA1 Fingerprint: 4d:23:78:ec:91:95:39:b5:00:7f:75:8f:03:3b:21:1e:c5:4d:8b:cf
+# SHA256 Fingerprint: 80:95:21:08:05:db:4b:bc:35:5e:44:28:d8:fd:6e:c2:cd:e3:ab:5f:b9:7a:99:42:98:8e:b8:f4:dc:d0:60:16
+-----BEGIN CERTIFICATE-----
+MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1
+MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK
+EwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh
+BgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq
+xBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G
+87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i
+2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U
+WfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1
+0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G
+A1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr
+pGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL
+ExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm
+aWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv
+hsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm
+hpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X
+dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3
+P6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y
+iQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no
+xqE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: O=RSA Security Inc OU=RSA Security 2048 V3
+# Subject: O=RSA Security Inc OU=RSA Security 2048 V3
+# Label: "RSA Security 2048 v3"
+# Serial: 13297492616345471454730593562152402946
+# MD5 Fingerprint: 77:0d:19:b1:21:fd:00:42:9c:3e:0c:a5:dd:0b:02:8e
+# SHA1 Fingerprint: 25:01:90:19:cf:fb:d9:99:1c:b7:68:25:74:8d:94:5f:30:93:95:42
+# SHA256 Fingerprint: af:8b:67:62:a1:e5:28:22:81:61:a9:5d:5c:55:9e:e2:66:27:8f:75:d7:9e:83:01:89:a5:03:50:6a:bd:6b:4c
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6
+MRkwFwYDVQQKExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJp
+dHkgMjA0OCBWMzAeFw0wMTAyMjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAX
+BgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAbBgNVBAsTFFJTQSBTZWN1cml0eSAy
+MDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt49VcdKA3Xtp
+eafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7Jylg
+/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGl
+wSMiuLgbWhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnh
+AMFRD0xS+ARaqn1y07iHKrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2
+PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP+Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpu
+AWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4EFgQUB8NR
+MKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYc
+HnmYv/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/
+Zb5gEydxiKRz44Rj0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+
+f00/FGj1EVDVwfSQpQgdMWD/YIwjVAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVO
+rSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395nzIlQnQFgCi/vcEkllgVsRch
+6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kApKnXwiJPZ9d3
+7CAFYd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Label: "GeoTrust Global CA"
+# Serial: 144470
+# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
+# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
+# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Global CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Global CA 2"
+# Serial: 1
+# MD5 Fingerprint: 0e:40:a7:6c:de:03:5d:8f:d1:0f:e4:d1:8d:f9:6c:a9
+# SHA1 Fingerprint: a9:e9:78:08:14:37:58:88:f2:05:19:b0:6d:2b:0d:2b:60:16:90:7d
+# SHA256 Fingerprint: ca:2d:82:a0:86:77:07:2f:8a:b6:76:4f:f0:35:67:6c:fe:3e:5e:32:5e:01:21:72:df:3f:92:09:6d:b7:9b:85
+-----BEGIN CERTIFICATE-----
+MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs
+IENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg
+R2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A
+PRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8
+Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL
+TytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL
+5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7
+S4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe
+2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
+FHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap
+EBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td
+EPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv
+/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN
+A0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0
+abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF
+I8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz
+4iIprn2DQKi6bA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA"
+# Serial: 1
+# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
+# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
+# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
+IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
+VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
+cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
+QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
+F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
+c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
+mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
+VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
+teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
+f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
+Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
+nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
+MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
+9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
+IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
+ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
+uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
+QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
+ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
+DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
+bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA 2"
+# Serial: 1
+# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
+# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
+# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
+VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
+c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
+WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
+FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
+XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
+se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
+KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
+IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
+y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
+hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
+QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
+Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
+HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
+KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
+L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
+Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
+ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
+T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
+GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
+1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
+OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
+6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
+QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
+# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
+# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
+# Label: "Visa eCommerce Root"
+# Serial: 25952180776285836048024890241505565794
+# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02
+# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62
+# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22
+-----BEGIN CERTIFICATE-----
+MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
+MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
+cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
+bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
+CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
+dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
+cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
+2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
+lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
+ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
+299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
+vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
+dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
+AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
+zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
+LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
+7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
+++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
398znM/jra6O1I7mT1GvFpLgXPYHDw==
-----END CERTIFICATE-----
-Comodo AAA Services root
-========================
------BEGIN CERTIFICATE-----
-MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS
-R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg
-TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw
-MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl
-c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV
-BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG
-C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs
-i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW
-Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH
-Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK
-Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f
-BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl
-cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz
-LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm
-7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
-Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z
-8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C
-12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
------END CERTIFICATE-----
-
-QuoVadis Root CA
-================
------BEGIN CERTIFICATE-----
-MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE
-ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
-eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz
-MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp
-cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD
-EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk
-J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL
-F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL
-YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen
-AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w
-PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y
-ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7
-MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj
-YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs
-ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
-Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW
-Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu
-BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw
-FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0
-aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6
-tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo
-fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul
-LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x
-gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi
-5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi
-5nrQNiOKSnQ2+Q==
------END CERTIFICATE-----
-
-QuoVadis Root CA 2
-==================
------BEGIN CERTIFICATE-----
-MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT
-EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx
-ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
-aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC
-DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6
-XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk
-lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB
-lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy
-lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt
-66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn
-wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh
-D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy
-BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie
-J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud
-DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU
-a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
-ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv
-Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3
-UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm
-VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK
-+JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW
-IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1
-WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X
-f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II
-4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8
-VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
------END CERTIFICATE-----
-
-QuoVadis Root CA 3
-==================
------BEGIN CERTIFICATE-----
-MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT
-EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx
-OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
-aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC
-DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg
-DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij
-KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K
-DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv
-BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp
-p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8
-nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX
-MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM
-Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz
-uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT
-BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj
-YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
-aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB
-BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD
-VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4
-ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE
-AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV
-qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s
-hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z
-POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2
-Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp
-8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC
-bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu
-g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p
-vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr
-qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto=
------END CERTIFICATE-----
-
-Security Communication Root CA
-==============================
------BEGIN CERTIFICATE-----
-MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP
-U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw
-HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP
-U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw
-ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw
-8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM
-DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX
-5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd
-DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2
-JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw
-DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g
-0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a
-mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ
-s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ
-6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi
-FL39vmwLAw==
------END CERTIFICATE-----
-
-Sonera Class 2 Root CA
-======================
------BEGIN CERTIFICATE-----
-MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG
-U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw
-NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh
-IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3
-/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT
-dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG
-f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P
-tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH
-nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT
-XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt
-0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI
-cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph
-Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx
-EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH
-llpwrN9M
------END CERTIFICATE-----
-
-XRamp Global CA Root
-====================
------BEGIN CERTIFICATE-----
-MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE
-BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj
-dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
-dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx
-HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg
-U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
-dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu
-IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx
-foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE
-zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs
-AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry
-xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud
-EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC
-AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc
-/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
-qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n
-nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz
-8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw=
------END CERTIFICATE-----
-
-Go Daddy Class 2 CA
-===================
------BEGIN CERTIFICATE-----
-MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY
-VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp
-ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG
-A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g
-RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD
-ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv
-2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32
-qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j
-YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY
-vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O
-BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o
-atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu
-MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG
-A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim
-PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt
-I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
-HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI
-Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b
-vZ8=
------END CERTIFICATE-----
-
-Starfield Class 2 CA
-====================
------BEGIN CERTIFICATE-----
-MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc
-U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg
-Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo
-MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG
-A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG
-SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY
-bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ
-JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm
-epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN
-F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF
-MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f
-hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo
-bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g
-QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs
-afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM
-PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
-xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD
-KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3
-QBFGmh95DmK/D5fs4C8fF5Q=
------END CERTIFICATE-----
-
-Taiwan GRCA
-===========
------BEGIN CERTIFICATE-----
-MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG
-EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X
-DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv
-dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD
-ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN
-w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5
-BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O
-1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO
-htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov
-J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7
-Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t
-B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB
-O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8
-lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV
-HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2
-09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
-TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj
-Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2
-Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU
-D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz
-DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk
-Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk
-7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ
-CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy
-+fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS
------END CERTIFICATE-----
-
-DigiCert Assured ID Root CA
-===========================
------BEGIN CERTIFICATE-----
-MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw
-IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx
-MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL
-ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew
-ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO
-9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy
-UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW
-/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy
-oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf
-GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF
-66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq
-hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc
-EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn
-SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i
-8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
+# Issuer: CN=Certum CA O=Unizeto Sp. z o.o.
+# Subject: CN=Certum CA O=Unizeto Sp. z o.o.
+# Label: "Certum Root CA"
+# Serial: 65568
+# MD5 Fingerprint: 2c:8f:9f:66:1d:18:90:b1:47:26:9d:8e:86:82:8c:a9
+# SHA1 Fingerprint: 62:52:dc:40:f7:11:43:a2:2f:de:9e:f7:34:8e:06:42:51:b1:81:18
+# SHA256 Fingerprint: d8:e0:fe:bc:1d:b2:e3:8d:00:94:0f:37:d2:7d:41:34:4d:99:3e:73:4b:99:d5:65:6d:97:78:d4:d8:14:36:24
+-----BEGIN CERTIFICATE-----
+MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM
+MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD
+QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM
+MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD
+QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E
+jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo
+ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI
+ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu
+Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg
+AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7
+HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA
+uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa
+TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg
+xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q
+CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x
+O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs
+6GAqm4VKQPNriiTsBhYscw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Certificate Services O=Comodo CA Limited
+# Subject: CN=Secure Certificate Services O=Comodo CA Limited
+# Label: "Comodo Secure Services root"
+# Serial: 1
+# MD5 Fingerprint: d3:d9:bd:ae:9f:ac:67:24:b3:c8:1b:52:e1:b9:a9:bd
+# SHA1 Fingerprint: 4a:65:d5:f4:1d:ef:39:b8:b8:90:4a:4a:d3:64:81:33:cf:c7:a1:d1
+# SHA256 Fingerprint: bd:81:ce:3b:4f:65:91:d1:1a:67:b5:fc:7a:47:fd:ef:25:52:1b:f9:aa:4e:18:b9:e3:df:2e:34:a7:80:3b:e8
+-----BEGIN CERTIFICATE-----
+MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp
+ZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow
+fjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV
+BAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM
+cm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S
+HpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996
+CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk
+3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz
+6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV
+HQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
+EwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv
+Y2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw
+Oi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww
+DQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0
+5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj
+Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI
+gKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ
+aD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl
+izeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trusted Certificate Services O=Comodo CA Limited
+# Subject: CN=Trusted Certificate Services O=Comodo CA Limited
+# Label: "Comodo Trusted Services root"
+# Serial: 1
+# MD5 Fingerprint: 91:1b:3f:6e:cd:9e:ab:ee:07:fe:1f:71:d2:b3:61:27
+# SHA1 Fingerprint: e1:9f:e3:0e:8b:84:60:9e:80:9b:17:0d:72:a8:c5:ba:6e:14:09:bd
+# SHA256 Fingerprint: 3f:06:e5:56:81:d4:96:f5:be:16:9e:b5:38:9f:9f:2b:8f:f6:1e:17:08:df:68:81:72:48:49:cd:5d:27:cb:69
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0
+aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla
+MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO
+BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD
+VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW
+fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt
+TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL
+fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW
+1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7
+kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G
+A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v
+ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo
+dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu
+Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/
+HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32
+pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS
+jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+
+xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn
+dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Label: "QuoVadis Root CA"
+# Serial: 985026699
+# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24
+# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9
+# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73
+-----BEGIN CERTIFICATE-----
+MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
+MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
+IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
+dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
+li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
+rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
+F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
+xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
+Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
+dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
+ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
+IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
+c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
+ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
+Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
+KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
+KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
+y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
+dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
+VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
+MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
+7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
+cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
+mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
+xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
+SnQ2+Q==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Sonera Class2 CA O=Sonera
+# Subject: CN=Sonera Class2 CA O=Sonera
+# Label: "Sonera Class 2 Root CA"
+# Serial: 29
+# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb
+# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27
+# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
+MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
+MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
+BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
+Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
+5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
+3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
+vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
+8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
+DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
+MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
+zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
+3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
+FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
+Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
+ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA"
+# Serial: 10000010
+# MD5 Fingerprint: 60:84:7c:5a:ce:db:0c:d4:cb:a7:e9:fe:02:c6:a9:c0
+# SHA1 Fingerprint: 10:1d:fa:3f:d5:0b:cb:bb:9b:b5:60:0c:19:55:a4:1a:f4:73:3a:04
+# SHA256 Fingerprint: d4:1d:82:9e:8c:16:59:82:2a:f9:3f:ce:62:bf:fc:de:26:4f:c8:4e:8b:95:0c:5f:f2:75:d0:52:35:46:95:a3
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJO
+TDEeMBwGA1UEChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEy
+MTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4wHAYDVQQKExVTdGFhdCBkZXIgTmVk
+ZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxhbmRlbiBSb290IENB
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFtvszn
+ExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw71
+9tV2U02PjLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MO
+hXeiD+EwR+4A5zN9RGcaC1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+U
+tFE5A3+y3qcym7RHjm+0Sq7lr7HcsBthvJly3uSJt3omXdozSVtSnA71iq3DuD3o
+BmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn622r+I/q85Ej0ZytqERAh
+SQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRVHSAAMDww
+OgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMv
+cm9vdC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA
+7Jbg0zTBLL9s+DANBgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k
+/rvuFbQvBgwp8qiSpGEN/KtcCFtREytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzm
+eafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbwMVcoEoJz6TMvplW0C5GUR5z6
+u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3ynGQI0DvDKcWy
+7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR
+iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com
+# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com
+# Label: "UTN DATACorp SGC Root CA"
+# Serial: 91374294542884689855167577680241077609
+# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06
+# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4
+# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48
+-----BEGIN CERTIFICATE-----
+MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
+kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
+IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
+VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
+dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
+E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
+D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
+4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
+lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
+bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
+o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
+MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
+LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
+BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
+AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
+Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
+j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
+KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
+2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
+mfnGV/TJVTl4uix5yaaIK/QI
+-----END CERTIFICATE-----
+
+# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com
+# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com
+# Label: "UTN USERFirst Hardware Root CA"
+# Serial: 91374294542884704022267039221184531197
+# MD5 Fingerprint: 4c:56:41:e5:0d:bb:2b:e8:ca:a3:ed:18:08:ad:43:39
+# SHA1 Fingerprint: 04:83:ed:33:99:ac:36:08:05:87:22:ed:bc:5e:46:00:e3:be:f9:d7
+# SHA256 Fingerprint: 6e:a5:47:41:d0:04:66:7e:ed:1b:48:16:63:4a:a3:a7:9e:6e:4b:96:95:0f:82:79:da:fc:8d:9b:d8:81:21:37
+-----BEGIN CERTIFICATE-----
+MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
+SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe
+MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v
+d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh
+cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn
+0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ
+M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a
+MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd
+oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI
+DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0
+dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF
+BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM
+//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli
+CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE
+CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t
+3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS
+KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
+# Subject: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
+# Label: "Camerfirma Chambers of Commerce Root"
+# Serial: 0
+# MD5 Fingerprint: b0:01:ee:14:d9:af:29:18:94:76:8e:f1:69:33:2a:84
+# SHA1 Fingerprint: 6e:3a:55:a4:19:0c:19:5c:93:84:3c:c0:db:72:2e:31:30:61:f0:b1
+# SHA256 Fingerprint: 0c:25:8a:12:a5:67:4a:ef:25:f2:8b:a7:dc:fa:ec:ee:a3:48:e5:41:e6:f5:cc:4e:e6:3b:71:b3:61:60:6a:c3
+-----BEGIN CERTIFICATE-----
+MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn
+MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL
+ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg
+b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa
+MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB
+ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw
+IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B
+AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb
+unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d
+BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq
+7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3
+0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX
+roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG
+A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j
+aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p
+26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA
+BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud
+EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN
+BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz
+aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB
+AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd
+p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi
+1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc
+XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0
+eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu
+tGWaIZDgqtCYvDi1czyL+Nw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
+# Subject: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
+# Label: "Camerfirma Global Chambersign Root"
+# Serial: 0
+# MD5 Fingerprint: c5:e6:7b:bf:06:d0:4f:43:ed:c4:7a:65:8a:fb:6b:19
+# SHA1 Fingerprint: 33:9b:6b:14:50:24:9b:55:7a:01:87:72:84:d9:e0:2f:c3:d2:d8:e9
+# SHA256 Fingerprint: ef:3c:b4:17:fc:8e:bf:6f:97:87:6c:9e:4e:ce:39:de:1e:a5:fe:64:91:41:d1:02:8b:7d:11:c0:b2:29:8c:ed
+-----BEGIN CERTIFICATE-----
+MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn
+MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL
+ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo
+YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9
+MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy
+NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G
+A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA
+A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0
+Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s
+QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV
+eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795
+B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh
+z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T
+AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i
+ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w
+TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH
+MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD
+VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE
+VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh
+bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B
+AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM
+bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi
+ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG
+VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c
+ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/
+AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Kozjegyzoi (Class A) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok
+# Subject: CN=NetLock Kozjegyzoi (Class A) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok
+# Label: "NetLock Notary (Class A) Root"
+# Serial: 259
+# MD5 Fingerprint: 86:38:6d:5e:49:63:6c:85:5c:db:6d:dc:94:b7:d0:f7
+# SHA1 Fingerprint: ac:ed:5f:65:53:fd:25:ce:01:5f:1f:7a:48:3b:6a:74:9f:61:78:c6
+# SHA256 Fingerprint: 7f:12:cd:5f:7e:5e:29:0e:c7:d8:51:79:d5:b7:2c:20:a5:be:75:08:ff:db:5b:f8:1a:b9:68:4a:7f:c9:f6:67
+-----BEGIN CERTIFICATE-----
+MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhV
+MRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMe
+TmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0
+dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFzcyBB
+KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oXDTE5MDIxOTIzMTQ0
+N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQHEwhC
+dWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQu
+MRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBL
+b3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSMD7tM9DceqQWC2ObhbHDqeLVu0ThEDaiD
+zl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZz+qMkjvN9wfcZnSX9EUi
+3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC/tmwqcm8
+WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LY
+Oph7tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2Esi
+NCubMvJIH5+hCoR64sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCC
+ApswDgYDVR0PAQH/BAQDAgAGMBIGA1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4
+QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZRUxFTSEgRXplbiB0
+YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRhdGFz
+aSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu
+IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtm
+ZWxlbG9zc2VnLWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMg
+ZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVs
+amFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJhc2EgbWVndGFsYWxoYXRv
+IGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBzOi8vd3d3
+Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6
+ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1
+YW5jZSBhbmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3Qg
+dG8gdGhlIE5ldExvY2sgQ1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRs
+b2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBjcHNAbmV0bG9jay5uZXQuMA0G
+CSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5ayZrU3/b39/zcT0mwBQO
+xmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjPytoUMaFP
+0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQ
+QeJBCWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxk
+f1qbFFgBJ34TUMdrKuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK
+8CtmdWOMovsEPoMOmzbwGOQmIMOM8CgHrTwXZoi1/baI
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Label: "StartCom Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16
+# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f
+# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
+
+# Issuer: O=Government Root Certification Authority
+# Subject: O=Government Root Certification Authority
+# Label: "Taiwan GRCA"
+# Serial: 42023070807708724159991140556527066870
+# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e
+# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9
+# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3
+-----BEGIN CERTIFICATE-----
+MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/
+MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow
+PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR
+IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q
+gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy
+yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts
+F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2
+jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx
+ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC
+VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK
+YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH
+EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN
+Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud
+DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE
+MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
+TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf
+qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK
+ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE
+JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7
+hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1
+EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm
+nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX
+udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz
+ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe
+LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl
+pYYsfPQS
+-----END CERTIFICATE-----
+
+# Issuer: CN=Swisscom Root CA 1 O=Swisscom OU=Digital Certificate Services
+# Subject: CN=Swisscom Root CA 1 O=Swisscom OU=Digital Certificate Services
+# Label: "Swisscom Root CA 1"
+# Serial: 122348795730808398873664200247279986742
+# MD5 Fingerprint: f8:38:7c:77:88:df:2c:16:68:2e:c2:e2:52:4b:b8:f9
+# SHA1 Fingerprint: 5f:3a:fc:0a:8b:64:f6:86:67:34:74:df:7e:a9:a2:fe:f9:fa:7a:51
+# SHA256 Fingerprint: 21:db:20:12:36:60:bb:2e:d4:18:20:5d:a1:1e:e7:a8:5a:65:e2:bc:6e:55:b5:af:7e:78:99:c8:a2:66:d9:2e
+-----BEGIN CERTIFICATE-----
+MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBk
+MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0
+YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg
+Q0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4MTgyMjA2MjBaMGQxCzAJBgNVBAYT
+AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp
+Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIICIjAN
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9
+m2BtRsiMMW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdih
+FvkcxC7mlSpnzNApbjyFNDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/
+TilftKaNXXsLmREDA/7n29uj/x2lzZAeAR81sH8A25Bvxn570e56eqeqDFdvpG3F
+EzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkCb6dJtDZd0KTeByy2dbco
+kdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn7uHbHaBu
+HYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNF
+vJbNcA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo
+19AOeCMgkckkKmUpWyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjC
+L3UcPX7ape8eYIVpQtPM+GP+HkM5haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJW
+bjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNYMUJDLXT5xp6mig/p/r+D5kNX
+JLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw
+FDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j
+BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzc
+K6FptWfUjNP9MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzf
+ky9NfEBWMXrrpA9gzXrzvsMnjgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7Ik
+Vh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQMbFamIp1TpBcahQq4FJHgmDmHtqB
+sfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4HVtA4oJVwIHaM190e
+3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtlvrsR
+ls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ip
+mXeascClOS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HH
+b6D0jqTsNFFbjCYDcKF31QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksf
+rK/7DZBaZmBwXarNeNQk7shBoJMBkpxqnvy5JMWzFYJ+vq6VK+uxwNrjAWALXmms
+hFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCyx/yP2FS1k2Kdzs9Z+z0Y
+zirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMWNY6E0F/6
+MBr1mmz0DlP5OlvRHA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
-----END CERTIFICATE-----
-DigiCert Global Root CA
-=======================
------BEGIN CERTIFICATE-----
-MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw
-HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw
-MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3
-dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn
-TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5
-BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H
-4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y
-7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB
-o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm
-8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF
-BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr
-EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt
-tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886
-UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
-----END CERTIFICATE-----
-DigiCert High Assurance EV Root CA
-==================================
------BEGIN CERTIFICATE-----
-MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw
-KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw
-MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ
-MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu
-Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t
-Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS
-OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3
-MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ
-NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe
-h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB
-Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY
-JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ
-V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp
-myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK
-mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
-vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K
------END CERTIFICATE-----
-
-Certplus Class 2 Primary CA
-===========================
------BEGIN CERTIFICATE-----
-MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE
-BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN
-OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy
-dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
-ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR
-5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ
-Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO
-YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e
-e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME
-CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ
-YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t
-L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD
-P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R
-TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+
-7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW
-//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=Class 2 Primary CA O=Certplus
+# Subject: CN=Class 2 Primary CA O=Certplus
+# Label: "Certplus Class 2 Primary CA"
+# Serial: 177770208045934040241468760488327595043
+# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b
+# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb
+# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb
+-----BEGIN CERTIFICATE-----
+MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
+PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
+MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
+IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
+ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
+VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
+kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
+EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
+H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
+HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
+DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
+QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
+Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
+AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
+yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
+FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
+ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
+kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
l7+ijrRU
-----END CERTIFICATE-----
-DST Root CA X3
-==============
------BEGIN CERTIFICATE-----
-MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK
-ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X
-DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1
-cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT
-rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9
-UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy
-xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d
-utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T
-AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ
-MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug
-dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE
-GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw
-RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS
-fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
------END CERTIFICATE-----
-
-SwissSign Gold CA - G2
-======================
------BEGIN CERTIFICATE-----
-MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw
-EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN
-MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp
-c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B
-AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq
-t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C
-jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg
-vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF
-ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR
-AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend
-jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO
-peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR
-7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi
-GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw
-AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64
-OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
-L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm
-5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr
-44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf
-Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m
-Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp
-mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk
-vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf
-KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br
-NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj
-viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
------END CERTIFICATE-----
-
-SwissSign Silver CA - G2
-========================
------BEGIN CERTIFICATE-----
-MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT
-BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X
-DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3
-aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG
-9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644
-N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm
-+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH
-6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu
-MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h
-qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5
-FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs
-ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc
-celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X
-CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
-BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB
-tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
-cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P
-4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F
-kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L
-3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx
-/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa
-DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP
-e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu
-WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ
-DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub
-DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
------END CERTIFICATE-----
-
-GeoTrust Primary Certification Authority
-========================================
------BEGIN CERTIFICATE-----
-MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG
-EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD
-ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx
-CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ
-cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN
-b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9
-nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge
-RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt
-tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
-AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI
-hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K
-Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN
-NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa
-Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG
-1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
------END CERTIFICATE-----
-
-thawte Primary Root CA
-======================
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE
-BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2
-aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv
-cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3
-MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg
-SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv
-KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT
-FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs
-oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ
-1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc
-q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K
-aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p
-afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD
-VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF
-AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE
-uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
-xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89
-jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH
-z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA==
------END CERTIFICATE-----
-
-VeriSign Class 3 Public Primary Certification Authority - G5
-============================================================
------BEGIN CERTIFICATE-----
-MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE
-BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO
-ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk
-IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp
-ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB
-yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln
-biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh
-dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt
-YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
-ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz
-j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD
-Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/
-Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r
-fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/
-BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv
-Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
-aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG
-SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+
-X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE
-KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC
-Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE
-ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
------END CERTIFICATE-----
-
-SecureTrust CA
-==============
------BEGIN CERTIFICATE-----
-MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG
-EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy
-dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe
-BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC
-ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX
-OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t
-DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH
-GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b
-01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH
-ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/
-BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj
-aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ
-KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu
-SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf
-mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ
-nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Label: "DST Root CA X3"
+# Serial: 91299735575339953335919266965803778155
+# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
+# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
+# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
+rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
+OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
+7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
+aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
+SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
+ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
+AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
+R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
+JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
+Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES
+# Subject: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES
+# Label: "DST ACES CA X6"
+# Serial: 17771143917277623872238992636097467865
+# MD5 Fingerprint: 21:d8:4c:82:2b:99:09:33:a2:eb:14:24:8d:8e:5f:e8
+# SHA1 Fingerprint: 40:54:da:6f:1c:3f:40:74:ac:ed:0f:ec:cd:db:79:d1:53:fb:90:1d
+# SHA256 Fingerprint: 76:7c:95:5a:76:41:2c:89:af:68:8e:90:a1:c7:0f:55:6c:fd:6b:60:25:db:ea:10:41:6d:7e:b6:83:1f:8c:40
+-----BEGIN CERTIFICATE-----
+MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx
+ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w
+MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD
+VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx
+FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu
+ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7
+gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH
+fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a
+ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT
+ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk
+c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto
+dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt
+aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI
+hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk
+QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/
+h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq
+nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR
+rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2
+9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=(c) 2005 TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
+# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=(c) 2005 TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
+# Label: "TURKTRUST Certificate Services Provider Root 1"
+# Serial: 1
+# MD5 Fingerprint: f1:6a:22:18:c9:cd:df:ce:82:1d:1d:b7:78:5c:a9:a5
+# SHA1 Fingerprint: 79:98:a3:08:e1:4d:65:85:e6:c2:1e:15:3a:71:9f:ba:5a:d3:4a:d9
+# SHA256 Fingerprint: 44:04:e3:3b:5e:14:0d:cf:99:80:51:fd:fc:80:28:c7:c8:16:15:c5:ee:73:7b:11:1b:58:82:33:a9:b5:35:a0
+-----BEGIN CERTIFICATE-----
+MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOc
+UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
+c8SxMQswCQYDVQQGDAJUUjEPMA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykg
+MjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8
+dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMxMDI3MTdaFw0xNTAz
+MjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsgU2Vy
+dGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYD
+VQQHDAZBTktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kg
+xLBsZXRpxZ9pbSB2ZSBCaWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEu
+xZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7
+XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GXyGl8hMW0kWxsE2qkVa2k
+heiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8iSi9BB35J
+YbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5C
+urKZ8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1
+JuTm5Rh8i27fbMx4W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51
+b0dewQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV
+9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46sWrv7/hg0Uw2ZkUd82YCdAR7
+kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxEq8Sn5RTOPEFh
+fEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy
+B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdA
+aLX/7KfS0zgYnNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKS
+RGQDJereW26fyfJOrN3H
+-----END CERTIFICATE-----
+
+# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005
+# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005
+# Label: "TURKTRUST Certificate Services Provider Root 2"
+# Serial: 1
+# MD5 Fingerprint: 37:a5:6e:d4:b1:25:84:97:b7:fd:56:15:7a:f9:a2:00
+# SHA1 Fingerprint: b4:35:d4:e1:11:9d:1c:66:90:a7:49:eb:b3:94:bd:63:7b:a7:82:b7
+# SHA256 Fingerprint: c4:70:cf:54:7e:23:02:b9:77:fb:29:dd:71:a8:9a:7b:6c:1f:60:77:7b:03:29:f5:60:17:f3:28:bf:4f:6b:e6
+-----BEGIN CERTIFICATE-----
+MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOc
+UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
+c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xS
+S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg
+SGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcNMDUxMTA3MTAwNzU3
+WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVrdHJv
+bmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJU
+UjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSw
+bGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWe
+LiAoYykgS2FzxLFtIDIwMDUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqeLCDe2JAOCtFp0if7qnef
+J1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKIx+XlZEdh
+R3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJ
+Qv2gQrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGX
+JHpsmxcPbe9TmJEr5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1p
+zpwACPI2/z7woQ8arBT9pmAPAgMBAAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58S
+Fq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8GA1UdEwEB/wQFMAMBAf8wDQYJ
+KoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/nttRbj2hWyfIvwq
+ECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4
+Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFz
+gw2lGh1uEpJ+hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotH
+uFEJjOp9zYhys2AzsfAKRO8P9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LS
+y3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5UrbnBEI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Label: "GeoTrust Primary Certification Authority"
+# Serial: 32798226551256963324313806436981982369
+# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
+# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
+# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA"
+# Serial: 69529181992039203566298953787712940909
+# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
+# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
+# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
+# Serial: 33037644167568058970164719475676101450
+# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
+# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
+# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
-----END CERTIFICATE-----
-Secure Global CA
-================
------BEGIN CERTIFICATE-----
-MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG
-EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH
-bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg
-MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg
-Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx
-YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ
-bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g
-8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV
-HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi
-0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud
-EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn
-oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA
-MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+
-OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn
-CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5
-3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
-----END CERTIFICATE-----
-COMODO Certification Authority
-==============================
------BEGIN CERTIFICATE-----
-MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE
-BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG
-A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1
-dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb
-MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD
-T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH
-+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww
-xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV
-4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA
-1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI
-rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E
-BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k
-b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC
-AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP
-OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
-RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc
-IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN
-+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ==
------END CERTIFICATE-----
-
-Network Solutions Certificate Authority
-=======================================
------BEGIN CERTIFICATE-----
-MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG
-EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr
-IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx
-MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
-MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G
-CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx
-jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT
-aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT
-crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc
-/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB
-AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP
-BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv
-bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA
-A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q
-4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/
-GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
-wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD
-ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
------END CERTIFICATE-----
-
-COMODO ECC Certification Authority
-==================================
------BEGIN CERTIFICATE-----
-MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC
-R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE
-ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB
-dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix
-GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
-Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo
-b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X
-4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni
-wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E
-BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG
-FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA
-U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
------END CERTIFICATE-----
-
-OISTE WISeKey Global Root GA CA
-===============================
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE
-BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG
-A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH
-bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD
-VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw
-IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5
-IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9
-Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg
-Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD
-d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ
-/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R
-LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
-AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ
-KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm
-MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4
-+vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
-hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY
-okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0=
------END CERTIFICATE-----
-
-Certigna
-========
------BEGIN CERTIFICATE-----
-MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw
-EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3
-MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI
-Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q
-XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH
-GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p
-ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg
-DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf
-Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ
-tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ
-BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J
-SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA
-hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+
-ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu
-PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY
-1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Label: "Network Solutions Certificate Authority"
+# Serial: 116697915152937497490437556386812487904
+# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
+# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
+# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+# Issuer: CN=WellsSecure Public Root Certificate Authority O=Wells Fargo WellsSecure OU=Wells Fargo Bank NA
+# Subject: CN=WellsSecure Public Root Certificate Authority O=Wells Fargo WellsSecure OU=Wells Fargo Bank NA
+# Label: "WellsSecure Public Root Certificate Authority"
+# Serial: 1
+# MD5 Fingerprint: 15:ac:a5:c2:92:2d:79:bc:e8:7f:cb:67:ed:02:cf:36
+# SHA1 Fingerprint: e7:b4:f6:9d:61:ec:90:69:db:7e:90:a7:40:1a:3c:f4:7d:4f:e8:ee
+# SHA256 Fingerprint: a7:12:72:ae:aa:a3:cf:e8:72:7f:7f:b3:9f:0f:b3:d1:e5:42:6e:90:60:b0:6e:e6:f1:3e:9a:3c:58:33:cd:43
+-----BEGIN CERTIFICATE-----
+MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMx
+IDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxs
+cyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9v
+dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDcxMjEzMTcwNzU0WhcNMjIxMjE0
+MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdl
+bGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQD
+DC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkw
+ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+r
+WxxTkqxtnt3CxC5FlAM1iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjU
+Dk/41itMpBb570OYj7OeUt9tkTmPOL13i0Nj67eT/DBMHAGTthP796EfvyXhdDcs
+HqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8bJVhHlfXBIEyg1J55oNj
+z7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiBK0HmOFaf
+SZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/Slwxl
+AgMBAAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqG
+KGh0dHA6Ly9jcmwucGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0P
+AQH/BAQDAgHGMB0GA1UdDgQWBBQmlRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0j
+BIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGBi6SBiDCBhTELMAkGA1UEBhMC
+VVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNX
+ZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg
+Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEB
+ALkVsUSRzCPIK0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd
+/ZDJPHV3V3p9+N701NX3leZ0bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pB
+A4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSljqHyita04pO2t/caaH/+Xc/77szWn
+k4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+esE2fDbbFwRnzVlhE9
+iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJtylv
+2G0xffX8oRAHh84vWdw+WNs=
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=IGC/A O=PM/SGDN OU=DCSSI
+# Subject: CN=IGC/A O=PM/SGDN OU=DCSSI
+# Label: "IGC/A"
+# Serial: 245102874772
+# MD5 Fingerprint: 0c:7f:dd:6a:f4:2a:b9:c8:9b:bd:20:7e:a9:db:5c:37
+# SHA1 Fingerprint: 60:d6:89:74:b5:c2:65:9e:8a:0f:c1:88:7c:88:d2:46:69:1b:18:2c
+# SHA256 Fingerprint: b9:be:a7:86:0a:96:2e:a3:61:1d:ab:97:ab:6d:a3:e2:1c:10:68:b9:7d:55:57:5e:d0:e1:12:79:c1:1c:89:32
+-----BEGIN CERTIFICATE-----
+MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYT
+AkZSMQ8wDQYDVQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQ
+TS9TR0ROMQ4wDAYDVQQLEwVEQ1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG
+9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMB4XDTAyMTIxMzE0MjkyM1oXDTIw
+MTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQIEwZGcmFuY2UxDjAM
+BgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NTSTEO
+MAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2
+LmZyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaI
+s9z4iPf930Pfeo2aSVz2TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2
+xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCWSo7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4
+u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYyHF2fYPepraX/z9E0+X1b
+F8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNdfrGoRpAx
+Vs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGd
+PDPQtQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNV
+HSAEDjAMMAoGCCqBegF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAx
+NjAfBgNVHSMEGDAWgBSjBS8YYFDCiQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUF
+AAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RKq89toB9RlPhJy3Q2FLwV3duJ
+L92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3QMZsyK10XZZOY
+YLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg
+Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2a
+NjSaTFR+FwNIlQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R
+0982gaEbeC9xs/FZTEYYKKuF0mBWWg==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication EV RootCA1
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication EV RootCA1
+# Label: "Security Communication EV RootCA1"
+# Serial: 0
+# MD5 Fingerprint: 22:2d:a6:01:ea:7c:0a:f7:f0:6c:56:43:3f:77:76:d3
+# SHA1 Fingerprint: fe:b8:c4:32:dc:f9:76:9a:ce:ae:3d:d8:90:8f:fd:28:86:65:64:7d
+# SHA256 Fingerprint: a2:2d:ba:68:1e:97:37:6e:2d:39:7d:72:8a:ae:3a:9b:62:96:b9:fd:ba:60:bc:2e:11:f6:47:f2:c6:75:fb:37
+-----BEGIN CERTIFICATE-----
+MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz
+MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N
+IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11
+bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE
+RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO
+zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5
+bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF
+MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1
+VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC
+OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G
+CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW
+tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ
+q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb
+EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+
+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O
+VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GA CA"
+# Serial: 86718877871133159090080555911823548314
+# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93
+# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9
+# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
+ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
+aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
+ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
+NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
+A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
+VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
+SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
+VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
+w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
+mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
+4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
+4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
+EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
+SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
+ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
+vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
+hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
+Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
+/L7fCg0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA O=Microsec Ltd. OU=e-Szigno CA
+# Subject: CN=Microsec e-Szigno Root CA O=Microsec Ltd. OU=e-Szigno CA
+# Label: "Microsec e-Szigno Root CA"
+# Serial: 272122594155480254301341951808045322001
+# MD5 Fingerprint: f0:96:b6:2f:c5:10:d5:67:8e:83:25:32:e8:5e:2e:e5
+# SHA1 Fingerprint: 23:88:c9:d3:71:cc:9e:96:3d:ff:7d:3c:a7:ce:fc:d6:25:ec:19:0d
+# SHA256 Fingerprint: 32:7a:3d:76:1a:ba:de:a0:34:eb:99:84:06:27:5c:b1:a4:77:6e:fd:ae:2f:df:6d:01:68:ea:1c:4f:55:67:d0
+-----BEGIN CERTIFICATE-----
+MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAw
+cjELMAkGA1UEBhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNy
+b3NlYyBMdGQuMRQwEgYDVQQLEwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9z
+ZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0MDYxMjI4NDRaFw0xNzA0MDYxMjI4
+NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEWMBQGA1UEChMN
+TWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMTGU1p
+Y3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2u
+uO/TEdyB5s87lozWbxXGd36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+
+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/NoqdNAoI/gqyFxuEPkEeZlApxcpMqyabA
+vjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjcQR/Ji3HWVBTji1R4P770
+Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJPqW+jqpx
+62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcB
+AQRbMFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3Aw
+LQYIKwYBBQUHMAKGIWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAP
+BgNVHRMBAf8EBTADAQH/MIIBcwYDVR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIB
+AQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3LmUtc3ppZ25vLmh1L1NaU1ov
+MIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0AdAB2AOEAbgB5
+ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn
+AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABT
+AHoAbwBsAGcA4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABh
+ACAAcwB6AGUAcgBpAG4AdAAgAGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABo
+AHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMAegBpAGcAbgBvAC4AaAB1AC8AUwBa
+AFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6Ly93d3cuZS1zemln
+bm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NOPU1p
+Y3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxP
+PU1pY3Jvc2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZv
+Y2F0aW9uTGlzdDtiaW5hcnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuB
+EGluZm9AZS1zemlnbm8uaHWkdzB1MSMwIQYDVQQDDBpNaWNyb3NlYyBlLVN6aWdu
+w7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhTWjEWMBQGA1UEChMNTWlj
+cm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhVMIGsBgNV
+HSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJI
+VTERMA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDAS
+BgNVBAsTC2UtU3ppZ25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBS
+b290IENBghEAzLjnv04pGv2i3GalHCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS
+8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMTnGZjWS7KXHAM/IO8VbH0jgds
+ZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FEaGAHQzAxQmHl
+7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a
+86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfR
+hUZLphK3dehKyVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/
+MPMMNz7UwiiAc7EBt51alhQBS6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
-----END CERTIFICATE-----
-Deutsche Telekom Root CA 2
-==========================
------BEGIN CERTIFICATE-----
-MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT
-RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG
-A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5
-MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G
-A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS
-b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5
-bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI
-KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY
-AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK
-Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV
-jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV
-HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr
-E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy
-zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8
-rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G
-dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
+# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA
+# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA
+# Label: "TC TrustCenter Class 2 CA II"
+# Serial: 941389028203453866782103406992443
+# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23
+# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e
+# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4
+-----BEGIN CERTIFICATE-----
+MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV
+BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0
+Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1
+OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i
+SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc
+VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf
+tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg
+uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J
+XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK
+8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99
+5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3
+kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy
+dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6
+Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz
+JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290
+Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u
+TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS
+GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt
+ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8
+au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV
+hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI
+dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA
+# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA
+# Label: "TC TrustCenter Universal CA I"
+# Serial: 601024842042189035295619584734726
+# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c
+# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3
+# SHA256 Fingerprint: eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV
+BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1
+c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx
+MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg
+R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD
+VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR
+JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T
+fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu
+jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z
+wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ
+fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD
+VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G
+CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1
+7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn
+8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs
+ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT
+ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/
+2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY
+-----END CERTIFICATE-----
+
+# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
+# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
+# Label: "Deutsche Telekom Root CA 2"
+# Serial: 38
+# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08
+# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf
+# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
+MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
+IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
+IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
+RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
+U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
+IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
+ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
+QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
+rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
+NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
+QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
+txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
+BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
+AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
+tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
+IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
+6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
Cm26OWMohpLzGITY+9HPBVZkVw==
-----END CERTIFICATE-----
-Cybertrust Global Root
-======================
------BEGIN CERTIFICATE-----
-MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li
-ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4
-MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD
-ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
-+Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW
-0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL
-AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin
-89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT
-8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP
-BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2
-MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G
-A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO
-lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi
-5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2
-hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T
-X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+# Issuer: CN=ComSign Secured CA O=ComSign
+# Subject: CN=ComSign Secured CA O=ComSign
+# Label: "ComSign Secured CA"
+# Serial: 264725503855295744117309814499492384489
+# MD5 Fingerprint: 40:01:25:06:8d:21:43:6a:0e:43:00:9c:e7:43:f3:d5
+# SHA1 Fingerprint: f9:cd:0e:2c:da:76:24:c1:8f:bd:f0:f0:ab:b6:45:b8:f7:fe:d5:7a
+# SHA256 Fingerprint: 50:79:41:c7:44:60:a0:b4:70:86:22:0d:4e:99:32:57:2a:b5:d1:b5:bb:cb:89:80:ab:1c:b1:76:51:a8:44:d2
+-----BEGIN CERTIFICATE-----
+MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw
+PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu
+MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx
+GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL
+MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf
+HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh
+gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW
+v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue
+Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr
+9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt
+6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7
+MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl
+Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58
+ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq
+hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p
+iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC
+dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL
+kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL
+hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz
+OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Label: "Cybertrust Global Root"
+# Serial: 4835703278459682877484360
+# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
+# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
+# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
WL1WMRJOEcgh4LMRkWXbtKaIOM5V
-----END CERTIFICATE-----
-ePKI Root Certification Authority
-=================================
------BEGIN CERTIFICATE-----
-MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG
-EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg
-Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx
-MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq
-MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B
-AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs
-IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi
-lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv
-qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX
-12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O
-WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+
-ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao
-lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/
-vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi
-Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi
-MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
-ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0
-1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq
-KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV
-xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP
-NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r
-GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE
-xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx
-gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy
-sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD
-BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw=
------END CERTIFICATE-----
-
-certSIGN ROOT CA
-================
------BEGIN CERTIFICATE-----
-MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD
-VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa
-Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE
-CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I
-JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH
-rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2
-ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD
-0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943
-AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B
-Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB
-AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8
-SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0
-x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt
-vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz
-TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD
------END CERTIFICATE-----
-
-GeoTrust Primary Certification Authority - G3
-=============================================
------BEGIN CERTIFICATE-----
-MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE
-BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0
-IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy
-eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz
-NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo
-YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT
-LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI
-hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j
-K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE
-c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C
-IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu
-dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC
-MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr
-2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9
-cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE
-Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
-AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s
-t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt
------END CERTIFICATE-----
-
-thawte Primary Root CA - G2
-===========================
------BEGIN CERTIFICATE-----
-MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC
-VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu
-IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg
-Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV
-MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG
-b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt
-IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS
-LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5
-8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU
-mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN
-G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K
-rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
------END CERTIFICATE-----
-
-thawte Primary Root CA - G3
-===========================
------BEGIN CERTIFICATE-----
-MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE
-BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2
-aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv
-cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w
-ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
-d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD
-VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG
-A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At
-P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC
-+BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY
-7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW
-vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E
-BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ
-KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK
-A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
-t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC
-8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm
-er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A=
------END CERTIFICATE-----
-
-GeoTrust Primary Certification Authority - G2
-=============================================
------BEGIN CERTIFICATE-----
-MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC
-VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu
-Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD
-ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1
-OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
-MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl
-b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG
-BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc
-KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD
-VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+
-EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m
-ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2
-npaqBA+K
------END CERTIFICATE-----
-
-VeriSign Universal Root Certification Authority
-===============================================
------BEGIN CERTIFICATE-----
-MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE
-BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO
-ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk
-IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u
-IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV
-UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv
-cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
-IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0
-aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj
-1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP
-MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72
-9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I
-AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR
-tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G
-CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O
-a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
-DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3
-Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx
-Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx
-P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P
-wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4
-mJO37M2CYfE45k+XmCpajQ==
------END CERTIFICATE-----
-
-VeriSign Class 3 Public Primary Certification Authority - G4
-============================================================
------BEGIN CERTIFICATE-----
-MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC
-VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3
-b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz
-ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL
-MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU
-cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo
-b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5
-IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8
-Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz
-rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB
-/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw
-HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u
-Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD
-A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx
-AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
------END CERTIFICATE-----
-
-NetLock Arany (Class Gold) Főtanúsítvány
-========================================
------BEGIN CERTIFICATE-----
-MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G
-A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610
-dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB
-cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx
-MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO
-ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv
-biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6
-c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu
-0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw
-/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk
-H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw
-fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1
-neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB
-BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW
-qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta
-YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
-bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna
-NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu
-dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
------END CERTIFICATE-----
-
-Staat der Nederlanden Root CA - G2
-==================================
------BEGIN CERTIFICATE-----
-MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE
-CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
-Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC
-TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l
-ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ
-5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn
-vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj
-CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil
-e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR
-OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI
-CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65
-48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi
-trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737
-qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB
-AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC
-ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
-HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA
-A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz
-+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj
-f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN
-kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk
-CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF
-URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb
-CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h
-oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV
-IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm
-66+KAQ==
------END CERTIFICATE-----
-
-Hongkong Post Root CA 1
-=======================
------BEGIN CERTIFICATE-----
-MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT
-DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx
-NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n
-IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1
-ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr
-auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh
-qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY
-V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV
-HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i
-h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio
-l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei
-IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps
-T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT
-c4afU9hDDl3WY4JxHYB0yvbiAmvZWg==
------END CERTIFICATE-----
-
-SecureSign RootCA11
-===================
------BEGIN CERTIFICATE-----
-MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi
-SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS
-b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw
-KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1
-cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL
-TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO
-wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq
-g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP
-O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA
-bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX
-t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh
-OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r
-bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ
-Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01
-y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061
-lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I=
------END CERTIFICATE-----
-
-Microsec e-Szigno Root CA 2009
-==============================
------BEGIN CERTIFICATE-----
-MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER
-MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv
-c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
-dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE
-BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt
-U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw
-DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA
-fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG
-0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA
-pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm
-1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC
-AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf
-QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE
-FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o
-lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX
-I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
-tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02
-yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi
-LXpUq3DDfSJlgnCW
------END CERTIFICATE-----
-
-GlobalSign Root CA - R3
-=======================
------BEGIN CERTIFICATE-----
-MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv
-YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh
-bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT
-aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln
-bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt
-iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ
-0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3
-rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl
-OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2
-xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
-FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7
-lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8
-EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E
-bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18
-YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r
-kpeDMdmztcpHWD9f
------END CERTIFICATE-----
-
-Autoridad de Certificacion Firmaprofesional CIF A62634068
-=========================================================
------BEGIN CERTIFICATE-----
-MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA
-BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2
-MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw
-QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB
-NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD
-Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P
-B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY
-7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH
-ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI
-plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX
-MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX
-LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK
-bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU
-vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud
-EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH
-DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
-cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA
-bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx
-ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx
-51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk
-R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP
-T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f
-Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl
-osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR
-crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR
-saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD
-KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi
-6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
------END CERTIFICATE-----
-
-Izenpe.com
-==========
------BEGIN CERTIFICATE-----
-MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG
-EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz
-MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu
-QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ
-03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK
-ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU
-+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC
-PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT
-OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK
-F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK
-0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+
-0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB
-leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID
-AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+
-SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG
-NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
-MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O
-BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l
-Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga
-kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q
-hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs
-g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5
-aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5
-nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC
-ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo
-Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z
-WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
------END CERTIFICATE-----
-
-Chambers of Commerce Root - 2008
-================================
------BEGIN CERTIFICATE-----
-MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD
-MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv
-bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu
-QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy
-Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl
-ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF
-EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl
-cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
-AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA
-XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj
-h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/
-ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk
-NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g
-D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331
-lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ
-0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
-ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2
-EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI
-G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ
-BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh
-bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh
-bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC
-CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH
-AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1
-wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH
-3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU
-RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6
-M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1
-YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF
-9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK
-zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG
-nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
-OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ
------END CERTIFICATE-----
-
-Global Chambersign Root - 2008
-==============================
------BEGIN CERTIFICATE-----
-MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD
-MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv
-bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu
-QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx
-NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg
-Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ
-QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
-aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf
-VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf
-XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0
-ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB
-/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA
-TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M
-H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe
-Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF
-HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
-wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB
-AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT
-BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE
-BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm
-aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm
-aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp
-1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0
-dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG
-/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6
-ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s
-dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg
-9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH
-foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du
-qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr
-P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq
-c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi
+# Subject: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi
+# Label: "T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3"
+# Serial: 17
+# MD5 Fingerprint: ed:41:f5:8c:50:c5:2b:9c:73:e6:ee:6c:eb:c2:a8:26
+# SHA1 Fingerprint: 1b:4b:39:61:26:27:6b:64:91:a2:68:6d:d7:02:43:21:2d:1f:1d:96
+# SHA256 Fingerprint: e4:c7:34:30:d7:a5:b5:09:25:df:43:37:0a:0d:21:6e:9a:79:b9:d6:db:83:73:a0:c6:9e:b1:cc:31:c7:c5:2a
+-----BEGIN CERTIFICATE-----
+MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS
+MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp
+bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw
+VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy
+YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy
+dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2
+ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe
+Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls
+aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU
+QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh
+xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0
+aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr
+IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h
+gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK
+O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO
+fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw
+lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL
+hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID
+AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP
+NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t
+wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM
+7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh
+gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n
+oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs
+yZyQ2uypQjyttgI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 CA 1 O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 CA 1 O=Buypass AS-983163327
+# Label: "Buypass Class 2 CA 1"
+# Serial: 1
+# MD5 Fingerprint: b8:08:9a:f0:03:cc:1b:0d:c8:6c:0b:76:a1:75:64:23
+# SHA1 Fingerprint: a0:a1:ab:90:c9:fc:84:7b:3b:12:61:e8:97:7d:5f:d3:22:61:d3:cc
+# SHA256 Fingerprint: 0f:4e:9c:dd:26:4b:02:55:50:d1:70:80:63:40:21:4f:e9:44:34:c9:b0:2f:69:7e:c7:10:fc:5f:ea:fb:5e:38
+-----BEGIN CERTIFICATE-----
+MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3Mg
+Q2xhc3MgMiBDQSAxMB4XDTA2MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzEL
+MAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MR0wGwYD
+VQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7McXA0
+ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLX
+l18xoS830r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVB
+HfCuuCkslFJgNJQ72uA40Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B
+5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/RuFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3
+WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0PAQH/BAQD
+AgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLP
+gcIV1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+
+DKhQ7SLHrQVMdvvt7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKu
+BctN518fV4bVIJwo+28TOPX2EZL2fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHs
+h7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5wwDX3OaJdZtB7WZ+oRxKaJyOk
+LY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 CA 1 O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 CA 1 O=Buypass AS-983163327
+# Label: "Buypass Class 3 CA 1"
+# Serial: 2
+# MD5 Fingerprint: df:3c:73:59:81:e7:39:50:81:04:4c:34:a2:cb:b3:7b
+# SHA1 Fingerprint: 61:57:3a:11:df:0e:d8:7e:d5:92:65:22:ea:d0:56:d7:44:b3:23:71
+# SHA256 Fingerprint: b7:b1:2b:17:1f:82:1d:aa:99:0c:d0:fe:50:87:b1:28:44:8b:a8:e5:18:4f:84:c5:1e:02:b5:c8:fb:96:2b:24
+-----BEGIN CERTIFICATE-----
+MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3Mg
+Q2xhc3MgMyBDQSAxMB4XDTA1MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzEL
+MAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MR0wGwYD
+VQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKxifZg
+isRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//z
+NIqeKNc0n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI
++MkcVyzwPX6UvCWThOiaAJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2R
+hzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+
+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0PAQH/BAQD
+AgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFP
+Bdy7pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27s
+EzNxZy5p+qksP2bAEllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2
+mSlf56oBzKwzqBwKu5HEA6BvtjT5htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yC
+e/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQjel/wroQk5PMr+4okoyeYZdow
+dXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915
+-----END CERTIFICATE-----
+
+# Issuer: CN=EBG Elektronik Sertifika Hizmet Sağlayıcısı O=EBG Bilişim Teknolojileri ve Hizmetleri A.Ş.
+# Subject: CN=EBG Elektronik Sertifika Hizmet Sağlayıcısı O=EBG Bilişim Teknolojileri ve Hizmetleri A.Ş.
+# Label: "EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1"
+# Serial: 5525761995591021570
+# MD5 Fingerprint: 2c:20:26:9d:cb:1a:4a:00:85:b5:b7:5a:ae:c2:01:37
+# SHA1 Fingerprint: 8c:96:ba:eb:dd:2b:07:07:48:ee:30:32:66:a0:f3:98:6e:7c:ae:58
+# SHA256 Fingerprint: 35:ae:5b:dd:d8:f7:ae:63:5c:ff:ba:56:82:a8:f0:0b:95:f4:84:62:c7:10:8e:e9:a0:e5:29:2b:07:4a:af:b2
+-----BEGIN CERTIFICATE-----
+MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNV
+BAMML0VCRyBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
+c8SxMTcwNQYDVQQKDC5FQkcgQmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXpt
+ZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAeFw0wNjA4MTcwMDIxMDlaFw0xNjA4
+MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25payBTZXJ0aWZpa2Eg
+SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2ltIFRl
+a25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h
+4fuXd7hxlugTlkaDT7byX3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAk
+tiHq6yOU/im/+4mRDGSaBUorzAzu8T2bgmmkTPiab+ci2hC6X5L8GCcKqKpE+i4s
+tPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfreYteIAbTdgtsApWjluTL
+dlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZTqNGFav4
+c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8Um
+TDGyY5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z
++kI2sSXFCjEmN1ZnuqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0O
+Lna9XvNRiYuoP1Vzv9s6xiQFlpJIqkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMW
+OeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vmExH8nYQKE3vwO9D8owrXieqW
+fo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0Nokb+Clsi7n2
+l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB
+/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgw
+FoAU587GT/wWZ5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+
+8ygjdsZs93/mQJ7ANtyVDR2tFcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI
+6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgmzJNSroIBk5DKd8pNSe/iWtkqvTDO
+TLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64kXPBfrAowzIpAoHME
+wfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqTbCmY
+Iai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJn
+xk1Gj7sURT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4Q
+DgZxGhBM/nV+/x5XOULK1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9q
+Kd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11t
+hie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQY9iJSrSq3RZj9W6+YKH4
+7ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9AahH3eU7
+QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=CNNIC ROOT O=CNNIC
+# Subject: CN=CNNIC ROOT O=CNNIC
+# Label: "CNNIC ROOT"
+# Serial: 1228079105
+# MD5 Fingerprint: 21:bc:82:ab:49:c4:13:3b:4b:b2:2b:5c:6b:90:9c:19
+# SHA1 Fingerprint: 8b:af:4c:9b:1d:f0:2a:92:f7:da:12:8e:b9:1b:ac:f4:98:60:4b:6f
+# SHA256 Fingerprint: e2:83:93:77:3d:a8:45:a6:79:f2:08:0c:c7:fb:44:a3:b7:a1:c3:79:2c:b7:eb:77:29:fd:cb:6a:8d:99:ae:a7
+-----BEGIN CERTIFICATE-----
+MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJD
+TjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2
+MDcwOTE0WhcNMjcwNDE2MDcwOTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMF
+Q05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwggEiMA0GCSqGSIb3DQEBAQUAA4IB
+DwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzDo+/hn7E7SIX1mlwh
+IhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tizVHa6
+dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZO
+V/kbZKKTVrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrC
+GHn2emU1z5DrvTOTn1OrczvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gN
+v7Sg2Ca+I19zN38m5pIEo3/PIKe38zrKy5nLAgMBAAGjczBxMBEGCWCGSAGG+EIB
+AQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscCwQ7vptU7ETAPBgNVHRMB
+Af8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991SlgrHAsEO
+76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnK
+OOK5Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvH
+ugDnuL8BV8F3RTIMO/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7Hgvi
+yJA/qIYM/PmLXoXLT1tLYhFHxUV8BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fL
+buXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2G8kS1sHNzYDzAgE8yGnLRUhj
+2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5mmxE=
+-----END CERTIFICATE-----
+
+# Issuer: O=Japanese Government OU=ApplicationCA
+# Subject: O=Japanese Government OU=ApplicationCA
+# Label: "ApplicationCA - Japanese Government"
+# Serial: 49
+# MD5 Fingerprint: 7e:23:4e:5b:a7:a5:b4:25:e9:00:07:74:11:62:ae:d6
+# SHA1 Fingerprint: 7f:8a:b0:cf:d0:51:87:6a:66:f3:36:0f:47:c8:8d:8c:d3:35:fc:74
+# SHA256 Fingerprint: 2d:47:43:7d:e1:79:51:21:5a:12:f3:c5:8e:51:c7:29:a5:80:26:ef:1f:cc:0a:5f:b3:d9:dc:01:2f:60:0d:19
+-----BEGIN CERTIFICATE-----
+MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEc
+MBoGA1UEChMTSmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRp
+b25DQTAeFw0wNzEyMTIxNTAwMDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYT
+AkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zlcm5tZW50MRYwFAYDVQQLEw1BcHBs
+aWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp23gdE6H
+j6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4fl+K
+f5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55
+IrmTwcrNwVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cw
+FO5cjFW6WY2H/CPek9AEjP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDiht
+QWEjdnjDuGWk81quzMKq2edY3rZ+nYVunyoKb58DKTCXKB28t89UKU5RMfkntigm
+/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRUWssmP3HMlEYNllPqa0jQ
+k/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNVBAYTAkpQ
+MRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOC
+seODvOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADlqRHZ3ODrso2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJ
+hyzjVOGjprIIC8CFqMjSnHH2HZ9g/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+
+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYDio+nEhEMy/0/ecGc/WLuo89U
+DNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmWdupwX3kSa+Sj
+B1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL
+rosot4LKGAfmt1t06SAZf7IbiVQ=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Label: "NetLock Arany (Class Gold) Főtanúsítvány"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
+L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
+SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
+CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
+5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
+IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
+gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
+vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
+bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
+N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
+Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
+ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig O=Disig a.s.
+# Subject: CN=CA Disig O=Disig a.s.
+# Label: "CA Disig"
+# Serial: 1
+# MD5 Fingerprint: 3f:45:96:39:e2:50:87:f7:bb:fe:98:0c:3c:20:98:e6
+# SHA1 Fingerprint: 2a:c8:d5:8b:57:ce:bf:2f:49:af:f2:fc:76:8f:51:14:62:90:7a:41
+# SHA256 Fingerprint: 92:bf:51:19:ab:ec:ca:d0:b1:33:2d:c4:e1:d0:5f:ba:75:b5:67:90:44:ee:0c:a2:6e:93:1f:74:4f:2f:33:cf
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzET
+MBEGA1UEBxMKQnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UE
+AxMIQ0EgRGlzaWcwHhcNMDYwMzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQsw
+CQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcg
+YS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgmGErE
+Nx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnX
+mjxUizkDPw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYD
+XcDtab86wYqg6I7ZuUUohwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhW
+S8+2rT+MitcE5eN4TPWGqvWP+j1scaMtymfraHtuM6kMgiioTGohQBUgDCZbg8Kp
+FhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8wgfwwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0PAQH/BAQD
+AgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cu
+ZGlzaWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5z
+ay9jYS9jcmwvY2FfZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2sv
+Y2EvY3JsL2NhX2Rpc2lnLmNybDAaBgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEw
+DQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59tWDYcPQuBDRIrRhCA/ec8J9B6
+yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3mkkp7M5+cTxq
+EEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/
+CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeB
+EicTXxChds6KezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFN
+PGO+I++MzVpQuGhU+QqZMxEA4Z7CRneC9VkGjCFMhwnN5ag=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Juur-SK O=AS Sertifitseerimiskeskus
+# Subject: CN=Juur-SK O=AS Sertifitseerimiskeskus
+# Label: "Juur-SK"
+# Serial: 999181308
+# MD5 Fingerprint: aa:8e:5d:d9:f8:db:0a:58:b7:8d:26:87:6c:82:35:55
+# SHA1 Fingerprint: 40:9d:4b:d9:17:b5:5c:27:b6:9b:64:cb:98:22:44:0d:cd:09:b8:89
+# SHA256 Fingerprint: ec:c3:e9:c3:40:75:03:be:e0:91:aa:95:2f:41:34:8f:f8:8b:aa:86:3b:22:64:be:fa:c8:07:90:15:74:e9:39
+-----BEGIN CERTIFICATE-----
+MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcN
+AQkBFglwa2lAc2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZp
+dHNlZXJpbWlza2Vza3VzMRAwDgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMw
+MVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMQsw
+CQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEQ
+MA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOB
+SvZiF3tfTQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkz
+ABpTpyHhOEvWgxutr2TC+Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvH
+LCu3GFH+4Hv2qEivbDtPL+/40UceJlfwUR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMP
+PbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDaTpxt4brNj3pssAki14sL
+2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQFMAMBAf8w
+ggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwIC
+MIHDHoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDk
+AGwAagBhAHMAdABhAHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0
+AHMAZQBlAHIAaQBtAGkAcwBrAGUAcwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABz
+AGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABrAGkAbgBuAGkAdABhAG0AaQBz
+AGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nwcy8wKwYDVR0f
+BCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE
+FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcY
+P2/v6X2+MA4GA1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOi
+CfP+JmeaUOTDBS8rNXiRTHyoERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+g
+kcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyLabVAyJRld/JXIWY7zoVAtjNjGr95
+HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678IIbsSt4beDI3poHS
+na9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkhMp6q
+qIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0Z
+TbvGRNs2yyqcjg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Label: "Hongkong Post Root CA 1"
+# Serial: 1000
+# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACEDICOM Root O=EDICOM OU=PKI
+# Subject: CN=ACEDICOM Root O=EDICOM OU=PKI
+# Label: "ACEDICOM Root"
+# Serial: 7029493972724711941
+# MD5 Fingerprint: 42:81:a0:e2:1c:e3:55:10:de:55:89:42:65:96:22:e6
+# SHA1 Fingerprint: e0:b4:32:2e:b2:f6:a5:68:b6:54:53:84:48:18:4a:50:36:87:43:84
+# SHA256 Fingerprint: 03:95:0f:b4:9a:53:1f:3e:19:91:94:23:98:df:a9:e0:ea:32:d7:ba:1c:dd:9b:c8:5d:b5:7e:d9:40:0b:43:4a
+-----BEGIN CERTIFICATE-----
+MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE
+AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x
+CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW
+MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF
+RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7
+09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7
+XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P
+Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK
+t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb
+X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28
+MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU
+fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI
+2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH
+K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae
+ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP
+BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ
+MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw
+RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv
+bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm
+fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3
+gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe
+I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i
+5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi
+ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn
+MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ
+o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6
+zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN
+GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt
+r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK
+Z05phkOTOPu220+DkdRgfks+KzgHVZhepA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Label: "Chambers of Commerce Root - 2008"
+# Serial: 11806822484801597146
+# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
+# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
+# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
+IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
+MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
+dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
+EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
+MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
+28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
+VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
+DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
+5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
+Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
+UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
+Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
+hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
+HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
+YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
+ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
+IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
+HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
+DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
+PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
+5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
+FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
+pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
+xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
+tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
+jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
+fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
+d0jQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Label: "Global Chambersign Root - 2008"
+# Serial: 14541511773111788494
+# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3
+# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c
+# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca
+-----BEGIN CERTIFICATE-----
+MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
+aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
+MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
+cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
+A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
+BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
+KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
+G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
+zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
+ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
+HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
+Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
+yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
+beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
+wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
+zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
+BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
+ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
+ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
+YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
+CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
+KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
+hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
+UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
+X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
+fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
+a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
+Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
+SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
+AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
+M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
+v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
-----END CERTIFICATE-----
-Go Daddy Root Certificate Authority - G2
-========================================
------BEGIN CERTIFICATE-----
-MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT
-B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu
-MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
-MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
-b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G
-A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
-hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq
-9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD
-+qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd
-fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl
-NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC
-MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9
-BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac
-vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r
-5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV
-N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
-LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1
------END CERTIFICATE-----
-
-Starfield Root Certificate Authority - G2
-=========================================
------BEGIN CERTIFICATE-----
-MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT
-B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s
-b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0
-eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw
-DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg
-VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv
-W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs
-bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk
-N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf
-ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU
-JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
-AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol
-TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx
-4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw
-F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
-pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ
-c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
------END CERTIFICATE-----
-
-Starfield Services Root Certificate Authority - G2
-==================================================
------BEGIN CERTIFICATE-----
-MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT
-B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s
-b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl
-IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV
-BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT
-dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg
-Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2
-h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa
-hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP
-LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB
-rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw
-AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG
-SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP
-E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy
-xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
-iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza
-YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6
------END CERTIFICATE-----
-
-AffirmTrust Commercial
-======================
------BEGIN CERTIFICATE-----
-MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS
-BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw
-MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly
-bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb
-DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV
-C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6
-BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww
-MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV
-HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
-AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG
-hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi
-qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv
-0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh
-sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
------END CERTIFICATE-----
-
-AffirmTrust Networking
-======================
------BEGIN CERTIFICATE-----
-MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS
-BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw
-MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly
-bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE
-Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI
-dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24
-/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb
-h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV
-HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
-AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu
-UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6
-12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23
-WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9
-/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
------END CERTIFICATE-----
-
-AffirmTrust Premium
-===================
------BEGIN CERTIFICATE-----
-MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS
-BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy
-OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy
-dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
-MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn
-BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV
-5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs
-+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd
-GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R
-p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI
-S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04
-6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5
-/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo
-+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB
-/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv
-MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
-Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC
-6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S
-L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK
-+4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV
-BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg
-IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60
-g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb
-zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw==
------END CERTIFICATE-----
-
-AffirmTrust Premium ECC
-=======================
------BEGIN CERTIFICATE-----
-MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV
-BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx
-MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U
-cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA
-IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ
-N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW
-BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK
-BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X
-57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM
-eQ==
------END CERTIFICATE-----
-
-Certum Trusted Network CA
-=========================
------BEGIN CERTIFICATE-----
-MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK
-ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy
-MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU
-ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
-MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC
-AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC
-l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J
-J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4
-fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0
-cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB
-Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw
-DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj
-jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1
-mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj
-Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
-----END CERTIFICATE-----
-TWCA Root Certification Authority
-=================================
------BEGIN CERTIFICATE-----
-MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ
-VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG
-EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB
-IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx
-QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC
-oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP
-4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r
-y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB
-BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG
-9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC
-mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW
-QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY
-T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny
-Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
------END CERTIFICATE-----
-
-Security Communication RootCA2
-==============================
------BEGIN CERTIFICATE-----
-MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
-U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh
-dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC
-SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy
-aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++
-+T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R
-3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV
-spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K
-EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8
-QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
-CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj
-u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk
-3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q
-tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29
-mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
------END CERTIFICATE-----
-
-EC-ACC
-======
------BEGIN CERTIFICATE-----
-MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE
-BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w
-ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD
-VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE
-CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT
-BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7
-MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt
-SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl
-Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh
-cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK
-w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT
-ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4
-HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a
-E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw
-0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
-BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD
-VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0
-Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l
-dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ
-lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa
-Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe
-l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2
-E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D
-5EI=
------END CERTIFICATE-----
-
-Hellenic Academic and Research Institutions RootCA 2011
-=======================================================
------BEGIN CERTIFICATE-----
-MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT
-O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y
-aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z
-IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT
-AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z
-IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo
-IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI
-1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa
-71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u
-8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH
-3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/
-MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8
-MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu
-b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt
-XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
-TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD
-/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N
-7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4
------END CERTIFICATE-----
-
-Actalis Authentication Root CA
-==============================
------BEGIN CERTIFICATE-----
-MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM
-BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE
-AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky
-MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz
-IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
-IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ
-wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa
-by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6
-zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f
-YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2
-oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l
-EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7
-hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8
-EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5
-jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY
-iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
-ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI
-WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0
-JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx
-K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+
-Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC
-4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo
-2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz
-lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem
-OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9
-vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
------END CERTIFICATE-----
-
-Trustis FPS Root CA
-===================
------BEGIN CERTIFICATE-----
-MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG
-EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290
-IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV
-BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ
-KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ
-RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk
-H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa
-cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt
-o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA
-AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd
-BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c
-GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC
-yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P
-8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV
-l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl
-iB6XzCGcKQENZetX2fNXlrtIzYE=
------END CERTIFICATE-----
-
-Buypass Class 2 Root CA
-=======================
------BEGIN CERTIFICATE-----
-MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU
-QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X
-DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1
-eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw
-DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1
-g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn
-9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b
-/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU
-CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff
-awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI
-zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn
-Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX
-Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs
-M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD
-VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF
-AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
-A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI
-osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S
-aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd
-DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD
-LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0
-oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC
-wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS
-CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN
-rJgWVqA=
------END CERTIFICATE-----
-
-Buypass Class 3 Root CA
-=======================
------BEGIN CERTIFICATE-----
-MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU
-QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X
-DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1
-eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw
-DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH
-sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR
-5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh
-7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ
-ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH
-2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV
-/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ
-RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA
-Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq
-j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD
-VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF
-AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
-cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G
-uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG
-Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8
-ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2
-KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz
-6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug
-UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe
-eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi
-Cp/HuZc=
------END CERTIFICATE-----
-
-T-TeleSec GlobalRoot Class 3
-============================
------BEGIN CERTIFICATE-----
-MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM
-IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU
-cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx
-MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz
-dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD
-ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK
-9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU
-NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF
-iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W
-0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA
-MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr
-AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb
-fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT
-ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h
-P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
-e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw==
------END CERTIFICATE-----
-
-EE Certification Centre Root CA
-===============================
------BEGIN CERTIFICATE-----
-MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG
-EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy
-dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw
-MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB
-UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy
-ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB
-DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM
-TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2
-rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw
-93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN
-P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T
-AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ
-MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF
-BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj
-xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM
-lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
-uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU
-3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM
-dcGWxZ0=
------END CERTIFICATE-----
-
-D-TRUST Root Class 3 CA 2 2009
-==============================
------BEGIN CERTIFICATE-----
-MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK
-DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe
-Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE
-LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw
-DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD
-ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA
-BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv
-KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z
-p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC
-AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ
-4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y
-eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw
-MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G
-PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw
-OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm
-2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
-o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV
-dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph
-X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I=
------END CERTIFICATE-----
-
-D-TRUST Root Class 3 CA 2 EV 2009
-=================================
------BEGIN CERTIFICATE-----
-MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK
-DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw
-OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK
-DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw
-OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS
-egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh
-zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T
-7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60
-sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35
-11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv
-cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v
-ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El
-MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp
-b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh
-c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+
-PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
-nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX
-ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA
-NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv
-w9y4AyHqnxbxLFS1
------END CERTIFICATE-----
-
-CA Disig Root R2
-================
------BEGIN CERTIFICATE-----
-MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw
-EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp
-ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx
-EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp
-c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC
-w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia
-xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7
-A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S
-GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV
-g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa
-5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE
-koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A
-Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i
-Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV
-HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u
-Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
-tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV
-sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je
-dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8
-1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx
-mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01
-utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0
-sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg
-UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV
-7+ZtsH8tZ/3zbBt1RqPlShfppNcL
------END CERTIFICATE-----
-
-ACCVRAIZ1
-=========
------BEGIN CERTIFICATE-----
-MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB
-SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1
-MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH
-UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC
-DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM
-jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0
-RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD
-aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ
-0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG
-WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7
-8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR
-5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J
-9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK
-Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw
-Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu
-Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
-VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM
-Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA
-QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh
-AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA
-YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj
-AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA
-IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk
-aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0
-dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2
-MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI
-hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E
-R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN
-YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49
-nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ
-TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3
-sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
-I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg
-Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd
-3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p
-EfbRD0tVNEYqi4Y7
------END CERTIFICATE-----
-
-TWCA Global Root CA
-===================
------BEGIN CERTIFICATE-----
-MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT
-CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD
-QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK
-EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg
-Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C
-nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV
-r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR
-Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV
-tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W
-KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99
-sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p
-yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn
-kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI
-zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC
-AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g
-cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
-LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M
-8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg
-/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg
-lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP
-A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m
-i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8
-EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3
-zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0=
------END CERTIFICATE-----
-
-TeliaSonera Root CA v1
-======================
------BEGIN CERTIFICATE-----
-MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE
-CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4
-MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW
-VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+
-6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA
-3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k
-B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn
-Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH
-oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3
-F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ
-oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7
-gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc
-TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB
-AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW
-DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm
-zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
-0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW
-pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV
-G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc
-c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT
-JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2
-qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6
-Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems
-WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
------END CERTIFICATE-----
-
-E-Tugra Certification Authority
-===============================
------BEGIN CERTIFICATE-----
-MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w
-DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls
-ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
-ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw
-NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx
-QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl
-cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD
-DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
-MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd
-hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K
-CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g
-ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ
-BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0
-E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz
-rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq
-jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
-rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5
-dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB
-/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG
-MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK
-kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO
-XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807
-VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo
-a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc
-dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV
-KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT
-Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0
-8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G
-C7TbO6Orb1wdtn7os4I07QZcJA==
------END CERTIFICATE-----
-
-T-TeleSec GlobalRoot Class 2
-============================
------BEGIN CERTIFICATE-----
-MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM
-IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU
-cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx
-MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz
-dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD
-ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ
-SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F
-vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970
-2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV
-WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA
-MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy
-YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4
-r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf
-vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR
-3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
-9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg==
------END CERTIFICATE-----
-
-Atos TrustedRoot 2011
-=====================
------BEGIN CERTIFICATE-----
-MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU
-cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4
-MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG
-A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV
-hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr
-54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+
-DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320
-HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR
-z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R
-l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ
-bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB
-CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h
-k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh
-TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9
-61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G
-3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
------END CERTIFICATE-----
-
-QuoVadis Root CA 1 G3
-=====================
------BEGIN CERTIFICATE-----
-MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG
-A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv
-b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN
-MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg
-RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE
-PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm
-PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6
-Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN
-ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l
-g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV
-7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX
-9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f
-iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg
-t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
-AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI
-hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
-MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3
-GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct
-Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP
-+V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh
-3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa
-wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6
-O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0
-FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV
-hMJKzRwuJIczYOXD
------END CERTIFICATE-----
-
-QuoVadis Root CA 2 G3
-=====================
------BEGIN CERTIFICATE-----
-MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG
-A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv
-b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN
-MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg
-RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh
-ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY
-NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t
-oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o
-MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l
-V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo
-L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ
-sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD
-6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh
-lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
-AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI
-hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
-AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K
-pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9
-x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz
-dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X
-U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw
-mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD
-zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN
-JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr
-O3jtZsSOeWmD3n+M
------END CERTIFICATE-----
-
-QuoVadis Root CA 3 G3
-=====================
------BEGIN CERTIFICATE-----
-MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG
-A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv
-b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN
-MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg
-RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286
-IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL
-Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe
-6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3
-I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U
-VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7
-5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi
-Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM
-dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt
-rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
-AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI
-hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
-KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS
-t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ
-TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du
-DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib
-Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD
-hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX
-0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW
-dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2
-PpxxVJkES/1Y+Zj0
------END CERTIFICATE-----
-
-DigiCert Assured ID Root G2
-===========================
------BEGIN CERTIFICATE-----
-MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw
-IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw
-MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL
-ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw
-ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH
-35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq
-bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw
-VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP
-YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn
-lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO
-w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv
-0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz
-d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW
-hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M
-jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+# Issuer: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903
+# Subject: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903
+# Label: "Certinomis - Autorité Racine"
+# Serial: 1
+# MD5 Fingerprint: 7f:30:78:8c:03:e3:ca:c9:0a:e2:c9:ea:1e:aa:55:1a
+# SHA1 Fingerprint: 2e:14:da:ec:28:f0:fa:1e:8e:38:9a:4e:ab:eb:26:c0:0a:d3:83:c3
+# SHA256 Fingerprint: fc:bf:e2:88:62:06:f7:2b:27:59:3c:8b:07:02:97:e1:2d:76:9e:d1:0e:d7:93:07:05:a8:09:8e:ff:c1:4d:17
+-----BEGIN CERTIFICATE-----
+MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET
+MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk
+BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4
+Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl
+cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0
+aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY
+F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N
+8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe
+rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K
+/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu
+7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC
+28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6
+lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E
+nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB
+0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09
+5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj
+WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN
+jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ
+KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s
+ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM
+OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q
+619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn
+2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj
+o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v
+nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG
+5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq
+pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb
+dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0
+BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Root CA Generalitat Valenciana O=Generalitat Valenciana OU=PKIGVA
+# Subject: CN=Root CA Generalitat Valenciana O=Generalitat Valenciana OU=PKIGVA
+# Label: "Root CA Generalitat Valenciana"
+# Serial: 994436456
+# MD5 Fingerprint: 2c:8c:17:5e:b1:54:ab:93:17:b5:36:5a:db:d1:c6:f2
+# SHA1 Fingerprint: a0:73:e5:c5:bd:43:61:0d:86:4c:21:13:0a:85:58:57:cc:9c:ea:46
+# SHA256 Fingerprint: 8c:4e:df:d0:43:48:f3:22:96:9e:7e:29:a4:cd:4d:ca:00:46:55:06:1c:16:e1:b0:76:42:2e:f3:42:ad:63:0e
+-----BEGIN CERTIFICATE-----
+MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJF
+UzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJ
+R1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcN
+MDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3WjBoMQswCQYDVQQGEwJFUzEfMB0G
+A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScw
+JQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+
+WmmmO3I2F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKj
+SgbwJ/BXufjpTjJ3Cj9BZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGl
+u6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQD0EbtFpKd71ng+CT516nDOeB0/RSrFOy
+A8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXteJajCq+TA81yc477OMUxk
+Hl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMBAAGjggM7
+MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBr
+aS5ndmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIIC
+IwYKKwYBBAG/VQIBADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8A
+cgBpAGQAYQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIA
+YQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIAYQBsAGkAdABhAHQAIABWAGEA
+bABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQByAGEAYwBpAPMA
+bgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA
+aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMA
+aQBvAG4AYQBtAGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQA
+ZQAgAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEA
+YwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBuAHQAcgBhACAAZQBuACAAbABhACAA
+ZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAAOgAvAC8AdwB3AHcA
+LgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0dHA6
+Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+y
+eAT8MIGVBgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQsw
+CQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0G
+A1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVu
+Y2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRhTvW1yEICKrNcda3Fbcrn
+lD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdzCkj+IHLt
+b8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg
+9J63NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XF
+ducTZnV+ZfsBn5OHiJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmC
+IoaZM3Fa6hlXPZHNqcCjbgcTpsnt+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03
+# Subject: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03
+# Label: "A-Trust-nQual-03"
+# Serial: 93214
+# MD5 Fingerprint: 49:63:ae:27:f4:d5:95:3d:d8:db:24:86:b8:9c:07:53
+# SHA1 Fingerprint: d3:c0:63:f2:19:ed:07:3e:34:ad:5d:75:0b:32:76:29:ff:d5:9a:f2
+# SHA256 Fingerprint: 79:3c:bf:45:59:b9:fd:e3:8a:b2:2d:f1:68:69:f6:98:81:ae:14:c4:b0:13:9a:c7:88:a7:8a:1a:fc:ca:02:fb
+-----BEGIN CERTIFICATE-----
+MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJB
+VDFIMEYGA1UECgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBp
+bSBlbGVrdHIuIERhdGVudmVya2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5R
+dWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5RdWFsLTAzMB4XDTA1MDgxNzIyMDAw
+MFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgwRgYDVQQKDD9BLVRy
+dXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0ZW52
+ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMM
+EEEtVHJ1c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCtPWFuA/OQO8BBC4SAzewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUj
+lUC5B3ilJfYKvUWG6Nm9wASOhURh73+nyfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZ
+znF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPESU7l0+m0iKsMrmKS1GWH
+2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4iHQF63n1
+k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs
+2e3Vcuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYD
+VR0OBAoECERqlWdVeRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
+AQEAVdRU0VlIXLOThaq/Yy/kgM40ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fG
+KOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmrsQd7TZjTXLDR8KdCoLXEjq/+
+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZdJXDRZslo+S4R
+FGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS
+mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmE
+DNuxUCAKGkq6ahq97BvIxYSazQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2011"
+# Serial: 0
+# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
+# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
+# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Trustis Limited OU=Trustis FPS Root CA
+# Subject: O=Trustis Limited OU=Trustis FPS Root CA
+# Label: "Trustis FPS Root CA"
+# Serial: 36053640375399034304724988975563710553
+# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d
+# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04
+# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
+MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
+ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
+MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
+MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
+iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
+vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
+0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
+OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
+BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
+FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
+GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
+1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
+jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
+ZetX2fNXlrtIzYE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Label: "StartCom Certification Authority"
+# Serial: 45
+# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16
+# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0
+# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11
+-----BEGIN CERTIFICATE-----
+MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul
+F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC
+ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w
+ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk
+aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0
+YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg
+c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93
+d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG
+CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1
+dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF
+wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS
+Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst
+0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc
+pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl
+CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF
+P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK
+1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm
+KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE
+JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ
+8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm
+fyWl8kgAwKQB2j8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd.
+# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd.
+# Label: "StartCom Certification Authority G2"
+# Serial: 59
+# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64
+# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17
+# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95
+-----BEGIN CERTIFICATE-----
+MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1
+OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG
+A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ
+JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD
+vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo
+D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/
+Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW
+RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK
+HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN
+nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM
+0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i
+UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9
+Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg
+TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
+AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL
+BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K
+2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX
+UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl
+6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK
+9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ
+HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY
+XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l
+IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo
+hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr
+so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Label: "EE Certification Centre Root CA"
+# Serial: 112324828676200291871926431888494945866
+# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f
+# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7
+# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76
+-----BEGIN CERTIFICATE-----
+MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
+MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
+czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
+CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
+MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
+ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
+b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
+euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
+bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
+MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
+1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
+zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
+BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
+BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
+E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
+uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
+iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
+GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Aralık 2007
+# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Aralık 2007
+# Label: "TURKTRUST Certificate Services Provider Root 2007"
+# Serial: 1
+# MD5 Fingerprint: 2b:70:20:56:86:82:a0:18:c8:07:53:12:28:70:21:72
+# SHA1 Fingerprint: f1:7f:6f:b6:31:dc:99:e3:a3:c8:7f:fe:1c:f1:81:10:88:d9:60:33
+# SHA256 Fingerprint: 97:8c:d9:66:f2:fa:a0:7b:a7:aa:95:00:d9:c0:2e:9d:77:f2:cd:ad:a6:ad:6b:a7:4a:f4:b9:1c:66:59:3c:50
+-----BEGIN CERTIFICATE-----
+MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc
+UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
+c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS
+S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg
+SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx
+OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry
+b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC
+VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE
+sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F
+ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY
+KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG
++7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG
+HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P
+IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M
+733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk
+Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G
+CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW
+AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I
+aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5
+mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa
+XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ
+qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Raiz del Estado Venezolano O=Sistema Nacional de Certificacion Electronica OU=Superintendencia de Servicios de Certificacion Electronica
+# Subject: CN=PSCProcert O=Sistema Nacional de Certificacion Electronica OU=Proveedor de Certificados PROCERT
+# Label: "PSCProcert"
+# Serial: 11
+# MD5 Fingerprint: e6:24:e9:12:01:ae:0c:de:8e:85:c4:ce:a3:12:dd:ec
+# SHA1 Fingerprint: 70:c1:8d:74:b4:28:81:0a:e4:fd:a5:75:d7:01:9f:99:b0:3d:50:74
+# SHA256 Fingerprint: 3c:fc:3c:14:d1:f6:84:ff:17:e3:8c:43:ca:44:0c:00:b9:67:ec:93:3e:8b:fe:06:4c:a1:d7:2c:90:f2:ad:b0
+-----BEGIN CERTIFICATE-----
+MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1
+dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s
+YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz
+dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0
+aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh
+IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ
+KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw
+MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy
+b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx
+KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG
+A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u
+aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9
+7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74
+BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G
+ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9
+JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0
+PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2
+0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH
+0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/
+6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m
+v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7
+K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev
+bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw
+MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w
+MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD
+gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0
+b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh
+bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0
+cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp
+ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg
+ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq
+hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD
+AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w
+MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag
+RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t
+UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl
+cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v
+Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG
+AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN
+AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS
+1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB
+3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv
+Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh
+HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm
+pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz
+sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE
+qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb
+mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9
+opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H
+YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km
+-----END CERTIFICATE-----
+
+# Issuer: CN=China Internet Network Information Center EV Certificates Root O=China Internet Network Information Center
+# Subject: CN=China Internet Network Information Center EV Certificates Root O=China Internet Network Information Center
+# Label: "China Internet Network Information Center EV Certificates Root"
+# Serial: 1218379777
+# MD5 Fingerprint: 55:5d:63:00:97:bd:6a:97:f5:67:ab:4b:fb:6e:63:15
+# SHA1 Fingerprint: 4f:99:aa:93:fb:2b:d1:37:26:a1:99:4a:ce:7f:f0:05:f2:93:5d:1e
+# SHA256 Fingerprint: 1c:01:c6:f4:db:b2:fe:fc:22:55:8b:2b:ca:32:56:3f:49:84:4a:cf:c3:2b:7b:e4:b0:ff:59:9f:9e:8c:7a:f7
+-----BEGIN CERTIFICATE-----
+MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMC
+Q04xMjAwBgNVBAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24g
+Q2VudGVyMUcwRQYDVQQDDD5DaGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0
+aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMgUm9vdDAeFw0xMDA4MzEwNzExMjVa
+Fw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAGA1UECgwpQ2hpbmEg
+SW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMMPkNo
+aW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRp
+ZmljYXRlcyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z
+7r07eKpkQ0H1UN+U8i6yjUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//
+DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV98YPjUesWgbdYavi7NifFy2cyjw1l1Vx
+zUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2HklY0bBoQCxfVWhyXWIQ8
+hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23KzhmBsUs
+4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54u
+gQEC7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oY
+NJKiyoOCWTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4E
+FgQUfHJLOcfA22KlT5uqGDSSosqDglkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3
+j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd50XPFtQO3WKwMVC/GVhMPMdoG
+52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM7+czV0I664zB
+echNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws
+ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrI
+zo9uoV1/A3U05K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATy
+wy39FCqQmbkHzJ8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Swisscom Root CA 2 O=Swisscom OU=Digital Certificate Services
+# Subject: CN=Swisscom Root CA 2 O=Swisscom OU=Digital Certificate Services
+# Label: "Swisscom Root CA 2"
+# Serial: 40698052477090394928831521023204026294
+# MD5 Fingerprint: 5b:04:69:ec:a5:83:94:63:18:a7:86:d0:e4:f2:6e:19
+# SHA1 Fingerprint: 77:47:4f:c6:30:e4:0f:4c:47:64:3f:84:ba:b8:c6:95:4a:8a:41:ec
+# SHA256 Fingerprint: f0:9b:12:2c:71:14:f4:a0:9b:d4:ea:4f:4a:99:d5:58:b4:6e:4c:25:cd:81:14:0d:29:c0:56:13:91:4c:38:41
+-----BEGIN CERTIFICATE-----
+MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBk
+MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0
+YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg
+Q0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2MjUwNzM4MTRaMGQxCzAJBgNVBAYT
+AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp
+Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIICIjAN
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvEr
+jw0DzpPMLgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r
+0rk0X2s682Q2zsKwzxNoysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f
+2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJwDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVP
+ACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpHWrumnf2U5NGKpV+GY3aF
+y6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1aSgJA/MTA
+tukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL
+6yxSNLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0
+uPoTXGiTOmekl9AbmbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrAL
+acywlKinh/LTSlDcX3KwFnUey7QYYpqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velh
+k6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3qPyZ7iVNTA6z00yPhOgpD/0Q
+VAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw
+FDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O
+BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqh
+b97iEoHF8TwuMA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4R
+fbgZPnm3qKhyN2abGu2sEzsOv2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv
+/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ82YqZh6NM4OKb3xuqFp1mrjX2lhI
+REeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLzo9v/tdhZsnPdTSpx
+srpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcsa0vv
+aGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciAT
+woCqISxxOQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99n
+Bjx8Oto0QuFmtEYE3saWmA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5W
+t6NlUe07qxS/TFED6F+KBZvuim6c779o+sjaC+NCydAXFJy3SuCvkychVSa1ZC+N
+8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TCrvJcwhbtkj6EPnNgiLx2
+9CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX5OfNeOI5
+wSsSnqaeG8XmDtkx2Q==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Swisscom Root EV CA 2 O=Swisscom OU=Digital Certificate Services
+# Subject: CN=Swisscom Root EV CA 2 O=Swisscom OU=Digital Certificate Services
+# Label: "Swisscom Root EV CA 2"
+# Serial: 322973295377129385374608406479535262296
+# MD5 Fingerprint: 7b:30:34:9f:dd:0a:4b:6b:35:ca:31:51:28:5d:ae:ec
+# SHA1 Fingerprint: e7:a1:90:29:d3:d5:52:dc:0d:0f:c6:92:d3:ea:88:0d:15:2e:1a:6b
+# SHA256 Fingerprint: d9:5f:ea:3c:a4:ee:dc:e7:4c:d7:6e:75:fc:6d:1f:f6:2c:44:1f:0f:a8:bc:77:f0:34:b1:9e:5d:b2:58:01:5d
+-----BEGIN CERTIFICATE-----
+MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAw
+ZzELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdp
+dGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290
+IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcNMzEwNjI1MDg0NTA4WjBnMQswCQYD
+VQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2Vy
+dGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYgQ0Eg
+MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7Bx
+UglgRCgzo3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD
+1ycfMQ4jFrclyxy0uYAyXhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPH
+oCE2G3pXKSinLr9xJZDzRINpUKTk4RtiGZQJo/PDvO/0vezbE53PnUgJUmfANykR
+HvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8LiqG12W0OfvrSdsyaGOx9/
+5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaHZa0zKcQv
+idm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHL
+OdAGalNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaC
+NYGu+HuB5ur+rPQam3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f
+46Fq9mDU5zXNysRojddxyNMkM3OxbPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCB
+UWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDixzgHcgplwLa7JSnaFp6LNYth
+7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/BAQDAgGGMB0G
+A1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED
+MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWB
+bj2ITY1x0kbBbkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6x
+XCX5145v9Ydkn+0UjrgEjihLj6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98T
+PLr+flaYC/NUn81ETm484T4VvwYmneTwkLbUwp4wLh/vx3rEUMfqe9pQy3omywC0
+Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7XwgiG/W9mR4U9s70
+WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH59yL
+Gn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm
+7JFe3VE/23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4S
+nr8PyQUQ3nqjsTzyP6WqJ3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VN
+vBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyAHmBR3NdUIR7KYndP+tiPsys6DXhyyWhB
+WkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/giuMod89a2GQ+fYWVq6nTI
+fI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuWl8PVP3wb
+I+2ksx0WckNLIOFZfsLorSa/ovc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R1 O=Disig a.s.
+# Subject: CN=CA Disig Root R1 O=Disig a.s.
+# Label: "CA Disig Root R1"
+# Serial: 14052245610670616104
+# MD5 Fingerprint: be:ec:11:93:9a:f5:69:21:bc:d7:c1:c0:67:89:cc:2a
+# SHA1 Fingerprint: 8e:1c:74:f8:a6:20:b9:e5:8a:f4:61:fa:ec:2b:47:56:51:1a:52:c6
+# SHA256 Fingerprint: f9:6f:23:f4:c3:e7:9c:07:7a:46:98:8d:5a:f5:90:06:76:a0:f0:39:cb:64:5d:d1:75:49:b2:16:c8:24:40:ce
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy
+MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk
+D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o
+OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A
+fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe
+IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n
+oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK
+/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj
+rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD
+3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE
+7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC
+yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd
+qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI
+hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR
+xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA
+SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo
+HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB
+emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC
+AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb
+7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x
+DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk
+F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF
+a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT
+Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi
+# Subject: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi
+# Label: "E-Tugra Certification Authority"
+# Serial: 7667447206703254355
+# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
IhNzbM8m9Yop5w==
-----END CERTIFICATE-----
-DigiCert Assured ID Root G3
-===========================
------BEGIN CERTIFICATE-----
-MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV
-UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD
-VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1
-MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
-d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ
-BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb
-RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs
-KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF
-UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy
-YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy
-1vUhZscv6pZjamVFkpUBtA==
------END CERTIFICATE-----
-
-DigiCert Global Root G2
-=======================
------BEGIN CERTIFICATE-----
-MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw
-HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx
-MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3
-dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ
-kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO
-3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV
-BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM
-UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB
-o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu
-5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr
-F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U
-WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH
-QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/
-iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
MrY=
-----END CERTIFICATE-----
-DigiCert Global Root G3
-=======================
------BEGIN CERTIFICATE-----
-MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV
-UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD
-VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw
-MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k
-aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C
-AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O
-YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP
-BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp
-Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y
-3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34
-VOKa5Vt8sycX
------END CERTIFICATE-----
-
-DigiCert Trusted Root G4
-========================
------BEGIN CERTIFICATE-----
-MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw
-HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1
-MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
-d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G
-CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp
-pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o
-k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa
-vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY
-QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6
-MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm
-mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7
-f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH
-dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8
-oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
-DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
-ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY
-ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr
-yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy
-7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah
-ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN
-5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb
-/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa
-5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK
-G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP
-82Z+
------END CERTIFICATE-----
-
-COMODO RSA Certification Authority
-==================================
------BEGIN CERTIFICATE-----
-MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE
-BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG
-A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC
-R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE
-ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB
-dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn
-dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ
-FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+
-5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG
-x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX
-2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL
-OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3
-sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C
-GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5
-WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
-FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w
-DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt
-rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+
-nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg
-tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW
-sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp
-pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA
-zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq
-ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52
-7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I
-LaZRfyHBNVOFBkpdn627G190
------END CERTIFICATE-----
-
-USERTrust RSA Certification Authority
-=====================================
------BEGIN CERTIFICATE-----
-MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE
-BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK
-ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE
-BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK
-ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz
-0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j
-Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn
-RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O
-+T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq
-/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE
-Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM
-lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8
-yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+
-eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
-BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
-MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW
-FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ
-7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ
-Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM
-8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi
-FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi
-yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c
-J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw
-sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx
-Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9
------END CERTIFICATE-----
-
-USERTrust ECC Certification Authority
-=====================================
------BEGIN CERTIFICATE-----
-MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC
-VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
-aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC
-VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
-aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2
-0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez
-nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV
-HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB
-HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu
-9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
------END CERTIFICATE-----
-
-GlobalSign ECC Root CA - R4
-===========================
------BEGIN CERTIFICATE-----
-MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb
-R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
-EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb
-R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
-EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl
-OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P
-AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV
-MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF
-JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q=
------END CERTIFICATE-----
-
-GlobalSign ECC Root CA - R5
-===========================
------BEGIN CERTIFICATE-----
-MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb
-R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
-EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb
-R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
-EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6
-SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS
-h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd
-BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx
-uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7
-yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3
------END CERTIFICATE-----
-
-Staat der Nederlanden Root CA - G3
-==================================
------BEGIN CERTIFICATE-----
-MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE
-CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
-Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC
-TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l
-ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y
-olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t
-x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy
-EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K
-Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur
-mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5
-1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp
-07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo
-FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE
-41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB
-AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu
-yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
-U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq
-KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1
-v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA
-8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b
-8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r
-mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq
-1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI
-JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV
-tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk=
------END CERTIFICATE-----
-
-Staat der Nederlanden EV Root CA
-================================
------BEGIN CERTIFICATE-----
-MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE
-CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
-RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M
-MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl
-cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk
-SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW
-O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r
-0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8
-Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV
-XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr
-08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV
-0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd
-74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx
-fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC
-MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa
-ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
-eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu
-c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq
-5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN
-b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN
-f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi
-5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4
-WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK
-DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy
-eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg==
------END CERTIFICATE-----
-
-IdenTrust Commercial Root CA 1
-==============================
------BEGIN CERTIFICATE-----
-MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG
-EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS
-b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES
-MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB
-IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld
-hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/
-mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi
-1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C
-XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl
-3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy
-NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV
-WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg
-xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix
-uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC
-AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI
-hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
-6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg
-ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt
-ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV
-YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX
-feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro
-kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe
-2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz
-Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R
-cGzM7vRX+Bi6hG6H
------END CERTIFICATE-----
-
-IdenTrust Public Sector Root CA 1
-=================================
------BEGIN CERTIFICATE-----
-MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG
-EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv
-ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV
-UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS
-b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy
-P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6
-Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI
-rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf
-qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS
-mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn
-ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh
-LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v
-iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL
-4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B
-Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw
-DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
-t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A
-mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt
-GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt
-m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx
-NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4
-Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI
-ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC
-ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ
-3Wl9af0AVqW3rLatt8o+Ae+c
------END CERTIFICATE-----
-
-Entrust Root Certification Authority - G2
-=========================================
------BEGIN CERTIFICATE-----
-MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV
-BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy
-bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug
-b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw
-HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
-DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx
-OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s
-eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP
-/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz
-HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU
-s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y
-TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx
-AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6
-0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z
-iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
-Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi
-nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+
-vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO
-e4pIb4tF9g==
------END CERTIFICATE-----
-
-Entrust Root Certification Authority - EC1
-==========================================
------BEGIN CERTIFICATE-----
-MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx
-FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn
-YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl
-ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
-IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw
-FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs
-LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg
-dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt
-IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy
-AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef
-9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
-FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h
-vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8
-kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
------END CERTIFICATE-----
-
-CFCA EV ROOT
-============
------BEGIN CERTIFICATE-----
-MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE
-CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB
-IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw
-MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD
-DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV
-BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD
-7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN
-uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW
-ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7
-xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f
-py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K
-gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol
-hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ
-tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf
-BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB
-/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
-ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q
-ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua
-4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG
-E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX
-BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn
-aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy
-PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX
-kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C
-ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
------END CERTIFICATE-----
-
-TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5
-====================================================
------BEGIN CERTIFICATE-----
-MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVFIxDzAN
-BgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp
-bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1Qg
-RWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAw
-ODA3MDFaFw0yMzA0MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0w
-SwYDVQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnE
-n2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBFbGVrdHJvbmlrIFNlcnRp
-ZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEApCUZ4WWe60ghUEoI5RHwWrom/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537
-jVJp45wnEFPzpALFp/kRGml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1m
-ep5Fimh34khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z5UNP
-9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0hO8EuPbJbKoCPrZV
-4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QIDAQABo0IwQDAdBgNVHQ4EFgQUVpkH
-HtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAJ5FdnsXSDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPo
-BP5yCccLqh0lVX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq
-URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nfpeYVhDfwwvJl
-lpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CFYv4HAqGEVka+lgqaE9chTLd8
-B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW+qtB4Uu2NQvAmxU=
------END CERTIFICATE-----
-
-Certinomis - Root CA
-====================
------BEGIN CERTIFICATE-----
-MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjETMBEGA1UEChMK
-Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAbBgNVBAMTFENlcnRpbm9taXMg
-LSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMzMTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIx
-EzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRD
-ZXJ0aW5vbWlzIC0gUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQos
-P5L2fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJflLieY6pOo
-d5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQVWZUKxkd8aRi5pwP5ynap
-z8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDFTKWrteoB4owuZH9kb/2jJZOLyKIOSY00
-8B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09x
-RLWtwHkziOC/7aOgFLScCbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE
-6OXWk6RiwsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJwx3t
-FvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SGm/lg0h9tkQPTYKbV
-PZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4F2iw4lNVYC2vPsKD2NkJK/DAZNuH
-i5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZngWVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGj
-YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I
-6tNxIqSSaHh02TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
-AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/0KGRHCwPT5iV
-WVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWwF6YSjNRieOpWauwK0kDDPAUw
-Pk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZSg081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAX
-lCOotQqSD7J6wWAsOMwaplv/8gzjqh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJ
-y29SWwNyhlCVCNSNh4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9
-Iff/ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8Vbtaw5Bng
-DwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwjY/M50n92Uaf0yKHxDHYi
-I0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nM
-cyrDflOR1m749fPH0FFNjkulW+YZFzvWgQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVr
-hkIGuUE=
------END CERTIFICATE-----
-
-OISTE WISeKey Global Root GB CA
-===============================
------BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBtMQswCQYDVQQG
-EwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
-ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAw
-MzJaFw0zOTEyMDExNTEwMzFaMG0xCzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYD
-VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEds
-b2JhbCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3HEokKtaX
-scriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGxWuR51jIjK+FTzJlFXHtP
-rby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk
-9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNku7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4o
-Qnc/nSMbsrY9gBQHTC5P99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvg
-GUpuuy9rM2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
-/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZI
-hvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrghcViXfa43FK8+5/ea4n32cZiZBKpD
-dHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0
-VQreUGdNZtGn//3ZwLWoo4rOZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEui
-HZeeevJuQHHfaPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
-Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
------END CERTIFICATE-----
-
-SZAFIR ROOT CA2
-===============
------BEGIN CERTIFICATE-----
-MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQELBQAwUTELMAkG
-A1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6ZW5pb3dhIFMuQS4xGDAWBgNV
-BAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkwNzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJ
-BgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYD
-VQQDDA9TWkFGSVIgUk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5Q
-qEvNQLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT3PSQ1hNK
-DJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw3gAeqDRHu5rr/gsUvTaE
-2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr63fE9biCloBK0TXC5ztdyO4mTp4CEHCdJ
-ckm1/zuVnsHMyAHs6A6KCpbns6aH5db5BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwi
-ieDhZNRnvDF5YTy7ykHNXGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0P
-AQH/BAQDAgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsFAAOC
-AQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw8PRBEew/R40/cof5
-O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOGnXkZ7/e7DDWQw4rtTw/1zBLZpD67
-oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCPoky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul
-4+vJhaAlIDf7js4MNIThPIGyd05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6
-+/NNIxuZMzSgLvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
------END CERTIFICATE-----
-
-Certum Trusted Network CA 2
-===========================
------BEGIN CERTIFICATE-----
-MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCBgDELMAkGA1UE
-BhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMuQS4xJzAlBgNVBAsTHkNlcnR1
-bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIGA1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29y
-ayBDQSAyMCIYDzIwMTExMDA2MDgzOTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQ
-TDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENl
-cnRpZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENB
-IDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWADGSdhhuWZGc/IjoedQF9
-7/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+o
-CgCXhVqqndwpyeI1B+twTUrWwbNWuKFBOJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40b
-Rr5HMNUuctHFY9rnY3lEfktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2p
-uTRZCr+ESv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1mo130
-GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02isx7QBlrd9pPPV3WZ
-9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOWOZV7bIBaTxNyxtd9KXpEulKkKtVB
-Rgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgezTv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pye
-hizKV/Ma5ciSixqClnrDvFASadgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vM
-BhBgu4M1t15n3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZI
-hvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQF/xlhMcQSZDe28cmk4gmb3DW
-Al45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTfCVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuA
-L55MYIR4PSFk1vtBHxgP58l1cb29XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMo
-clm2q8KMZiYcdywmdjWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tM
-pkT/WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jbAoJnwTnb
-w3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksqP/ujmv5zMnHCnsZy4Ypo
-J/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Kob7a6bINDd82Kkhehnlt4Fj1F4jNy3eFm
-ypnTycUm/Q1oBEauttmbjL4ZvrHG8hnjXALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLX
-is7VmFxWlgPF7ncGNf/P5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7
-zAYspsbiDrW5viSP
------END CERTIFICATE-----
-
-Hellenic Academic and Research Institutions RootCA 2015
-=======================================================
------BEGIN CERTIFICATE-----
-MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcT
-BkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0
-aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
-YXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAx
-MTIxWjCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMg
-QWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNV
-BAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIw
-MTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDC+Kk/G4n8PDwEXT2QNrCROnk8Zlrv
-bTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+eh
-iGsxr/CL0BgzuNtFajT0AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+
-6PAQZe104S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06CojXd
-FPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV9Cz82XBST3i4vTwr
-i5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrDgfgXy5I2XdGj2HUb4Ysn6npIQf1F
-GQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2
-fu/Z8VFRfS0myGlZYeCsargqNhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9mu
-iNX6hME6wGkoLfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
-Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
-AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVdctA4GGqd83EkVAswDQYJKoZI
-hvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0IXtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+
-D1hYc2Ryx+hFjtyp8iY/xnmMsVMIM4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrM
-d/K4kPFox/la/vot9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+y
-d+2VZ5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/eaj8GsGsVn
-82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnhX9izjFk0WaSrT2y7Hxjb
-davYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQl033DlZdwJVqwjbDG2jJ9SrcR5q+ss7F
-Jej6A7na+RZukYT1HCjI/CbM1xyQVqdfbzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVt
-J94Cj8rDtSvK6evIIVM4pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGa
-JI7ZjnHKe7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0vm9q
-p/UsQu0yrbYhnr68
------END CERTIFICATE-----
-
-Hellenic Academic and Research Institutions ECC RootCA 2015
-===========================================================
------BEGIN CERTIFICATE-----
-MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0
-aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u
-cyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
-aCBJbnN0aXR1dGlvbnMgRUNDIFJvb3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEw
-MzcxMlowgaoxCzAJBgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmlj
-IEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUQwQgYD
-VQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIEVDQyBSb290
-Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKgQehLgoRc4vgxEZmGZE4JJS+dQS8KrjVP
-dJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJajq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoK
-Vlp8aQuqgAkkbH7BRqNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O
-BBYEFLQiC4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaeplSTA
-GiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7SofTUwJCA3sS61kFyjn
-dc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
------END CERTIFICATE-----
-
-Certplus Root CA G1
-===================
------BEGIN CERTIFICATE-----
-MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUAMD4xCzAJBgNV
-BAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMTAe
-Fw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhD
-ZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQAD
-ggIPADCCAgoCggIBANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHN
-r49aiZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt6kuJPKNx
-Qv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP0FG7Yn2ksYyy/yARujVj
-BYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTv
-LRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDEEW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2
-z4QTd28n6v+WZxcIbekN1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc
-4nBvCGrch2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCTmehd
-4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV4EJQeIQEQWGw9CEj
-jy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPOWftwenMGE9nTdDckQQoRb5fc5+R+
-ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
-A1UdDgQWBBSowcCbkahDFXxdBie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHY
-lwuBsTANBgkqhkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh
-66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7/SMNkPX0XtPG
-YX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BSS7CTKtQ+FjPlnsZlFT5kOwQ/
-2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F
-6ALEUz65noe8zDUa3qHpimOHZR4RKttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilX
-CNQ314cnrUlZp5GrRHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWe
-tUNy6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEVV/xuZDDC
-VRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5g4VCXA9DO2pJNdWY9BW/
-+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl++O/QmueD6i9a5jc2NvLi6Td11n0bt3+
-qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo=
------END CERTIFICATE-----
-
-Certplus Root CA G2
-===================
------BEGIN CERTIFICATE-----
-MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4xCzAJBgNVBAYT
-AkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMjAeFw0x
-NDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0
-cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IA
-BM0PW1aC3/BFGtat93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uN
-Am8xIk0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0PAQH/BAQD
-AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMB8GA1Ud
-IwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqGSM49BAMDA2gAMGUCMHD+sAvZ94OX7PNV
-HdTcswYO/jOYnYs5kGuUIe22113WTNchp+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjl
-vPl5adytRSv3tjFzzAalU5ORGpOucGpnutee5WEaXw==
------END CERTIFICATE-----
-
-OpenTrust Root CA G1
-====================
------BEGIN CERTIFICATE-----
-MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUAMEAxCzAJBgNV
-BAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5UcnVzdCBSb290IENBIEcx
-MB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAwMFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoM
-CU9wZW5UcnVzdDEdMBsGA1UEAwwUT3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEB
-AQUAA4ICDwAwggIKAoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7fa
-Yp6bwiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX/uMftk87
-ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR077F9jAHiOH3BX2pfJLKO
-YheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGPuY4zbGneWK2gDqdkVBFpRGZPTBKnjix9
-xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLxp2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO
-9z0M+Yo0FMT7MzUj8czxKselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq
-3ywgsNw2TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+WG+Oi
-n6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPwvFEVVJSmdz7QdFG9
-URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYYEQRVzXR7z2FwefR7LFxckvzluFqr
-TJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
-/zAdBgNVHQ4EFgQUl0YhVyE12jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/Px
-N3DlCPaTKbYwDQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E
-PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kfgLMtMrpkZ2Cv
-uVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbSFXJfLkur1J1juONI5f6ELlgK
-n0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLh
-X4SPgPL0DTatdrOjteFkdjpY3H1PXlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80
-nR14SohWZ25g/4/Ii+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcm
-GS3tTAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L9109S5zvE/
-bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/KyPu1svf0OnWZzsD2097+o
-4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJAwSQiumPv+i2tCqjI40cHLI5kqiPAlxA
-OXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj1oxx
------END CERTIFICATE-----
-
-OpenTrust Root CA G2
-====================
------BEGIN CERTIFICATE-----
-MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUAMEAxCzAJBgNV
-BAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5UcnVzdCBSb290IENBIEcy
-MB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoM
-CU9wZW5UcnVzdDEdMBsGA1UEAwwUT3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEB
-AQUAA4ICDwAwggIKAoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+
-Ntmh/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78eCbY2albz
-4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/61UWY0jUJ9gNDlP7ZvyCV
-eYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fEFY8ElggGQgT4hNYdvJGmQr5J1WqIP7wt
-UdGejeBSzFfdNTVY27SPJIjki9/ca1TSgSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz
-3GIZ38i1MH/1PCZ1Eb3XG7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj
-3CzMpSZyYhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaHvGOz
-9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4t/bQWVyJ98LVtZR0
-0dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/gh7PU3+06yzbXfZqfUAkBXKJOAGT
-y3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
-/zAdBgNVHQ4EFgQUajn6QiL35okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59
-M4PLuG53hq8wDQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz
-Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0nXGEL8pZ0keI
-mUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qTRmTFAHneIWv2V6CG1wZy7HBG
-S4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpTwm+bREx50B1ws9efAvSyB7DH5fitIw6mVskp
-EndI2S9G/Tvw/HRwkqWOOAgfZDC2t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ
-6e18CL13zSdkzJTaTkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97kr
-gCf2o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU3jg9CcCo
-SmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eAiN1nE28daCSLT7d0geX0
-YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14fWKGVyasvc0rQLW6aWQ9VGHgtPFGml4vm
-u7JwqkwR3v98KzfUetF3NI/n+UL3PIEMS1IK
------END CERTIFICATE-----
-
-OpenTrust Root CA G3
-====================
------BEGIN CERTIFICATE-----
-MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAxCzAJBgNVBAYT
-AkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5UcnVzdCBSb290IENBIEczMB4X
-DTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9w
-ZW5UcnVzdDEdMBsGA1UEAwwUT3BlblRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQA
-IgNiAARK7liuTcpm3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5B
-ta1doYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4GA1UdDwEB
-/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAf
-BgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAKBggqhkjOPQQDAwNpADBmAjEAj6jcnboM
-BBf6Fek9LykBl7+BFjNAk2z8+e2AcG+qj9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta
-3U1fJAuwACEl74+nBCZx4nxp5V2a+EEfOzmTk51V6s2N8fvB
------END CERTIFICATE-----
-
-ISRG Root X1
-============
------BEGIN CERTIFICATE-----
-MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAwTzELMAkGA1UE
-BhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNoIEdyb3VwMRUwEwYDVQQD
-EwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQG
-EwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMT
-DElTUkcgUm9vdCBYMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54r
-Vygch77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+0TM8ukj1
-3Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6UA5/TR5d8mUgjU+g4rk8K
-b4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sWT8KOEUt+zwvo/7V3LvSye0rgTBIlDHCN
-Aymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyHB5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ
-4Q7e2RCOFvu396j3x+UCB5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf
-1b0SHzUvKBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWnOlFu
-hjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTnjh8BCNAw1FtxNrQH
-usEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbwqHyGO0aoSCqI3Haadr8faqU9GY/r
-OPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CIrU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY
-9umbbjANBgkqhkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
-ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ3BebYhtF8GaV
-0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KKNFtY2PwByVS5uCbMiogziUwt
-hDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJw
-TdwJx4nLCgdNbOhdjsnvzqvHu7UrTkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nx
-e5AW0wdeRlN8NwdCjNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZA
-JzVcoyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq4RgqsahD
-YVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPAmRGunUHBcnWEvgJBQl9n
-JEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57demyPxgcYxn/eR44/KJ4EBs+lVDR3veyJ
-m+kXQ99b21/+jh5Xos1AnX5iItreGCc=
------END CERTIFICATE-----
-
-AC RAIZ FNMT-RCM
-================
------BEGIN CERTIFICATE-----
-MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsxCzAJBgNVBAYT
-AkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTAeFw0wODEw
-MjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJD
-TTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
-ggIBALpxgHpMhm5/yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcf
-qQgfBBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAzWHFctPVr
-btQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxFtBDXaEAUwED653cXeuYL
-j2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z374jNUUeAlz+taibmSXaXvMiwzn15Cou
-08YfxGyqxRxqAQVKL9LFwag0Jl1mpdICIfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mw
-WsXmo8RZZUc1g16p6DULmbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnT
-tOmlcYF7wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peSMKGJ
-47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2ZSysV4999AeU14EC
-ll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMetUqIJ5G+GR4of6ygnXYMgrwTJbFaa
-i0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
-FPd9xf3E6Jobd2Sn9R2gzL+HYJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1o
-dHRwOi8vd3d3LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
-nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1RXxlDPiyN8+s
-D8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYMLVN0V2Ue1bLdI4E7pWYjJ2cJ
-j+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrT
-Qfv6MooqtyuGC2mDOL7Nii4LcK2NJpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW
-+YJF1DngoABd15jmfZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7
-Ixjp6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp1txyM/1d
-8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B9kiABdcPUXmsEKvU7ANm
-5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wokRqEIr9baRRmW1FMdW4R58MD3R++Lj8UG
-rp1MYp3/RgT408m2ECVAdf4WqslKYIYvuu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
------END CERTIFICATE-----
-
-Amazon Root CA 1
-================
------BEGIN CERTIFICATE-----
-MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsFADA5MQswCQYD
-VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAxMB4XDTE1
-MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv
-bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBALJ4gHHKeNXjca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgH
-FzZM9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qwIFAGbHrQ
-gLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6VOujw5H5SNz/0egwLX0t
-dHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L93FcXmn/6pUCyziKrlA4b9v7LWIbxcce
-VOF34GfID5yHI9Y/QCB/IIDEgEw+OyQmjgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
-/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3
-DQEBCwUAA4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDIU5PM
-CCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUsN+gDS63pYaACbvXy
-8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vvo/ufQJVtMVT8QtPHRh8jrdkPSHCa
-2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2
-xJNDd2ZhwLnoQdeXeGADbkpyrqXRfboQnoZsG4q5WTP468SQvvG5
------END CERTIFICATE-----
-
-Amazon Root CA 2
-================
------BEGIN CERTIFICATE-----
-MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwFADA5MQswCQYD
-VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAyMB4XDTE1
-MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv
-bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
-ggIBAK2Wny2cSkxKgXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4
-kHbZW0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg1dKmSYXp
-N+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K8nu+NQWpEjTj82R0Yiw9
-AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvd
-fLC6HM783k81ds8P+HgfajZRRidhW+mez/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAEx
-kv8LV/SasrlX6avvDXbR8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSS
-btqDT6ZjmUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz7Mt0
-Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6+XUyo05f7O0oYtlN
-c/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI0u1ufm8/0i2BWSlmy5A5lREedCf+
-3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSw
-DPBMMPQFWAJI/TPlUq9LhONmUjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oA
-A7CXDpO8Wqj2LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
-+gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kSk5Nrp+gvU5LE
-YFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl7uxMMne0nxrpS10gxdr9HIcW
-xkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygmbtmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQ
-gj9sAq+uEjonljYE1x2igGOpm/HlurR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbW
-aQbLU8uz/mtBzUF+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoV
-Yh63n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE76KlXIx3
-KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H9jVlpNMKVv/1F2Rs76gi
-JUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT4PsJYGw=
------END CERTIFICATE-----
-
-Amazon Root CA 3
-================
------BEGIN CERTIFICATE-----
-MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5MQswCQYDVQQG
-EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAzMB4XDTE1MDUy
-NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ
-MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZB
-f8ANm+gBG1bG8lKlui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjr
-Zt6jQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSrttvXBp43
-rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkrBqWTrBqYaGFy+uGh0Psc
-eGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteMYyRIHN8wfdVoOw==
------END CERTIFICATE-----
-
-Amazon Root CA 4
-================
------BEGIN CERTIFICATE-----
-MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5MQswCQYDVQQG
-EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSA0MB4XDTE1MDUy
-NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ
-MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN
-/sGKe0uoe0ZLY7Bi9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri
-83BkM6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
-HQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WBMAoGCCqGSM49BAMDA2gA
-MGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlwCkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1
-AE47xDqUEpHJWEadIRNyp4iciuRMStuW1KyLa2tJElMzrdfkviT8tQp21KW8EA==
------END CERTIFICATE-----
-
-LuxTrust Global Root 2
-======================
------BEGIN CERTIFICATE-----
-MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQELBQAwRjELMAkG
-A1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNVBAMMFkx1eFRydXN0IEdsb2Jh
-bCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUwMzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEW
-MBQGA1UECgwNTHV4VHJ1c3QgUy5BLjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCC
-AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wm
-Kb3FibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTemhfY7RBi2
-xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1EMShduxq3sVs35a0VkBC
-wGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsnXpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm
-1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkm
-FRseTJIpgp7VkoGSQXAZ96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niF
-wpN6cj5mj5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4gDEa/
-a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+8kPREd8vZS9kzl8U
-ubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2jX5t/Lax5Gw5CMZdjpPuKadUiDTSQ
-MC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmHhFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB
-/zBCBgNVHSAEOzA5MDcGByuBKwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5
-Lmx1eHRydXN0Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
-+Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQELBQADggIBAGoZ
-FO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9BzZAcg4atmpZ1gDlaCDdLnIN
-H2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTOjFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW
-7MM3LGVYvlcAGvI1+ut7MV3CwRI9loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIu
-ZY+kt9J/Z93I055cqqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWA
-VWe+2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/JEAdemrR
-TxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKrezrnK+T+Tb/mjuuqlPpmt
-/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQfLSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc
-7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31I
-iyBMz2TWuJdGsE7RKlY6oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
------END CERTIFICATE-----
-
-TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1
-=============================================
------BEGIN CERTIFICATE-----
-MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIxGDAWBgNVBAcT
-D0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxpbXNlbCB2ZSBUZWtub2xvamlr
-IEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0wKwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24g
-TWVya2V6aSAtIEthbXUgU00xNjA0BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRp
-ZmlrYXNpIC0gU3VydW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYD
-VQQGEwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXllIEJpbGlt
-c2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklUQUsxLTArBgNVBAsTJEth
-bXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBTTTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11
-IFNNIFNTTCBLb2sgU2VydGlmaWthc2kgLSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAr3UwM6q7a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y8
-6Ij5iySrLqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INrN3wc
-wv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2XYacQuFWQfw4tJzh0
-3+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/iSIzL+aFCr2lqBs23tPcLG07xxO9
-WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4fAJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQU
-ZT/HiobGPN08VFw1+DrtUgxHV8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJ
-KoZIhvcNAQELBQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
-AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPfIPP54+M638yc
-lNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4lzwDGrpDxpa5RXI4s6ehlj2R
-e37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0j
-q5Rm+K37DwhuJi1/FwcJsoz7UMCflo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
------END CERTIFICATE-----
-
-GDCA TrustAUTH R5 ROOT
-======================
------BEGIN CERTIFICATE-----
-MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UEBhMCQ04xMjAw
-BgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8wHQYDVQQD
-DBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVow
-YjELMAkGA1UEBhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
-IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0B
-AQEFAAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJjDp6L3TQs
-AlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBjTnnEt1u9ol2x8kECK62p
-OqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+uKU49tm7srsHwJ5uu4/Ts765/94Y9cnrr
-pftZTqfrlYwiOXnhLQiPzLyRuEH3FMEjqcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ
-9Cy5WmYqsBebnh52nUpmMUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQ
-xXABZG12ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloPzgsM
-R6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3GkL30SgLdTMEZeS1SZ
-D2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeCjGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4
-oR24qoAATILnsn8JuLwwoC8N9VKejveSswoAHQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx
-9hoh49pwBiFYFIeFd3mqgnkCAwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlR
-MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
-p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZmDRd9FBUb1Ov9
-H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5COmSdI31R9KrO9b7eGZONn35
-6ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ryL3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd
-+PwyvzeG5LuOmCd+uh8W4XAR8gPfJWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQ
-HtZa37dG/OaG+svgIHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBD
-F8Io2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV09tL7ECQ
-8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQXR4EzzffHqhmsYzmIGrv
-/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrqT8p+ck0LcIymSLumoRT2+1hEmRSuqguT
-aaApJUqlyyvdimYHFngVV3Eb7PVHhPOeMTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
------END CERTIFICATE-----
-
-TrustCor RootCert CA-1
-======================
------BEGIN CERTIFICATE-----
-MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYDVQQGEwJQQTEP
-MA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3Ig
-U3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3Jp
-dHkxHzAdBgNVBAMMFlRydXN0Q29yIFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkx
-MjMxMTcyMzE2WjCBpDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFu
-YW1hIENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUGA1UECwwe
-VHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZUcnVzdENvciBSb290Q2Vy
-dCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv463leLCJhJrMxnHQFgKq1mq
-jQCj/IDHUHuO1CAmujIS2CNUSSUQIpidRtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4
-pQa81QBeCQryJ3pS/C3Vseq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0
-JEsq1pme9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CVEY4h
-gLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorWhnAbJN7+KIor0Gqw
-/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/DeOxCbeKyKsZn3MzUOcwHwYDVR0j
-BBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
-AYYwDQYJKoZIhvcNAQELBQADggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5
-mDo4Nvu7Zp5I/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf
-ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZyonnMlo2HD6C
-qFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djtsL1Ac59v2Z3kf9YKVmgenFK+P
-3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdNzl/HHk484IkzlQsPpTLWPFp5LBk=
------END CERTIFICATE-----
-
-TrustCor RootCert CA-2
-======================
------BEGIN CERTIFICATE-----
-MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNVBAYTAlBBMQ8w
-DQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQwIgYDVQQKDBtUcnVzdENvciBT
-eXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRydXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0
-eTEfMB0GA1UEAwwWVHJ1c3RDb3IgUm9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEy
-MzExNzI2MzlaMIGkMQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5h
-bWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
-cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29yIFJvb3RDZXJ0
-IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnIG7CKqJiJJWQdsg4foDSq8Gb
-ZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9Nk
-RvRUqdw6VC0xK5mC8tkq1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1
-oYxOdqHp2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nKDOOb
-XUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hapeaz6LMvYHL1cEksr1
-/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF3wP+TfSvPd9cW436cOGlfifHhi5q
-jxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQP
-eSghYA2FFn3XVDjxklb9tTNMg9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+Ctg
-rKAmrhQhJ8Z3mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh
-8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAdBgNVHQ4EFgQU
-2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6UnrybPZx9mCAZ5YwwYrIwDwYD
-VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/h
-Osh80QA9z+LqBrWyOrsGS2h60COXdKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnp
-kpfbsEZC89NiqpX+MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv
-2wnL/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RXCI/hOWB3
-S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYaZH9bDTMJBzN7Bj8RpFxw
-PIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dv
-DDqPys/cA8GiCcjl/YBeyGBCARsaU1q7N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYU
-RpFHmygk71dSTlxCnKr3Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANE
-xdqtvArBAs8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp5KeX
-RKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu1uwJ
------END CERTIFICATE-----
-
-TrustCor ECA-1
-==============
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYDVQQGEwJQQTEP
-MA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3Ig
-U3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3Jp
-dHkxFzAVBgNVBAMMDlRydXN0Q29yIEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3Mjgw
-N1owgZwxCzAJBgNVBAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5
-MSQwIgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRydXN0Q29y
-IENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3IgRUNBLTEwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb3w9U73NjKYKtR8aja+3+XzP4Q1HpGjOR
-MRegdMTUpwHmspI+ap3tDvl0mEDTPwOABoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23
-xFUfJ3zSCNV2HykVh0A53ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmc
-p0yJF4OuowReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/wZ0+
-fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZFZtS6mFjBAgMBAAGj
-YzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAfBgNVHSMEGDAWgBREnkj1zG1I1KBL
-f/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF
-AAOCAQEABT41XBVwm8nHc2FvcivUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u
-/ukZMjgDfxT2AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F
-hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50soIipX1TH0Xs
-J5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BIWJZpTdwHjFGTot+fDz2LYLSC
-jaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1WitJ/X5g==
------END CERTIFICATE-----
-
-SSL.com Root Certification Authority RSA
-========================================
------BEGIN CERTIFICATE-----
-MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UEBhMCVVMxDjAM
-BgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24x
-MTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYw
-MjEyMTczOTM5WhcNNDEwMjEyMTczOTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMx
-EDAOBgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NM
-LmNvbSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcNAQEBBQAD
-ggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2RxFdHaxh3a3by/ZPkPQ/C
-Fp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aXqhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8
-P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcCC52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/ge
-oeOy3ZExqysdBP+lSgQ36YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkp
-k8zruFvh/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrFYD3Z
-fBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93EJNyAKoFBbZQ+yODJ
-gUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVcUS4cK38acijnALXRdMbX5J+tB5O2
-UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi8
-1xtZPCvM8hnIk2snYxnP/Okm+Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4s
-bE6x/c+cCbqiM+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
-HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4GA1UdDwEB/wQE
-AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGVcpNxJK1ok1iOMq8bs3AD/CUr
-dIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBcHadm47GUBwwyOabqG7B52B2ccETjit3E+ZUf
-ijhDPwGFpUenPUayvOUiaPd7nNgsPgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAsl
-u1OJD7OAUN5F7kR/q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjq
-erQ0cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jra6x+3uxj
-MxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90IH37hVZkLId6Tngr75qNJ
-vTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/YK9f1JmzJBjSWFupwWRoyeXkLtoh/D1JI
-Pb9s2KJELtFOt3JY04kTlf5Eq/jXixtunLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406y
-wKBjYZC6VWg3dGq2ktufoYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NI
-WuuA8ShYIc2wBlX7Jz9TkHCpBB5XJ7k=
------END CERTIFICATE-----
-
-SSL.com Root Certification Authority ECC
-========================================
------BEGIN CERTIFICATE-----
-MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMCVVMxDjAMBgNV
-BAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24xMTAv
-BgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEy
-MTgxNDAzWhcNNDEwMjEyMTgxNDAzWjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAO
-BgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
-bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuBBAAiA2IA
-BEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI7Z4INcgn64mMU1jrYor+
-8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPgCemB+vNH06NjMGEwHQYDVR0OBBYEFILR
-hXMw5zUE044CkvvlpNHEIejNMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTT
-jgKS++Wk0cQh6M0wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCW
-e+0F+S8Tkdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+gA0z
-5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
------END CERTIFICATE-----
-
-SSL.com EV Root Certification Authority RSA R2
-==============================================
------BEGIN CERTIFICATE-----
-MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNVBAYTAlVTMQ4w
-DAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9u
-MTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
-MB4XDTE3MDUzMTE4MTQzN1oXDTQyMDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQI
-DAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYD
-VQQDDC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMIICIjAN
-BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvqM0fNTPl9fb69LT3w23jh
-hqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssufOePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7w
-cXHswxzpY6IXFJ3vG2fThVUCAtZJycxa4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTO
-Zw+oz12WGQvE43LrrdF9HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+
-B6KjBSYRaZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcAb9Zh
-CBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQGp8hLH94t2S42Oim
-9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQVPWKchjgGAGYS5Fl2WlPAApiiECto
-RHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMOpgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+Slm
-JuwgUHfbSguPvuUCYHBBXtSuUDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48
-+qvWBkofZ6aYMBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
-HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa49QaAJadz20Zp
-qJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBWs47LCp1Jjr+kxJG7ZhcFUZh1
-++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nx
-Y/hoLVUE0fKNsKTPvDxeH3jnpaAgcLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2G
-guDKBAdRUNf/ktUM79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDz
-OFSz/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXtll9ldDz7
-CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEmKf7GUmG6sXP/wwyc5Wxq
-lD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKKQbNmC1r7fSOl8hqw/96bg5Qu0T/fkreR
-rwU7ZcegbLHNYhLDkBvjJc40vG93drEQw/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1
-hlMYegouCRw2n5H9gooiS9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX
-9hwJ1C07mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
------END CERTIFICATE-----
-
-SSL.com EV Root Certification Authority ECC
-===========================================
------BEGIN CERTIFICATE-----
-MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMCVVMxDjAMBgNV
-BAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24xNDAy
-BgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYw
-MjEyMTgxNTIzWhcNNDEwMjEyMTgxNTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMx
-EDAOBgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NM
-LmNvbSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
-BAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMAVIbc/R/fALhBYlzccBYy
-3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1KthkuWnBaBu2+8KGwytAJKaNjMGEwHQYDVR0O
-BBYEFFvKXuXe0oGqzagtZFG22XKbl+ZPMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe
-5d7SgarNqC1kUbbZcpuX5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJ
-N+vp1RPZytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZgh5Mm
-m7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
------END CERTIFICATE-----
-"""
\ No newline at end of file
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certification Authority of WoSign O=WoSign CA Limited
+# Subject: CN=Certification Authority of WoSign O=WoSign CA Limited
+# Label: "WoSign"
+# Serial: 125491772294754854453622855443212256657
+# MD5 Fingerprint: a1:f2:f9:b5:d2:c8:7a:74:b8:f3:05:f1:d7:e1:84:8d
+# SHA1 Fingerprint: b9:42:94:bf:91:ea:8f:b6:4b:e6:10:97:c7:fb:00:13:59:b6:76:cb
+# SHA256 Fingerprint: 4b:22:d5:a6:ae:c9:9f:3c:db:79:aa:5e:c0:68:38:47:9c:d5:ec:ba:71:64:f7:f2:2d:c1:d6:5f:63:d8:57:08
+-----BEGIN CERTIFICATE-----
+MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBV
+MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNV
+BAMTIUNlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgw
+MTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFX
+b1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvcqN
+rLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1U
+fcIiePyOCbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcScc
+f+Hb0v1naMQFXQoOXXDX2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2
+ZjC1vt7tj/id07sBMOby8w7gLJKA84X5KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4M
+x1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR+ScPewavVIMYe+HdVHpR
+aG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ezEC8wQjch
+zDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDar
+uHqklWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221K
+mYo0SLwX3OSACCK28jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvA
+Sh0JWzko/amrzgD5LkhLJuYwTKVYyrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWv
+HYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0CAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R8bNLtwYgFP6H
+EtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1
+LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJ
+MuYhOZO9sxXqT2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2e
+JXLOC62qx1ViC777Y7NhRCOjy+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VN
+g64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC2nz4SNAzqfkHx5Xh9T71XXG68pWp
+dIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes5cVAWubXbHssw1ab
+R80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/EaEQ
+PkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGce
+xGATVdVhmVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+
+J7x6v+Db9NpSvd4MVHAxkUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMl
+OtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGikpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWT
+ee5Ehr7XHuQe+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA 沃通根证书 O=WoSign CA Limited
+# Subject: CN=CA 沃通根证书 O=WoSign CA Limited
+# Label: "WoSign China"
+# Serial: 106921963437422998931660691310149453965
+# MD5 Fingerprint: 78:83:5b:52:16:76:c4:24:3b:83:78:e8:ac:da:9a:93
+# SHA1 Fingerprint: 16:32:47:8d:89:f9:21:3a:92:00:85:63:f5:a4:a7:d3:12:40:8a:d6
+# SHA256 Fingerprint: d6:f0:34:bd:94:aa:23:3f:02:97:ec:a4:24:5b:28:39:73:e4:47:aa:59:0f:31:0c:77:f4:8f:df:83:11:22:54
+-----BEGIN CERTIFICATE-----
+MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBG
+MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNV
+BAMMEkNBIOayg+mAmuagueivgeS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgw
+MTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRl
+ZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k8H/r
+D195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld1
+9AXbbQs5uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExf
+v5RxadmWPgxDT74wwJ85dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnk
+UkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+L
+NVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFyb7Ao65vh4YOhn0pdr8yb
++gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc76DbT52V
+qyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6K
+yX2m+Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0G
+AbQOXDBGVWCvOGU6yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaK
+J/kR8slC/k7e3x9cxKSGhxYzoacXGKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwEC
+AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUAA4ICAQBqinA4
+WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6
+yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj
+/feTZU7n85iYr83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6
+jBAyvd0zaziGfjk9DgNyp115j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2
+ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0AkLppRQjbbpCBhqcqBT/mhDn4t/lX
+X0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97qA4bLJyuQHCH2u2n
+FoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Yjj4D
+u9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10l
+O1Hm13ZBONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Le
+ie2uPAmvylezkolwQOQvT8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR1
+2KvxAmLBsX5VYc8T1yaw15zLKYs4SgsOkI26oQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 14367148294922964480859022125800977897474
+# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
+# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
+# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
+FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
+uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
+ewv4n4Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G3"
+# Serial: 10003001
+# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37
+# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc
+# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
+DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
+cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
+IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
+xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
+KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
+9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
+5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
+6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
+bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
+BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
+XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
+INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
+U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
+LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
+Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
+gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
+/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
+fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
+4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
+1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
+QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
+94B7IWcnMFk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Label: "Staat der Nederlanden EV Root CA"
+# Serial: 10000013
+# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
+# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
+# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
+-----BEGIN CERTIFICATE-----
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
+MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
+TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
+b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
+M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
+UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
+Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
+rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
+pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
+j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
+KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
+/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
+cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
+1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
+px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
+MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
+2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
+wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
+CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
+vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
+Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
+Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
+eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
+FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
+7uzXLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+# Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Secure Server CA"
+# Serial: 927650371
+# MD5 Fingerprint: df:f2:80:73:cc:f1:e6:61:73:fc:f5:42:e9:c5:7c:ee
+# SHA1 Fingerprint: 99:a6:9b:e6:1a:fe:88:6b:4d:2b:82:00:7c:b8:54:fc:31:7e:15:39
+# SHA256 Fingerprint: 62:f2:40:27:8c:56:4c:4d:d8:bf:7d:9d:4f:6f:36:6e:a8:94:d2:2f:5f:34:d9:89:a9:83:ac:ec:2f:ff:ed:50
+-----BEGIN CERTIFICATE-----
+MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC
+VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u
+ZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc
+KGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u
+ZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1
+MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE
+ChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j
+b3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF
+bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg
+U2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA
+A4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/
+I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3
+wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC
+AdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb
+oIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5
+BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p
+dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk
+MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp
+b24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu
+dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0
+MFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi
+E1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa
+MAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI
+hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN
+95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd
+2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority
+# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority
+# Label: "ValiCert Class 2 VA"
+# Serial: 1
+# MD5 Fingerprint: a9:23:75:9b:ba:49:36:6e:31:c2:db:f2:e7:66:ba:87
+# SHA1 Fingerprint: 31:7a:2a:d0:7f:2b:33:5e:f5:a1:c3:4e:4b:57:e8:b7:d8:f1:fc:a6
+# SHA256 Fingerprint: 58:d0:17:27:9c:d4:dc:63:ab:dd:b1:96:a6:c9:90:6c:30:c4:e0:87:83:ea:e8:c1:60:99:54:d6:93:55:59:6b
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy
+NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY
+dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9
+WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS
+v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v
+UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu
+IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC
+W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Expressz (Class C) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok
+# Subject: CN=NetLock Expressz (Class C) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok
+# Label: "NetLock Express (Class C) Root"
+# Serial: 104
+# MD5 Fingerprint: 4f:eb:f1:f0:70:c2:80:63:5d:58:9f:da:12:3c:a9:c4
+# SHA1 Fingerprint: e3:92:51:2f:0a:cf:f5:05:df:f6:de:06:7f:75:37:e1:65:ea:57:4b
+# SHA256 Fingerprint: 0b:5e:ed:4e:84:64:03:cf:55:e0:65:84:84:40:ed:2a:82:75:8b:f5:b9:aa:1f:25:3d:46:13:cf:a0:80:ff:3f
+-----BEGIN CERTIFICATE-----
+MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUx
+ETAPBgNVBAcTCEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0
+b25zYWdpIEtmdC4xGjAYBgNVBAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQD
+EytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBDKSBUYW51c2l0dmFueWtpYWRvMB4X
+DTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJBgNVBAYTAkhVMREw
+DwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9u
+c2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMr
+TmV0TG9jayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzAN
+BgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNA
+OoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3ZW3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC
+2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63euyucYT2BDMIJTLrdKwW
+RMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQwDgYDVR0P
+AQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEW
+ggJNRklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0
+YWxhbm9zIFN6b2xnYWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFz
+b2sgYWxhcGphbiBrZXN6dWx0LiBBIGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBO
+ZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1iaXp0b3NpdGFzYSB2ZWRpLiBB
+IGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0ZWxlIGF6IGVs
+b2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs
+ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25s
+YXBqYW4gYSBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kg
+a2VyaGV0byBheiBlbGxlbm9yemVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4g
+SU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5kIHRoZSB1c2Ugb2YgdGhpcyBjZXJ0
+aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQUyBhdmFpbGFibGUg
+YXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwgYXQg
+Y3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmY
+ta3UzbM2xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2g
+pO0u9f38vf5NNwgMvOOWgyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4
+Fp1hBWeAyNDYpQcCNJgEjTME1A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Uzleti (Class B) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok
+# Subject: CN=NetLock Uzleti (Class B) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok
+# Label: "NetLock Business (Class B) Root"
+# Serial: 105
+# MD5 Fingerprint: 39:16:aa:b9:6a:41:e1:14:69:df:9e:6c:3b:72:dc:b6
+# SHA1 Fingerprint: 87:9f:4b:ee:05:df:98:58:3b:e3:60:d6:33:e7:0d:3f:fe:98:71:af
+# SHA256 Fingerprint: 39:df:7b:68:2b:7b:93:8f:84:71:54:81:cc:de:8d:60:d8:f2:2e:c5:98:87:7d:0a:aa:c1:2b:59:18:2b:03:12
+-----BEGIN CERTIFICATE-----
+MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUx
+ETAPBgNVBAcTCEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0
+b25zYWdpIEtmdC4xGjAYBgNVBAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQD
+EylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikgVGFudXNpdHZhbnlraWFkbzAeFw05
+OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYDVQQGEwJIVTERMA8G
+A1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRvbnNh
+Z2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5l
+dExvY2sgVXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqG
+SIb3DQEBAQUAA4GNADCBiQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xK
+gZjupNTKihe5In+DCnVMm8Bp2GQ5o+2So/1bXHQawEfKOml2mrriRBf8TKPV/riX
+iK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr1nGTLbO/CVRY7QbrqHvc
+Q7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNVHQ8BAf8E
+BAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1G
+SUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFu
+b3MgU3pvbGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBh
+bGFwamFuIGtlc3p1bHQuIEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExv
+Y2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRvc2l0YXNhIHZlZGkuIEEgZGln
+aXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUgYXogZWxvaXJ0
+IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh
+c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGph
+biBhIGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJo
+ZXRvIGF6IGVsbGVub3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBP
+UlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmlj
+YXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2YWlsYWJsZSBhdCBo
+dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBjcHNA
+bmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06
+sPgzTEdM43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXa
+n3BukxowOR0w2y7jfLKRstE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKS
+NitjrFgBazMpUIaD8QFI
+-----END CERTIFICATE-----
+
+# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority
+# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority
+# Label: "RSA Root Certificate 1"
+# Serial: 1
+# MD5 Fingerprint: a2:6f:53:b7:ee:40:db:4a:68:e7:fa:18:d9:10:4b:72
+# SHA1 Fingerprint: 69:bd:8c:f4:9c:d3:00:fb:59:2e:17:93:ca:55:6a:f3:ec:aa:35:fb
+# SHA256 Fingerprint: bc:23:f9:8a:31:3c:b9:2d:e3:bb:fc:3a:5a:9f:44:61:ac:39:49:4c:4a:e1:5a:9e:9d:f1:31:e9:9b:73:01:9a
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+-----END CERTIFICATE-----
+
+# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority
+# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority
+# Label: "ValiCert Class 1 VA"
+# Serial: 1
+# MD5 Fingerprint: 65:58:ab:15:ad:57:6c:1e:a8:a7:b5:69:ac:bf:ff:eb
+# SHA1 Fingerprint: e5:df:74:3c:b6:01:c4:9b:98:43:dc:ab:8c:e8:6a:81:10:9f:e4:8e
+# SHA256 Fingerprint: f4:c1:49:55:1a:30:13:a3:5b:c7:bf:fe:17:a7:f3:44:9b:c1:ab:5b:5a:0a:e7:4b:06:c2:3b:90:00:4c:01:04
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIyMjM0OFoXDTE5MDYy
+NTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9Y
+LqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIiGQj4/xEjm84H9b9pGib+
+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCmDuJWBQ8Y
+TfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0
+LBwGlN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLW
+I8sogTLDAHkY7FkXicnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPw
+nXS3qT6gpf+2SQMT2iLM7XGCK5nPOrf1LXLI
+-----END CERTIFICATE-----
+
+# Issuer: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc.
+# Subject: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc.
+# Label: "Equifax Secure eBusiness CA 1"
+# Serial: 4
+# MD5 Fingerprint: 64:9c:ef:2e:44:fc:c6:8f:52:07:d0:51:73:8f:cb:3d
+# SHA1 Fingerprint: da:40:18:8b:91:89:a3:ed:ee:ae:da:97:fe:2f:9d:f5:b7:d1:8a:41
+# SHA256 Fingerprint: cf:56:ff:46:a4:a1:86:10:9d:d9:65:84:b5:ee:b5:8a:51:0c:42:75:b0:e5:f9:4f:40:bb:ae:86:5e:19:f6:73
+-----BEGIN CERTIFICATE-----
+MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT
+ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw
+MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j
+LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ
+KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo
+RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu
+WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw
+Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD
+AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK
+eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM
+zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+
+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN
+/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc.
+# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc.
+# Label: "Equifax Secure Global eBusiness CA"
+# Serial: 1
+# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc
+# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45
+# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07
+-----BEGIN CERTIFICATE-----
+MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT
+ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw
+MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj
+dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l
+c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC
+UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc
+58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/
+o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr
+aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA
+A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA
+Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv
+8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV
+-----END CERTIFICATE-----
+
+# Issuer: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Subject: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Label: "Thawte Premium Server CA"
+# Serial: 1
+# MD5 Fingerprint: 06:9f:69:79:16:66:90:02:1b:8c:8c:a2:c3:07:6f:3a
+# SHA1 Fingerprint: 62:7f:8d:78:27:65:63:99:d2:7d:7f:90:44:c9:fe:b3:f3:3e:fa:9a
+# SHA256 Fingerprint: ab:70:36:36:5c:71:54:aa:29:c2:c2:9f:5d:41:91:16:3b:16:2a:22:25:01:13:57:d5:6d:07:ff:a7:bc:1f:72
+-----BEGIN CERTIFICATE-----
+MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
+VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy
+dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t
+MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB
+MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG
+A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp
+b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl
+cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv
+bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE
+VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ
+ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR
+uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG
+9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI
+hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM
+pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Label: "Thawte Server CA"
+# Serial: 1
+# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d
+# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c
+# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9
+-----BEGIN CERTIFICATE-----
+MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
+VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm
+MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx
+MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT
+DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3
+dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl
+cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3
+DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD
+gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91
+yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX
+L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj
+EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG
+7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e
+QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ
+qdq5snUb9kLy78fyGPmJvKP/iiMucEc=
+-----END CERTIFICATE-----
+
+# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Label: "Verisign Class 3 Public Primary Certification Authority"
+# Serial: 149843929435818692848040365716851702463
+# MD5 Fingerprint: 10:fc:63:5d:f6:26:3e:0d:f3:25:be:5f:79:cd:67:67
+# SHA1 Fingerprint: 74:2c:31:92:e6:07:e4:24:eb:45:49:54:2b:e1:bb:c5:3e:61:74:e2
+# SHA256 Fingerprint: e7:68:56:34:ef:ac:f6:9a:ce:93:9a:6b:25:5b:7b:4f:ab:ef:42:93:5b:50:a2:65:ac:b5:cb:60:27:e4:4e:70
+-----BEGIN CERTIFICATE-----
+MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz
+cyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2
+MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV
+BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt
+YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN
+ADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE
+BarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is
+I19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G
+CSqGSIb3DQEBAgUAA4GBALtMEivPLCYATxQT3ab7/AoRhIzzKBxnki98tsX63/Do
+lbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59AhWM1pF+NEHJwZRDmJXNyc
+AA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2OmufTqj/ZA1k
+-----END CERTIFICATE-----
+
+# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Label: "Verisign Class 3 Public Primary Certification Authority"
+# Serial: 80507572722862485515306429940691309246
+# MD5 Fingerprint: ef:5a:f1:33:ef:f1:cd:bb:51:02:ee:12:14:4b:96:c4
+# SHA1 Fingerprint: a1:db:63:93:91:6f:17:e4:18:55:09:40:04:15:c7:02:40:b0:ae:6b
+# SHA256 Fingerprint: a4:b6:b3:99:6f:c2:f3:06:b3:fd:86:81:bd:63:41:3d:8c:50:09:cc:4f:a3:29:c2:cc:f0:e2:fa:1b:14:03:05
+-----BEGIN CERTIFICATE-----
+MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz
+cyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2
+MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV
+BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt
+YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN
+ADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE
+BarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is
+I19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G
+CSqGSIb3DQEBBQUAA4GBABByUqkFFBkyCEHwxWsKzH4PIRnN5GfcX6kb5sroc50i
+2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWXbj9T/UWZYB2oK0z5XqcJ
+2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/D/xwzoiQ
+-----END CERTIFICATE-----
+
+# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network
+# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network
+# Label: "Verisign Class 3 Public Primary Certification Authority - G2"
+# Serial: 167285380242319648451154478808036881606
+# MD5 Fingerprint: a2:33:9b:4c:74:78:73:d4:6c:e7:c1:f3:8d:cb:5c:e9
+# SHA1 Fingerprint: 85:37:1c:a6:e5:50:14:3d:ce:28:03:47:1b:de:3a:09:e8:f8:77:0f
+# SHA256 Fingerprint: 83:ce:3c:12:29:68:8a:59:3d:48:5f:81:97:3c:0f:91:95:43:1e:da:37:cc:5e:36:43:0e:79:c7:a8:88:63:8b
+-----BEGIN CERTIFICATE-----
+MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ
+BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh
+c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy
+MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp
+emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X
+DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw
+FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg
+UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo
+YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5
+MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4
+pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0
+13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID
+AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk
+U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i
+F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY
+oJ2daZH9
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
+# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
+# Label: "GTE CyberTrust Global Root"
+# Serial: 421
+# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db
+# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74
+# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36
+-----BEGIN CERTIFICATE-----
+MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD
+VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv
+bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv
+b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV
+UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
+cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
+b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH
+iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS
+r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4
+04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r
+GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9
+3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P
+lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTQwMjEy
+MDAwMDAwWhcNMjkwMjExMjM1OTU5WjCBkDELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxNjA0BgNVBAMTLUNPTU9ETyBSU0EgRG9tYWluIFZh
+bGlkYXRpb24gU2VjdXJlIFNlcnZlciBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAI7CAhnhoFmk6zg1jSz9AdDTScBkxwtiBUUWOqigwAwCfx3M28Sh
+bXcDow+G+eMGnD4LgYqbSRutA776S9uMIO3Vzl5ljj4Nr0zCsLdFXlIvNN5IJGS0
+Qa4Al/e+Z96e0HqnU4A7fK31llVvl0cKfIWLIpeNs4TgllfQcBhglo/uLQeTnaG6
+ytHNe+nEKpooIZFNb5JPJaXyejXdJtxGpdCsWTWM/06RQ1A/WZMebFEh7lgUq/51
+UHg+TLAchhP6a5i84DuUHoVS3AOTJBhuyydRReZw3iVDpA3hSqXttn7IzW3uLh0n
+c13cRTCAquOyQQuvvUSH2rnlG51/ruWFgqUCAwEAAaOCAWUwggFhMB8GA1UdIwQY
+MBaAFLuvfgI9+qbxPISOre44mOzZMjLUMB0GA1UdDgQWBBSQr2o6lFoL2JDqElZz
+30O0Oija5zAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNV
+HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgG
+BmeBDAECATBMBgNVHR8ERTBDMEGgP6A9hjtodHRwOi8vY3JsLmNvbW9kb2NhLmNv
+bS9DT01PRE9SU0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDBxBggrBgEFBQcB
+AQRlMGMwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29tb2RvY2EuY29tL0NPTU9E
+T1JTQUFkZFRydXN0Q0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21v
+ZG9jYS5jb20wDQYJKoZIhvcNAQEMBQADggIBAE4rdk+SHGI2ibp3wScF9BzWRJ2p
+mj6q1WZmAT7qSeaiNbz69t2Vjpk1mA42GHWx3d1Qcnyu3HeIzg/3kCDKo2cuH1Z/
+e+FE6kKVxF0NAVBGFfKBiVlsit2M8RKhjTpCipj4SzR7JzsItG8kO3KdY3RYPBps
+P0/HEZrIqPW1N+8QRcZs2eBelSaz662jue5/DJpmNXMyYE7l3YphLG5SEXdoltMY
+dVEVABt0iN3hxzgEQyjpFv3ZBdRdRydg1vs4O2xyopT4Qhrf7W8GjEXCBgCq5Ojc
+2bXhc3js9iPc0d1sjhqPpepUfJa3w/5Vjo1JXvxku88+vZbrac2/4EjxYoIQ5QxG
+V/Iz2tDIY+3GH5QFlkoakdH368+PUq4NCNk+qKBR6cGHdNXJ93SrLlP7u3r7l+L4
+HyaPs9Kg4DdbKDsx5Q5XLVq4rXmsXiBmGqW5prU5wfWYQ//u+aen/e7KJD2AFsQX
+j4rBYKEMrltDR5FL1ZoXX/nUh8HCjLfn4g8wGTeGrODcQgPmlKidrv0PJFGUzpII
+0fxQ8ANAe4hZ7Q7drNJ3gjTcBpUC2JD5Leo31Rpg0Gcg19hCC0Wvgmje3WYkN5Ap
+lBlGGSW4gNfL1IYoakRwJiNiqZ+Gb7+6kHDSVneFeO/qJakXzlByjAA6quPbYzSf
++AZxAeKCINT+b72x
+-----END CERTIFICATE-----
+"""
diff --git a/conans/util/windows.py b/conans/util/windows.py
index 72112433c..e67448a10 100644
--- a/conans/util/windows.py
+++ b/conans/util/windows.py
@@ -1,4 +1,6 @@
import os
+import subprocess
+
from conans.util.files import load, mkdir, save, rmdir
import tempfile
@@ -48,6 +50,16 @@ def path_shortener(path, short_paths):
drive = os.path.splitdrive(path)[0]
short_home = drive + "/.conan"
mkdir(short_home)
+
+ # Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid
+ # access problems in cygwin/msys2 windows subsystems when using short_home folder
+ try:
+ cmd = r'cacls %s /E /G "%s\%s":F' % (short_home, os.environ['USERDOMAIN'], os.environ['USERNAME'])
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, make command quiet
+ except subprocess.CalledProcessError:
+ # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
+ pass
+
redirect = tempfile.mkdtemp(dir=short_home, prefix="")
# This "1" is the way to have a non-existing directory, so commands like
# shutil.copytree() to it, works. It can be removed without compromising the
| conan user -r=remote shows all the users, not the one for the given remote
To help us debug your issue please explain:
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
Conan 1.0
Steps: Define several remotes, then type ``conan user -r=asdasdas`` (or any other arbitrary remote name). The remote name is not validated at all.
| conan-io/conan | diff --git a/conans/test/command/download_test.py b/conans/test/command/download_test.py
index 247055065..06c5f3023 100644
--- a/conans/test/command/download_test.py
+++ b/conans/test/command/download_test.py
@@ -7,6 +7,47 @@ from conans.util.files import load
class DownloadTest(unittest.TestCase):
+ def download_recipe_test(self):
+ server = TestServer()
+ servers = {"default": server}
+ client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
+
+ # Test argument --package and --recipe cannot be together
+ error = client.run("download eigen/3.3.4@conan/stable --recipe --package fake_id",
+ ignore_error=True)
+ self.assertTrue(error)
+ self.assertIn("ERROR: recipe parameter cannot be used together with package", client.out)
+
+ # Test download of the recipe only
+ conanfile = """from conans import ConanFile
+class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ exports_sources = "*"
+"""
+ client.save({"conanfile.py": conanfile,
+ "file.h": "myfile.h"})
+ client.run("create . lasote/stable")
+ ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
+ self.assertTrue(os.path.exists(client.paths.conanfile(ref)))
+ conan = client.client_cache.conan(ref)
+ self.assertTrue(os.path.exists(os.path.join(conan, "package")))
+ client.run("upload pkg/0.1@lasote/stable --all")
+ client.run("remove pkg/0.1@lasote/stable -f")
+ self.assertFalse(os.path.exists(client.paths.export(ref)))
+ client.run("download pkg/0.1@lasote/stable --recipe")
+
+ self.assertIn("Downloading conanfile.py", client.out)
+ self.assertNotIn("Downloading conan_sources.tgz", client.out)
+ self.assertNotIn("Downloading conan_package.tgz", client.out)
+ export = client.client_cache.export(ref)
+ self.assertTrue(os.path.exists(os.path.join(export, "conanfile.py")))
+ self.assertEqual(conanfile, load(os.path.join(export, "conanfile.py")))
+ source = client.client_cache.export_sources(ref)
+ self.assertFalse(os.path.exists(os.path.join(source, "file.h")))
+ conan = client.client_cache.conan(ref)
+ self.assertFalse(os.path.exists(os.path.join(conan, "package")))
+
def download_with_sources_test(self):
server = TestServer()
servers = {"default": server,
diff --git a/conans/test/command/info_folders_test.py b/conans/test/command/info_folders_test.py
index 937b7b01c..0ac662411 100644
--- a/conans/test/command/info_folders_test.py
+++ b/conans/test/command/info_folders_test.py
@@ -2,6 +2,7 @@ import unittest
import os
import platform
import re
+import subprocess
from conans import tools
from conans.test.utils.tools import TestClient
@@ -142,6 +143,48 @@ class InfoFoldersTest(unittest.TestCase):
self.assertFalse(os.path.exists(path))
self.assertTrue(os.path.exists(os.path.dirname(path)))
+ def test_short_paths_home_set_acl(self):
+ """
+ When CONAN_USER_HOME_SHORT is living in NTFS file systems, current user needs to be
+ granted with full control permission to avoid access problems when cygwin/msys2 windows subsystems
+ are mounting/using that folder.
+ """
+ if platform.system() != "Windows":
+ return
+
+ folder = temp_folder(False) # Creates a temporary folder in %HOME%\appdata\local\temp
+ short_folder = os.path.join(folder, ".cnacls")
+
+ self.assertFalse(os.path.exists(short_folder), "short_folder: %s shouldn't exists" % short_folder)
+ os.makedirs(short_folder)
+
+ current_domain = os.environ['USERDOMAIN']
+ current_user = os.environ['USERNAME']
+
+ # Explicitly revoke full control permission to current user
+ cmd = r'cacls %s /E /R "%s\%s"' % (short_folder, current_domain, current_user)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ raise Exception("Error %s setting ACL to short_folder: '%s'."
+ "Please check that cacls.exe exists" % (e, short_folder))
+
+ # Run conan export in using short_folder
+ with tools.environment_append({"CONAN_USER_HOME_SHORT": short_folder}):
+ client = TestClient(base_folder=folder)
+ client.save({CONANFILE: conanfile_py.replace("False", "True")})
+ client.run("export . %s" % self.user_channel)
+
+ # Retrieve ACLs from short_folder
+ try:
+ short_folder_acls = subprocess.check_output("cacls %s" % short_folder, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ raise Exception("Error %s getting ACL from short_folder: '%s'." % (e, short_folder))
+
+ # Check user has full control
+ user_acl = "%s\\%s:(OI)(CI)F" % (current_domain, current_user)
+ self.assertIn(user_acl.encode(), short_folder_acls)
+
def test_direct_conanfile(self):
client = TestClient()
client.save({CONANFILE: conanfile_py})
diff --git a/conans/test/command/install_test.py b/conans/test/command/install_test.py
index 9cc8437e9..543241c62 100644
--- a/conans/test/command/install_test.py
+++ b/conans/test/command/install_test.py
@@ -453,3 +453,42 @@ class Pkg(ConanFile):
self.assertTrue(error)
self.assertIn("ERROR: Unable to find 'Hello/0.1@lasote/stable' in remotes",
client.out)
+
+ def install_argument_order_test(self):
+ # https://github.com/conan-io/conan/issues/2520
+
+ conanfile_boost = """from conans import ConanFile
+class BoostConan(ConanFile):
+ name = "boost"
+ version = "0.1"
+ options = {"shared": [True, False]}
+ default_options = "shared=True"
+"""
+ conanfile = """from conans import ConanFile
+class TestConan(ConanFile):
+ name = "Hello"
+ version = "0.1"
+ requires = "boost/0.1@conan/stable"
+"""
+ client = TestClient()
+ client.save({"conanfile.py": conanfile,
+ "conanfile_boost.py": conanfile_boost})
+ client.run("create conanfile_boost.py conan/stable")
+ client.run("install . -o boost:shared=True --build=missing")
+ output_0 = "%s" % client.out
+ client.run("install . -o boost:shared=True --build missing")
+ output_1 = "%s" % client.out
+ client.run("install -o boost:shared=True . --build missing")
+ output_2 = "%s" % client.out
+ client.run("install -o boost:shared=True --build missing .")
+ output_3 = "%s" % client.out
+ self.assertNotIn("ERROR", output_3)
+ self.assertEqual(output_0, output_1)
+ self.assertEqual(output_1, output_2)
+ self.assertEqual(output_2, output_3)
+
+ client.run("install -o boost:shared=True --build boost . --build missing")
+ output_4 = "%s" % client.out
+ client.run("install -o boost:shared=True --build missing --build boost .")
+ output_5 = "%s" % client.out
+ self.assertEqual(output_4, output_5)
diff --git a/conans/test/command/user_test.py b/conans/test/command/user_test.py
index a0a6a8a60..c247b910f 100644
--- a/conans/test/command/user_test.py
+++ b/conans/test/command/user_test.py
@@ -4,13 +4,44 @@ from conans.test.utils.tools import TestClient, TestServer
class UserTest(unittest.TestCase):
- def test_command_user(self):
- """ Test that the user can be shown and changed, and it is reflected in the
- user cache localdb
+ def test_command_user_no_remotes(self):
+ """ Test that proper error is reported when no remotes are defined and conan user is executed
"""
client = TestClient()
- client.run('user')
- self.assertIn("ERROR: No remotes defined", client.user_io.out)
+ with self.assertRaises(Exception):
+ client.run("user")
+ self.assertIn("ERROR: No default remote defined", client.user_io.out)
+
+ with self.assertRaises(Exception):
+ client.run("user -r wrong_remote")
+ self.assertIn("ERROR: No remote 'wrong_remote' defined", client.user_io.out)
+
+ def test_command_user_list(self):
+ """ Test list of user is reported for all remotes or queried remote
+ """
+ servers = {
+ "default": TestServer(),
+ "test_remote_1": TestServer(),
+ }
+ client = TestClient(servers=servers)
+
+ # Test with wrong remote right error is reported
+ with self.assertRaises(Exception):
+ client.run("user -r Test_Wrong_Remote")
+ self.assertIn("ERROR: No remote 'Test_Wrong_Remote' defined", client.user_io.out)
+
+ # Test user list for requested remote reported
+ client.run("user -r test_remote_1")
+ self.assertIn("Current 'test_remote_1' user: None (anonymous)", client.user_io.out)
+ self.assertNotIn("Current 'default' user: None (anonymous)", client.user_io.out)
+
+ # Test user list for all remotes is reported
+ client.run("user")
+ self.assertIn(
+ ("Current 'default' user: None (anonymous)\n"
+ "Current 'test_remote_1' user: None (anonymous)"),
+ client.user_io.out
+ )
def test_with_remote_no_connect(self):
test_server = TestServer()
diff --git a/conans/test/conanfile_extend_test.py b/conans/test/conanfile_extend_test.py
index 4d0961297..dce7b982a 100644
--- a/conans/test/conanfile_extend_test.py
+++ b/conans/test/conanfile_extend_test.py
@@ -94,6 +94,52 @@ class DevConanFile(HelloConan2):
self.assertIn("lib/0.1@user/channel", client.user_io.out)
self.assertIn("otherlib/0.2@user/channel", client.user_io.out)
+ def conanfile_subclass_test(self):
+ base = '''
+from conans import ConanFile
+
+class ConanBase(ConanFile):
+ requires = "lib/0.1@user/channel"
+ options = {"test_option": [1, 2, 3]}
+ default_options="test_option=2"
+ my_flag = False
+
+ def build(self):
+ self.output.info("build() MyFlag: %s" % self.my_flag)
+ '''
+ extension = '''
+from base_conan import ConanBase
+
+class ConanFileToolsTest(ConanBase):
+ name = "test"
+ version = "1.9"
+ exports = "base_conan.py"
+
+ def config(self):
+ self.options["otherlib"].otherlib_option = 1
+
+ def requirements(self):
+ self.requires("otherlib/0.2@user/channel")
+
+ def source(self):
+ self.output.info("source() my_flag: %s" % self.my_flag)
+ '''
+ files = {"base_conan.py": base,
+ "conanfile.py": extension}
+ client = TestClient(self.base_folder)
+
+ client.save(files)
+ client.run("create . conan/testing -o test:test_option=3 --build")
+ self.assertIn("lib/0.1@user/channel from local cache", client.out)
+ self.assertIn("test/1.9@conan/testing: source() my_flag: False", client.out)
+ self.assertIn("test/1.9@conan/testing: build() MyFlag: False", client.out)
+ client.run("install . -o test:test_option=3")
+ conaninfo = load(os.path.join(client.current_folder, "conaninfo.txt"))
+ self.assertIn("lib/0.1@user/channel", conaninfo)
+ self.assertIn("test_option=3", conaninfo)
+ self.assertIn("otherlib/0.2@user/channel", conaninfo)
+ self.assertIn("otherlib:otherlib_option=1", conaninfo)
+
def test_txt(self):
base = '''[requires]
lib/0.1@user/channel
diff --git a/conans/test/integration/private_deps_test.py b/conans/test/integration/private_deps_test.py
index e54714002..91a61482d 100644
--- a/conans/test/integration/private_deps_test.py
+++ b/conans/test/integration/private_deps_test.py
@@ -114,10 +114,10 @@ class PrivateDepsTest(unittest.TestCase):
self.client.run("remove Hello0* -p -f ")
self.client.run("remove Hello1* -p -f")
self.client.run("search Hello0/0.1@lasote/stable")
- self.assertIn("There are no packages for pattern 'Hello0/0.1@lasote/stable'",
+ self.assertIn("There are no packages for reference 'Hello0/0.1@lasote/stable', but package recipe found.",
self.client.user_io.out)
self.client.run("search Hello1/0.1@lasote/stable")
- self.assertIn("There are no packages for pattern 'Hello1/0.1@lasote/stable'",
+ self.assertIn("There are no packages for reference 'Hello1/0.1@lasote/stable', but package recipe found.",
self.client.user_io.out)
self.client.run('install . --build missing')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": -1,
"issue_text_score": 0,
"test_score": -1
},
"num_modified_files": 8
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"export PYTHONPATH=$PYTHONPATH:$(pwd)",
"export CONAN_COMPILER=gcc",
"export CONAN_COMPILER_VERSION=4.8"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@9d07bc26337c3282a57e59aa6a5c7b8afe9c282c#egg=conan
cov-core==1.15.0
coverage==4.2
cryptography==2.1.4
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
nose-cov==1.6
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- cov-core==1.15.0
- coverage==4.2
- cryptography==2.1.4
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- nose-cov==1.6
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/command/user_test.py::UserTest::test_command_user_list",
"conans/test/command/user_test.py::UserTest::test_command_user_no_remotes"
]
| [
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_basic",
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_deps_basic",
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_deps_specific_information",
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_direct_conanfile",
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_single_field",
"conans/test/command/user_test.py::UserTest::test_clean",
"conans/test/conanfile_extend_test.py::ConanfileExtendTest::test_base",
"conans/test/conanfile_extend_test.py::ConanfileExtendTest::test_txt"
]
| [
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_short_paths",
"conans/test/command/info_folders_test.py::InfoFoldersTest::test_short_paths_home_set_acl",
"conans/test/command/user_test.py::UserTest::test_command_interactive_only",
"conans/test/command/user_test.py::UserTest::test_command_user_with_interactive_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password_spaces",
"conans/test/command/user_test.py::UserTest::test_with_remote_no_connect"
]
| []
| MIT License | 2,311 | [
"conans/client/printer.py",
"conans/client/manager.py",
"conans/client/command.py",
"conans/client/rest/cacert.py",
"conans/client/conan_api.py",
"conans/client/proxy.py",
"conans/client/installer.py",
"conans/util/windows.py"
]
| [
"conans/client/printer.py",
"conans/client/manager.py",
"conans/client/command.py",
"conans/client/rest/cacert.py",
"conans/client/conan_api.py",
"conans/client/proxy.py",
"conans/client/installer.py",
"conans/util/windows.py"
]
|
|
srsudar__eg-72 | 8efa97140cab25eac23129104c5e60bd45d34679 | 2018-03-20 03:02:39 | 8efa97140cab25eac23129104c5e60bd45d34679 | diff --git a/.gitignore b/.gitignore
index 04df878..4d76ba6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,5 @@ syntax: glob
*.egg-info
dist
build
+
+.pytest_cache/
diff --git a/.travis.yml b/.travis.yml
index 5ae51e4..b04d5f9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,4 +7,4 @@ python:
- "3.4"
# Command to run tests
-script: nosetests
+script: py.test
diff --git a/README.md b/README.md
index a630ec7..fe0fa0f 100644
--- a/README.md
+++ b/README.md
@@ -49,9 +49,8 @@ change, but you'll have to update your links to point at the new location. Or
you can install with `pip`.
`eg` doesn't ship with a binary. Dependencies are very modest and should not
-require you to install anything (other than
-[Nose](https://github.com/nose-devs/nose/) if you want to run the tests).
-If you find otherwise, open an issue.
+require you to install anything (other than [pytest](https://docs.pytest.org) if
+you want to run the tests). If you find otherwise, open an issue.
## Usage
@@ -95,10 +94,10 @@ like everybody else? You can create an example for untarring and unzipping
bzipped tarballs, stick it in a file called `tar.md`, and tell `eg` where to
find it.
-The way to think about what `eg` does is just that it takes a program name, for
-example `find`, and looks for two files named `find.md` in the default
-directory and a custom directory. If it finds them, it pipes them through
-`less`, with the custom file at the top. Easy.
+The way to think about what `eg` does is that it takes a program name, for
+example `find`, and looks for files named `find.md` in the default and custom
+directories (including subdirectories). If it finds them, it pipes them through
+`less`, with the custom files at the top. Easy.
The default and custom directories can be specified at the command line like
so:
@@ -353,8 +352,9 @@ available.
`eg` depends only on standard libraries and Python 2.x, so building should be a
simple matter of cloning the repo and running the executable `eg/eg.py`.
-`eg` uses Nose for testing, so you'll have to have Nose installed to run tests.
-Once you have Nose, run `nosetests` from **the root directory of the repo**.
+`eg` uses pytest for testing, so you'll have to have Nose installed to run
+tests. Once you have Nose, run `py.test` from **the root directory of the
+repo**.
Tests should always be expected to pass. If they fail, please open an issue,
even if only so that we can better elucidate `eg`'s dependencies.
diff --git a/eg/util.py b/eg/util.py
index 56a2637..9ccae04 100644
--- a/eg/util.py
+++ b/eg/util.py
@@ -46,6 +46,24 @@ def _inform_cannot_edit_no_custom_dir():
print(msg)
+def _recursive_get_all_file_names(dir):
+ """
+ Get all the file names in the directory. Gets all the top level file names
+ only, not the full path.
+
+ dir: a directory or string, as to hand to os.walk(). If None, returns empty
+ list.
+ """
+ if not dir:
+ return []
+
+ result = []
+ for basedir, dirs, files in os.walk(dir):
+ result.extend(files)
+
+ return result
+
+
def edit_custom_examples(program, config):
"""
Edit custom examples for the given program, creating the file if it does
@@ -57,34 +75,37 @@ def edit_custom_examples(program, config):
# resolve aliases
resolved_program = get_resolved_program(program, config)
- custom_file_path = get_file_path_for_program(
+ custom_file_paths = get_file_paths_for_program(
resolved_program,
config.custom_dir
)
- subprocess.call([config.editor_cmd, custom_file_path])
+ if (len(custom_file_paths) > 0):
+ path_to_edit = custom_file_paths[0]
+ else:
+ # A new file.
+ path_to_edit = os.path.join(config.custom_dir, resolved_program + '.md')
-def handle_program(program, config):
- default_file_path = None
- custom_file_path = None
+ # Edit the first. Handles the base case.
+ subprocess.call([config.editor_cmd, path_to_edit])
+
+def handle_program(program, config):
# try to resolve any aliases
resolved_program = get_resolved_program(program, config)
- if has_default_entry_for_program(resolved_program, config):
- default_file_path = get_file_path_for_program(
- resolved_program,
- config.examples_dir
- )
+ default_file_paths = get_file_paths_for_program(
+ resolved_program,
+ config.examples_dir
+ )
- if has_custom_entry_for_program(resolved_program, config):
- custom_file_path = get_file_path_for_program(
- resolved_program,
- config.custom_dir
- )
+ custom_file_paths = get_file_paths_for_program(
+ resolved_program,
+ config.custom_dir
+ )
# Handle the case where we have nothing for them.
- if default_file_path is None and custom_file_path is None:
+ if len(default_file_paths) == 0 and len(custom_file_paths) == 0:
print(
'No entry found for ' +
program +
@@ -92,10 +113,9 @@ def handle_program(program, config):
)
return
- raw_contents = get_contents_from_files(
- default_file_path,
- custom_file_path
- )
+ paths = [path for path in custom_file_paths]
+ paths.extend(default_file_paths)
+ raw_contents = get_contents_from_files(*paths)
formatted_contents = get_formatted_contents(
raw_contents,
@@ -108,48 +128,29 @@ def handle_program(program, config):
page_string(formatted_contents, config.pager_cmd)
-def get_file_path_for_program(program, dir_to_search):
+def get_file_paths_for_program(program, dir_to_search):
"""
- Return the file name and path for the program.
-
- examples_dir cannot be None
+ Return an array of full paths matching the given program. If no directory is
+ present, returns an empty list.
Path is not guaranteed to exist. Just says where it should be if it
existed. Paths must be fully expanded before being passed in (i.e. no ~ or
variables).
"""
if dir_to_search is None:
- raise TypeError('examples_dir cannot be None')
+ return []
else:
- result = os.path.join(dir_to_search, program + EXAMPLE_FILE_SUFFIX)
- return result
-
+ wanted_file_name = program + EXAMPLE_FILE_SUFFIX
+ result = []
+ for basedir, dirs, file_names in os.walk(dir_to_search):
+ for file_name in file_names:
+ if file_name == wanted_file_name:
+ result.append(os.path.join(basedir, file_name))
-def has_default_entry_for_program(program, config):
- """Return True if has standard examples for program, else False."""
- if config.examples_dir:
- file_path = get_file_path_for_program(
- program,
- config.examples_dir
- )
- return os.path.isfile(file_path)
- else:
- return False
-
-
-def has_custom_entry_for_program(program, config):
- """Return True if has custom examples for a program, else false."""
- if config.custom_dir:
- custom_path = get_file_path_for_program(
- program,
- config.custom_dir
- )
- return os.path.isfile(custom_path)
- else:
- return False
+ return result
-def get_contents_from_files(default_file_path, custom_file_path):
+def get_contents_from_files(*paths):
"""
Take the paths to two files and return the contents as a string. If
custom_file_path is valid, it will be shown before the contents of the
@@ -157,11 +158,8 @@ def get_contents_from_files(default_file_path, custom_file_path):
"""
file_data = ''
- if custom_file_path:
- file_data += _get_contents_of_file(custom_file_path)
-
- if default_file_path:
- file_data += _get_contents_of_file(default_file_path)
+ for path in paths:
+ file_data += _get_contents_of_file(path)
return file_data
@@ -224,18 +222,14 @@ def get_list_of_all_supported_commands(config):
custom file names. This is intentional, as that is the behavior for file
resolution--an alias will hide a custom file.
"""
- default_files = []
- custom_files = []
-
- if config.examples_dir and os.path.isdir(config.examples_dir):
- default_files = os.listdir(config.examples_dir)
- if config.custom_dir and os.path.isdir(config.custom_dir):
- custom_files = os.listdir(config.custom_dir)
+ default_files = _recursive_get_all_file_names(config.examples_dir)
+ custom_files = _recursive_get_all_file_names(config.custom_dir)
# Now filter so we only have example files, not things like aliases.json.
default_files = [path for path in default_files if _is_example_file(path)]
custom_files = [path for path in custom_files if _is_example_file(path)]
+
def get_without_suffix(file_name):
"""
Return the file name without the suffix, or the file name itself
diff --git a/setup.py b/setup.py
index df9d3c3..926d174 100644
--- a/setup.py
+++ b/setup.py
@@ -40,14 +40,13 @@ config = {
'author_email': '[email protected]',
'version': VERSION,
'install_requires': [],
- 'test_requires': ['nose', 'mock'],
+ 'test_requires': ['mock', 'pytest'],
'packages': ['eg'],
'scripts': ['bin/eg'],
'package_data': {
'eg': ['examples/*']
},
'zip_safe': False,
- 'test_suite': 'nose.collector'
}
setup(**config)
| Recursive custom-dir
Hi,
I would like to classify my md files in directories like:
```
~/eg
~/eg/glossary
~/eg/tech
```
and in my config file, just having something like:
```
custom-dir=~/eg
```
Actually, when a md file is in a subdirectory, it is not found. Is there a way to change that ?
Thanks. | srsudar/eg | diff --git a/test/color_test.py b/test/color_test.py
index df64352..985b712 100644
--- a/test/color_test.py
+++ b/test/color_test.py
@@ -4,7 +4,6 @@ from collections import namedtuple
from eg import color
from eg import config
from mock import patch
-from nose.tools import assert_equal
# Some hardcoded real colors.
_YELLOW = '\x1b[33m'
@@ -138,8 +137,7 @@ def test_colorize_heading():
colorizer = color.EgColorizer(color_config)
actual = colorizer.colorize_heading(clean)
-
- assert_equal(actual, target)
+ assert actual == target
def test_colorize_block_indents():
@@ -171,8 +169,7 @@ def test_colorize_block_indents():
colorizer = color.EgColorizer(color_config)
actual = colorizer.colorize_block_indent(clean)
-
- assert_equal(actual, target)
+ assert actual == target
def test_colorize_backticks():
@@ -202,28 +199,19 @@ def test_colorize_backticks():
colorizer = color.EgColorizer(color_config)
actual = colorizer.colorize_backticks(clean)
-
- assert_equal(actual, target)
-
-
-def test_colorize_text_calls_all_sub_methods():
+ assert actual == target
+
+@patch('eg.color.EgColorizer.colorize_backticks',
+ return_value='text-heading-indent-backticks')
+@patch('eg.color.EgColorizer.colorize_block_indent',
+ return_value='text-heading-indent')
+@patch('eg.color.EgColorizer.colorize_heading', return_value='text-heading')
+def test_colorize_text_calls_all_sub_methods(heading, indent, backticks):
"""colorize_text should call all of the helper colorize methods."""
- with patch(
- 'eg.color.EgColorizer.colorize_heading',
- return_value='text-heading'
- ) as heading:
- with patch(
- 'eg.color.EgColorizer.colorize_block_indent',
- return_value='text-heading-indent'
- ) as indent:
- with patch(
- 'eg.color.EgColorizer.colorize_backticks',
- return_value='text-heading-indent-backticks'
- ) as backticks:
- colorizer = color.EgColorizer(None)
- text = 'text'
- actual = colorizer.colorize_text(text)
- heading.assert_called_once_with(text)
- indent.assert_called_once_with('text-heading')
- backticks.assert_called_once_with('text-heading-indent')
- assert_equal('text-heading-indent-backticks', actual)
+ colorizer = color.EgColorizer(None)
+ text = 'text'
+ actual = colorizer.colorize_text(text)
+ heading.assert_called_once_with(text)
+ indent.assert_called_once_with('text-heading')
+ backticks.assert_called_once_with('text-heading-indent')
+ assert 'text-heading-indent-backticks' == actual
diff --git a/test/config_test.py b/test/config_test.py
index 1383dc3..7c06219 100644
--- a/test/config_test.py
+++ b/test/config_test.py
@@ -1,12 +1,9 @@
import os
+import pytest
from eg import config
from eg import substitute
from mock import patch
-from nose.tools import assert_equal
-from nose.tools import assert_false
-from nose.tools import assert_raises
-from nose.tools import assert_true
# Support python 2 and 3
try:
@@ -96,14 +93,14 @@ def test_config_returns_egrc_values_if_present(mock_get_config, mock_isfile):
None,
)
- assert_equal(resolved_config.examples_dir, examples_dir)
- assert_equal(resolved_config.custom_dir, custom_dir)
- assert_equal(resolved_config.color_config, test_color_config)
- assert_equal(resolved_config.use_color, test_use_color)
- assert_equal(resolved_config.pager_cmd, test_pager_cmd)
- assert_equal(resolved_config.editor_cmd, test_editor_cmd)
- assert_equal(resolved_config.squeeze, test_squeeze)
- assert_equal(resolved_config.subs, test_subs)
+ assert resolved_config.examples_dir == examples_dir
+ assert resolved_config.custom_dir == custom_dir
+ assert resolved_config.color_config == test_color_config
+ assert resolved_config.use_color == test_use_color
+ assert resolved_config.pager_cmd == test_pager_cmd
+ assert resolved_config.editor_cmd == test_editor_cmd
+ assert resolved_config.squeeze == test_squeeze
+ assert resolved_config.subs == test_subs
def _call_get_resolved_config_with_defaults(
@@ -137,7 +134,7 @@ def test_inform_if_paths_invalid_selectively_informs(mock_inform):
"""
config.inform_if_paths_invalid(None, None, None)
- assert_equal(mock_inform.call_count, 0)
+ assert mock_inform.call_count == 0
egrc_path = 'egrc'
ex_dir = 'ex dir'
@@ -145,7 +142,7 @@ def test_inform_if_paths_invalid_selectively_informs(mock_inform):
config.inform_if_paths_invalid(egrc_path, ex_dir, cu_dir)
- assert_equal(mock_inform.call_count, 3)
+ assert mock_inform.call_count == 3
mock_inform.assert_any_call(egrc_path)
mock_inform.assert_any_call(ex_dir)
mock_inform.assert_any_call(cu_dir)
@@ -229,7 +226,7 @@ def _assert_about_get_egrc_config(
actual = config.get_egrc_config(cli_path)
- assert_equal(actual, expected_config)
+ assert actual == expected_config
mock_expand.assert_called_once_with(path_to_expand)
mock_isfile.assert_called_once_with(expanded_path)
@@ -263,8 +260,8 @@ def test_get_resolved_config_calls_expand_paths(
actual = _call_get_resolved_config_with_defaults()
- assert_equal(actual.examples_dir, expected_examples_dir)
- assert_equal(actual.custom_dir, expected_custom_dir)
+ assert actual.examples_dir == expected_examples_dir
+ assert actual.custom_dir == expected_custom_dir
@patch('eg.config.get_editor_cmd_from_environment')
@@ -300,14 +297,14 @@ def _assert_about_get_resolved_config(
debug=False
)
- assert_equal(actual.examples_dir, expected_config.examples_dir)
- assert_equal(actual.custom_dir, expected_config.custom_dir)
- assert_equal(actual.use_color, expected_config.use_color)
- assert_equal(actual.color_config, expected_config.color_config)
- assert_equal(actual.pager_cmd, expected_config.pager_cmd)
- assert_equal(actual.squeeze, expected_config.squeeze)
- assert_equal(actual.subs, expected_config.subs)
- assert_equal(actual.editor_cmd, expected_config.editor_cmd)
+ assert actual.examples_dir == expected_config.examples_dir
+ assert actual.custom_dir == expected_config.custom_dir
+ assert actual.use_color == expected_config.use_color
+ assert actual.color_config == expected_config.color_config
+ assert actual.pager_cmd == expected_config.pager_cmd
+ assert actual.squeeze == expected_config.squeeze
+ assert actual.subs == expected_config.subs
+ assert actual.editor_cmd == expected_config.editor_cmd
mock_get_egrc_config.assert_called_once_with(cli_egrc_path)
mock_get_editor.assert_called_once_with()
@@ -413,10 +410,11 @@ def test_get_config_tuple_from_egrc_all_none_when_not_present():
subs=None,
editor_cmd=None,
)
- assert_equal(actual, target)
+ assert actual == target
-def test_get_config_tuple_from_egrc_when_present():
+@patch('eg.config.get_expanded_path')
+def test_get_config_tuple_from_egrc_when_present(mock_expand):
"""
Make sure we extract values correctly from the egrc.
"""
@@ -448,28 +446,25 @@ def test_get_config_tuple_from_egrc_when_present():
egrc_custom_dir
)
- with patch(
- 'eg.config.get_expanded_path',
- side_effect=return_expanded_path
- ) as mock_expand:
+ mock_expand.side_effect = return_expanded_path
- actual = config.get_config_tuple_from_egrc(PATH_EGRC_WITH_DATA)
+ actual = config.get_config_tuple_from_egrc(PATH_EGRC_WITH_DATA)
- expected = config.Config(
- examples_dir=egrc_examples_dir,
- custom_dir=egrc_custom_dir,
- color_config=color_config_from_file,
- use_color=egrc_use_color,
- pager_cmd=egrc_pager_cmd,
- squeeze=egrc_squeeze,
- subs=egrc_subs,
- editor_cmd=egrc_editor_cmd,
- )
+ expected = config.Config(
+ examples_dir=egrc_examples_dir,
+ custom_dir=egrc_custom_dir,
+ color_config=color_config_from_file,
+ use_color=egrc_use_color,
+ pager_cmd=egrc_pager_cmd,
+ squeeze=egrc_squeeze,
+ subs=egrc_subs,
+ editor_cmd=egrc_editor_cmd,
+ )
- assert_equal(actual, expected)
+ assert actual == expected
- mock_expand.assert_any_call(egrc_examples_dir)
- mock_expand.assert_any_call(egrc_custom_dir)
+ mock_expand.assert_any_call(egrc_examples_dir)
+ mock_expand.assert_any_call(egrc_custom_dir)
def _get_color_config_from_egrc_withdata():
@@ -507,7 +502,7 @@ def test_merge_color_configs_first_all_none():
merged = config.merge_color_configs(first, second)
- assert_equal(merged, second)
+ assert merged == second
def test_merge_color_configs_take_all_first():
@@ -528,7 +523,7 @@ def test_merge_color_configs_take_all_first():
merged = config.merge_color_configs(first, second)
- assert_equal(merged, first)
+ assert merged == first
def test_merge_color_configs_mixed():
@@ -562,62 +557,59 @@ def test_merge_color_configs_mixed():
prompt_reset=second.prompt_reset
)
- assert_equal(merged, target)
+ assert merged == target
def test_default_color_config():
"""Make sure the default color config is set to the right values."""
actual = config.get_default_color_config()
- assert_equal(actual.pound, config.DEFAULT_COLOR_POUND)
- assert_equal(actual.heading, config.DEFAULT_COLOR_HEADING)
- assert_equal(actual.code, config.DEFAULT_COLOR_CODE)
- assert_equal(actual.backticks, config.DEFAULT_COLOR_BACKTICKS)
- assert_equal(actual.prompt, config.DEFAULT_COLOR_PROMPT)
-
- assert_equal(actual.pound_reset, config.DEFAULT_COLOR_POUND_RESET)
- assert_equal(actual.heading_reset, config.DEFAULT_COLOR_HEADING_RESET)
- assert_equal(actual.code_reset, config.DEFAULT_COLOR_CODE_RESET)
- assert_equal(
- actual.backticks_reset,
- config.DEFAULT_COLOR_BACKTICKS_RESET
- )
- assert_equal(actual.prompt_reset, config.DEFAULT_COLOR_PROMPT_RESET)
+ assert actual.pound == config.DEFAULT_COLOR_POUND
+ assert actual.heading == config.DEFAULT_COLOR_HEADING
+ assert actual.code == config.DEFAULT_COLOR_CODE
+ assert actual.backticks == config.DEFAULT_COLOR_BACKTICKS
+ assert actual.prompt == config.DEFAULT_COLOR_PROMPT
+
+ assert actual.pound_reset == config.DEFAULT_COLOR_POUND_RESET
+ assert actual.heading_reset == config.DEFAULT_COLOR_HEADING_RESET
+ assert actual.code_reset == config.DEFAULT_COLOR_CODE_RESET
+ assert actual.backticks_reset == config.DEFAULT_COLOR_BACKTICKS_RESET
+ assert actual.prompt_reset == config.DEFAULT_COLOR_PROMPT_RESET
def test_parse_bool_true_for_truthy_values():
"""We should parse both 'True' and 'true' to True."""
- assert_true(config._parse_bool_from_raw_egrc_value('True'))
- assert_true(config._parse_bool_from_raw_egrc_value('true'))
+ assert config._parse_bool_from_raw_egrc_value('True') == True
+ assert config._parse_bool_from_raw_egrc_value('true') == True
def test_parse_bool_false_for_non_truthy_values():
"""Make sure we parse the likely non-truthy things as false."""
- assert_false(config._parse_bool_from_raw_egrc_value(''))
- assert_false(config._parse_bool_from_raw_egrc_value(None))
- assert_false(config._parse_bool_from_raw_egrc_value('false'))
- assert_false(config._parse_bool_from_raw_egrc_value('False'))
+ assert config._parse_bool_from_raw_egrc_value('') == False
+ assert config._parse_bool_from_raw_egrc_value(None) == False
+ assert config._parse_bool_from_raw_egrc_value('false') == False
+ assert config._parse_bool_from_raw_egrc_value('False') == False
def test_get_priority_first():
"""The first non-None value should always be returned."""
target = 'alpha'
actual = config.get_priority(target, 'second', 'third')
- assert_equal(target, actual)
+ assert target == actual
def test_get_priority_second():
"""The second non-None should be returned if the first is None."""
target = 'beta'
actual = config.get_priority(None, target, 'third')
- assert_equal(target, actual)
+ assert target == actual
def test_get_priority_third():
"""The last should be taken if the first two are None."""
target = 'gamma'
actual = config.get_priority(None, None, target)
- assert_equal(target, actual)
+ assert target == actual
def test_get_priority_respect_false():
@@ -628,7 +620,7 @@ def test_get_priority_respect_false():
"""
target = False
actual = config.get_priority(False, 'second', 'third')
- assert_equal(target, actual)
+ assert target == actual
def test_parse_substitution_from_list_without_is_multiline():
@@ -639,7 +631,7 @@ def test_parse_substitution_from_list_without_is_multiline():
target = substitute.Substitution('foo', 'bar', False)
list_rep = ['foo', 'bar']
actual = config.parse_substitution_from_list(list_rep)
- assert_equal(actual, target)
+ assert actual == target
def test_parse_substitution_from_list_with_is_multiline():
@@ -649,36 +641,32 @@ def test_parse_substitution_from_list_with_is_multiline():
target = substitute.Substitution('patt', 'repl', True)
list_rep = ['patt', 'repl', True]
actual = config.parse_substitution_from_list(list_rep)
- assert_equal(actual, target)
+ assert actual == target
def test_parse_substitution_error_if_not_list():
"""
Raise a SyntaxError if the value is not a list.
"""
- assert_raises(SyntaxError, config.parse_substitution_from_list, 'foo_str')
+ with pytest.raises(SyntaxError):
+ config.parse_substitution_from_list('foo_str')
def test_parse_substitution_error_if_wrong_length():
"""
Raise a SyntaxError if the list is less than two long.
"""
- assert_raises(
- SyntaxError,
- config.parse_substitution_from_list,
- ['foo']
- )
+ with pytest.raises(SyntaxError):
+ config.parse_substitution_from_list(['foo'])
def test_parse_substitution_error_if_third_element_not_bool():
"""
Raise a SyntaxError if the third element in the list is not a boolean.
"""
- assert_raises(
- SyntaxError,
- config.parse_substitution_from_list,
- ['foo', 'bar', 'intentionally_not_a_bool']
- )
+ with pytest.raises(SyntaxError):
+ bad_args = ['foo', 'bar', 'intentionally_not_a_bool']
+ config.parse_substitution_from_list(bad_args)
def test_get_substitution_from_config_finds_single_substitution():
@@ -693,7 +681,7 @@ def test_get_substitution_from_config_finds_single_substitution():
config_obj = _get_egrc_config(PATH_EGRC_SINGLE_SUB)
actual = config.get_substitutions_from_config(config_obj)
- assert_equal(actual, target)
+ assert actual == target
def test_get_substitution_from_config_finds_multiple_substitutions():
@@ -710,7 +698,7 @@ def test_get_substitution_from_config_finds_multiple_substitutions():
config_obj = _get_egrc_config(PATH_EGRC_WITH_DATA)
actual = config.get_substitutions_from_config(config_obj)
- assert_equal(actual, target)
+ assert actual == target
def _get_egrc_config(egrc_path):
diff --git a/test/core_test.py b/test/core_test.py
index 9e5d34b..3c9db1e 100644
--- a/test/core_test.py
+++ b/test/core_test.py
@@ -1,6 +1,5 @@
from collections import namedtuple
from mock import patch
-from nose.tools import assert_equal
from eg import core
from test.util_test import _create_config
@@ -109,22 +108,22 @@ def _helper_parses_correctly(
with patch('sys.argv', new=argv):
actual_args = core._parse_arguments()
- assert_equal(actual_args.config_file, expected_args.config_file)
- assert_equal(actual_args.custom_dir, expected_args.custom_dir)
- assert_equal(actual_args.examples_dir, expected_args.examples_dir)
- assert_equal(actual_args.list, expected_args.list)
- assert_equal(actual_args.pager_cmd, expected_args.pager_cmd)
- assert_equal(actual_args.use_color, expected_args.use_color)
- assert_equal(actual_args.squeeze, expected_args.squeeze)
- assert_equal(actual_args.version, expected_args.version)
- assert_equal(actual_args.edit, expected_args.edit)
+ assert actual_args.config_file == expected_args.config_file
+ assert actual_args.custom_dir == expected_args.custom_dir
+ assert actual_args.examples_dir == expected_args.examples_dir
+ assert actual_args.list == expected_args.list
+ assert actual_args.pager_cmd == expected_args.pager_cmd
+ assert actual_args.use_color == expected_args.use_color
+ assert actual_args.squeeze == expected_args.squeeze
+ assert actual_args.version == expected_args.version
+ assert actual_args.edit == expected_args.edit
# Note that here we use the default, as described above.
- assert_equal(actual_args.program, default_program)
+ assert actual_args.program == default_program
# Now make sure we didn't call any other parser methods
- assert_equal(mock_exit.call_args_list, [])
- assert_equal(mock_help.call_args_list, [])
- assert_equal(mock_error.call_args_list, [])
+ assert mock_exit.call_args_list == []
+ assert mock_help.call_args_list == []
+ assert mock_error.call_args_list == []
def test_parses_correctly_if_just_program():
@@ -304,29 +303,29 @@ def _helper_run_eg_responds_to_args_correctly(
if (call_show_list):
mock_show_list.assert_called_once_with(resolved_config)
else:
- assert_equal(mock_show_list.call_args_list, [])
+ assert mock_show_list.call_args_list == []
if (call_show_version):
mock_show_version.assert_called_once_with()
else:
- assert_equal(mock_show_version.call_args_list, [])
+ assert mock_show_version.call_args_list == []
if (call_no_editor):
mock_no_editor.assert_called_once_with()
else:
- assert_equal(mock_no_editor.call_args_list, [])
+ assert mock_no_editor.call_args_list == []
if (call_edit_custom):
mock_edit_custom.assert_called_once_with(args.program, resolved_config)
else:
- assert_equal(mock_edit_custom.call_args_list, [])
+ assert mock_edit_custom.call_args_list == []
if (call_handle_program):
mock_handle_program.assert_called_once_with(
args.program, resolved_config
)
else:
- assert_equal(mock_handle_program.call_args_list, [])
+ assert mock_handle_program.call_args_list == []
def test_shows_version():
diff --git a/test/substitute_test.py b/test/substitute_test.py
index 7c5ffb2..2b636ab 100644
--- a/test/substitute_test.py
+++ b/test/substitute_test.py
@@ -2,22 +2,20 @@ import re
from eg import substitute
from mock import patch
-from nose.tools import assert_equal
-from nose.tools import assert_true
def test_equality():
"""== should work on Substitution objects."""
alpha = substitute.Substitution('foo', 'bar', False)
beta = substitute.Substitution('foo', 'bar', False)
- assert_true(alpha == beta)
+ assert alpha == beta
def test_not_equal():
"""!= should work on Substitution objects."""
alpha = substitute.Substitution('foo', 'bar', True)
beta = substitute.Substitution('foo', 'bar', False)
- assert_true(alpha != beta)
+ assert alpha != beta
def test_applies_multiline_substitution():
@@ -29,7 +27,7 @@ def test_applies_multiline_substitution():
sub = substitute.Substitution(pattern, replacement, True)
actual = sub.apply_and_get_result(raw)
- assert_equal(actual, subbed)
+ assert actual == subbed
def test_applies_normal_mode_substitution():
@@ -41,7 +39,7 @@ def test_applies_normal_mode_substitution():
sub = substitute.Substitution(pattern, replacement, False)
actual = sub.apply_and_get_result(raw)
- assert_equal(actual, subbed)
+ assert actual == subbed
def test_calls_correct_re_methods_for_multiline():
@@ -60,8 +58,12 @@ def test_calls_correct_re_methods_without_multiline():
_helper_assert_about_apply_and_get_result(False)
+@patch('re.sub')
+@patch('re.compile')
def _helper_assert_about_apply_and_get_result(
- is_multiline
+ is_multiline,
+ compile_method,
+ sub_method
):
"""
Helper method to assert about the correct results of calls to
@@ -72,22 +74,24 @@ def _helper_assert_about_apply_and_get_result(
compiled_pattern = 'whoopty compiled'
subbed_result = 'substituted'
starting_string = 'the start'
- with patch('re.compile', return_value=compiled_pattern) as compile_method:
- with patch('re.sub', return_value=subbed_result) as sub_method:
- if is_multiline:
- sub = substitute.Substitution(pattern, repl, True)
- else:
- sub = substitute.Substitution(pattern, repl, False)
- actual = sub.apply_and_get_result(starting_string)
-
- if is_multiline:
- compile_method.assert_called_once_with(pattern, re.MULTILINE)
- else:
- compile_method.assert_called_once_with(pattern)
-
- sub_method.assert_called_once_with(
- compiled_pattern,
- repl,
- starting_string
- )
- assert_equal(actual, subbed_result)
+
+ compile_method.return_value = compiled_pattern
+ sub_method.return_value = subbed_result
+
+ if is_multiline:
+ sub = substitute.Substitution(pattern, repl, True)
+ else:
+ sub = substitute.Substitution(pattern, repl, False)
+ actual = sub.apply_and_get_result(starting_string)
+
+ if is_multiline:
+ compile_method.assert_called_once_with(pattern, re.MULTILINE)
+ else:
+ compile_method.assert_called_once_with(pattern)
+
+ sub_method.assert_called_once_with(
+ compiled_pattern,
+ repl,
+ starting_string
+ )
+ assert actual == subbed_result
diff --git a/test/util_test.py b/test/util_test.py
index af386d2..d874397 100644
--- a/test/util_test.py
+++ b/test/util_test.py
@@ -6,7 +6,6 @@ from eg import substitute
from eg import util
from mock import Mock
from mock import patch
-from nose.tools import assert_equal
PATH_UNSQUEEZED_FILE = os.path.join(
'test',
@@ -46,219 +45,122 @@ def _create_config(
)
-def test_get_file_path_for_program_correct():
+@patch('os.walk')
+def test_get_file_paths_for_program_with_single(mock_walk):
program = 'cp'
- examples_dir = '/Users/tyrion/test/eg_dir'
+ examples_dir = '/Users/tyrion'
program_file = program + util.EXAMPLE_FILE_SUFFIX
- target = os.path.join(examples_dir, program_file)
+ expected = ['/Users/tyrion/cp.md']
- actual = util.get_file_path_for_program(program, examples_dir)
-
- assert_equal(actual, target)
+ mock_walk.return_value = [
+ [examples_dir, [], [program_file, 'cp.txt', 'other_file.md']],
+ ]
+ actual = util.get_file_paths_for_program(program, examples_dir)
+ assert actual == expected
+ mock_walk.assert_called_once_with(examples_dir)
-def test_has_default_entry_for_program_no_examples_dir():
- test_config = _create_config(
- examples_dir=None,
- custom_dir='customdir',
- )
+@patch('os.walk')
+def test_get_file_paths_for_program_with_nested(mock_walk):
program = 'cp'
+ examples_dir = '/Users/tyrion'
+ program_file = 'cp.md'
+
+ mock_walk.return_value = [
+ [
+ examples_dir,
+ ['dirA', 'dirB'],
+ [program_file, 'cp.txt', 'other_file.md'],
+ ],
+ [
+ examples_dir + '/dirA',
+ ['dirA-child'],
+ [program_file, 'bad.md'],
+ ],
+ [
+ examples_dir + '/dirA/dirA-child',
+ [],
+ ['bad.md', program_file, 'wtf.md'],
+ ],
+ [
+ examples_dir + '/dirB',
+ [],
+ ['foo.md', program_file],
+ ],
+ ]
- has_entry = util.has_default_entry_for_program(program, test_config)
-
- assert_equal(False, has_entry)
-
-
-def test_has_custom_entry_for_program_no_custom_dir():
- test_config = _create_config(
- examples_dir='examplesdir',
- custom_dir=None,
- )
-
- program = 'find'
+ expected = [
+ '/Users/tyrion/cp.md',
+ '/Users/tyrion/dirA/cp.md',
+ '/Users/tyrion/dirA/dirA-child/cp.md',
+ '/Users/tyrion/dirB/cp.md',
+ ]
- has_entry = util.has_custom_entry_for_program(program, test_config)
+ actual = util.get_file_paths_for_program(program, examples_dir)
+ assert actual == expected
+ mock_walk.assert_called_once_with(examples_dir)
- assert_equal(False, has_entry)
+@patch('os.walk')
+def test_get_file_paths_for_program_with_none(mock_walk):
+ expected = []
+ mock_walk.return_value = []
-def test_has_default_entry_when_present():
- test_config = _create_config(
- examples_dir='examplesdir',
- )
- program = 'mv'
+ actual = util.get_file_paths_for_program('cp', '/Users/tyrion')
+ assert actual == expected
+ mock_walk.assert_called_once_with('/Users/tyrion')
- path = '/Users/tyrion/examplesdir/mv.md'
- _helper_assert_path_isfile_not_present(
- test_config,
- program,
- path,
- 'default',
- True,
- True
- )
+@patch('os.walk')
+def test_get_file_paths_for_program_with_no_dir(mock_walk):
+ assert util.get_file_paths_for_program('cp', None) == []
-def test_has_default_entry_when_not_present():
- test_config = _create_config(
- examples_dir='examplesdir',
- )
+@patch('eg.util.page_string')
+@patch('eg.util.get_formatted_contents')
+@patch('eg.util.get_contents_from_files')
+@patch('eg.util.get_resolved_program')
+def test_handle_program_no_entries(
+ mock_resolve_program,
+ mock_get_contents,
+ mock_format,
+ mock_page_string,
+):
+ """
+ We should do the right thing if there are no entries for a given program.
+ """
program = 'cp'
+ test_config = _create_config()
- path = '/Users/tyrion/examplesdir/cp.md'
-
- _helper_assert_path_isfile_not_present(
- test_config,
- program,
- path,
- 'default',
- False,
- False,
- )
-
-
-def test_has_custom_entry_when_present():
- test_config = _create_config(
- examples_dir=None,
- custom_dir='customdir',
- color_config=None,
- use_color=False,
- pager_cmd=None,
- squeeze=False,
- subs=None
- )
- program = 'find'
+ mock_resolve_program.return_value = program
- path = '/Users/tyrion/customdir/find.md'
+ util.handle_program(program, test_config)
- _helper_assert_path_isfile_not_present(
- test_config,
+ mock_resolve_program.assert_called_once_with(
program,
- path,
- 'custom',
- True,
- True
- )
-
-
-def test_has_custom_entry_when_not_present():
- test_config = _create_config(
- custom_dir='customdir',
- color_config=None,
- use_color=False,
- pager_cmd=None,
- squeeze=False,
- subs=None
+ test_config
)
- program = 'locate'
+ # We should have aborted and not called any of the
+ # other methods.
+ assert mock_get_contents.call_count == 0
+ assert mock_format.call_count == 0
+ assert mock_page_string.call_count == 0
- path = '/Users/tyrion/customdir/locate.md'
- _helper_assert_path_isfile_not_present(
- test_config,
- program,
- path,
- 'custom',
- False,
- False,
- )
-
-
-def _helper_assert_path_isfile_not_present(
- config,
- program,
- file_path_for_program,
- defaultOrCustom,
- isfile,
- has_entry
+@patch('eg.util.get_resolved_program')
+@patch('eg.util.get_contents_from_files')
+@patch('eg.util.get_file_paths_for_program')
+@patch('eg.util.get_formatted_contents')
+@patch('eg.util.page_string')
+def test_handle_program_finds_paths_and_calls_open_pager_no_alias(
+ mock_page,
+ mock_format,
+ mock_get_paths,
+ mock_get_contents,
+ mock_resolve,
):
- """
- Helper for asserting whether or not a default file is present. Pass in the
- parameters defining the program and directories and say whether or not that
- file should be found.
- """
- if defaultOrCustom != 'default' and defaultOrCustom != 'custom':
- raise TypeError(
- 'defaultOrCustom must be default or custom, not ' + defaultOrCustom
- )
- with patch(
- 'eg.util.get_file_path_for_program',
- return_value=file_path_for_program
- ) as mock_get_path:
- with patch('os.path.isfile', return_value=isfile) as mock_isfile:
-
- actual = None
- correct_dir = None
-
- if (defaultOrCustom == 'default'):
- correct_dir = config.examples_dir
- actual = util.has_default_entry_for_program(program, config)
- else:
- correct_dir = config.custom_dir
- actual = util.has_custom_entry_for_program(program, config)
-
- mock_get_path.assert_called_once_with(program, correct_dir)
- mock_isfile.assert_called_once_with(file_path_for_program)
-
- assert_equal(actual, has_entry)
-
-
-def test_handle_program_no_entries():
- """
- We should do the right thing if there are no entries for a given program.
- """
- program = 'cp'
- test_config = _create_config()
-
- with patch(
- 'eg.util.get_resolved_program',
- return_value=program
- ) as mock_resolve_program:
- with patch(
- 'eg.util.has_default_entry_for_program',
- return_value=False
- ) as mock_has_default:
- with patch(
- 'eg.util.has_custom_entry_for_program',
- return_value=False
- ) as mock_has_custom:
- with patch(
- 'eg.util.get_contents_from_files'
- ) as mock_get_contents:
- with patch(
- 'eg.util.get_formatted_contents'
- ) as mock_format:
- with patch(
- 'eg.util.page_string'
- ) as mock_page_string:
- util.handle_program(program, test_config)
-
- mock_resolve_program.assert_called_once_with(
- program,
- test_config
- )
-
- mock_has_default.assert_called_once_with(
- program,
- test_config
- )
-
- mock_has_custom.assert_called_once_with(
- program,
- test_config
- )
-
- # We should have aborted and not called any of the
- # other methods.
- assert_equal(mock_get_contents.call_count, 0)
- assert_equal(mock_format.call_count, 0)
- assert_equal(mock_page_string.call_count, 0)
-
-
-def test_handle_program_finds_paths_and_calls_open_pager_no_alias():
"""
If there are entries for the program, handle_program needs to get the
paths, get the contents, format the contents, and page the resulting
@@ -287,8 +189,8 @@ def test_handle_program_finds_paths_and_calls_open_pager_no_alias():
subs=subs
)
- default_path = 'test-eg-dir/mv.md'
- custom_path = 'test-custom-dir/mv.md'
+ default_paths = ['test-eg-dir/mv.md', 'test-eg-dir/foo/mv.md']
+ custom_paths = ['test-custom-dir/mv.md', 'test-custom-dir/bar.md']
def return_correct_path(*args, **kwargs):
program_param = args[0]
@@ -296,9 +198,9 @@ def test_handle_program_finds_paths_and_calls_open_pager_no_alias():
if program_param != program:
raise NameError('expected ' + program + ', got ' + program_param)
if dir_param == examples_dir:
- return default_path
+ return default_paths
elif dir_param == custom_dir:
- return custom_path
+ return custom_paths
else:
raise NameError(
'got ' +
@@ -308,76 +210,60 @@ def test_handle_program_finds_paths_and_calls_open_pager_no_alias():
' or ' +
custom_dir)
- with patch(
- 'eg.util.get_resolved_program',
- return_value=program
- ) as mock_resolve:
- with patch(
- 'eg.util.has_default_entry_for_program',
- return_value=True
- ) as mock_has_default:
- with patch(
- 'eg.util.has_custom_entry_for_program',
- return_value=True
- ) as mock_has_custom:
- with patch(
- 'eg.util.get_contents_from_files',
- return_value=file_contents
- ) as mock_get_contents:
- with patch(
- 'eg.util.get_file_path_for_program',
- side_effect=return_correct_path
- ) as mock_get_file:
- with patch(
- 'eg.util.get_formatted_contents',
- return_value=formatted_contents
- ) as mock_format:
- with patch('eg.util.page_string') as mock_page:
- util.handle_program(program, test_config)
-
- mock_resolve.assert_called_once_with(
- program,
- test_config
- )
-
- mock_has_default.assert_called_once_with(
- program,
- test_config
- )
- mock_has_custom.assert_called_once_with(
- program,
- test_config
- )
-
- mock_get_file.assert_any_call(
- program,
- examples_dir
- )
- mock_get_file.assert_any_call(
- program,
- custom_dir,
- )
-
- mock_get_contents.assert_called_once_with(
- default_path,
- custom_path
- )
-
- mock_format.assert_called_once_with(
- file_contents,
- use_color=test_config.use_color,
- color_config=test_config.color_config,
- squeeze=test_config.squeeze,
- subs=test_config.subs
- )
-
- mock_page.assert_called_once_with(
- formatted_contents,
- test_config.pager_cmd
- )
-
-
-def test_handle_program_finds_paths_and_calls_open_pager_with_alias():
+ mock_format.return_value = formatted_contents
+ mock_get_paths.side_effect=return_correct_path
+ mock_get_contents.return_value = file_contents
+ mock_resolve.return_value = program
+
+ util.handle_program(program, test_config)
+
+ mock_resolve.assert_called_once_with(
+ program,
+ test_config
+ )
+
+ mock_get_paths.assert_any_call(
+ program,
+ examples_dir
+ )
+ mock_get_paths.assert_any_call(
+ program,
+ custom_dir,
+ )
+
+ mock_get_contents.assert_called_once_with(
+ custom_paths[0],
+ custom_paths[1],
+ default_paths[0],
+ default_paths[1],
+ )
+
+ mock_format.assert_called_once_with(
+ file_contents,
+ use_color=test_config.use_color,
+ color_config=test_config.color_config,
+ squeeze=test_config.squeeze,
+ subs=test_config.subs
+ )
+
+ mock_page.assert_called_once_with(
+ formatted_contents,
+ test_config.pager_cmd
+ )
+
+
+@patch('eg.util.get_resolved_program')
+@patch('eg.util.get_contents_from_files')
+@patch('eg.util.get_file_paths_for_program')
+@patch('eg.util.get_formatted_contents')
+@patch('eg.util.page_string')
+def test_handle_program_finds_paths_and_calls_open_pager_with_alias(
+ mock_page,
+ mock_format,
+ mock_get_paths,
+ mock_get_contents,
+ mock_resolve,
+):
"""
If there are entries for the program, handle_program needs to get the
paths, get the contents, format the contents, and page the resulting
@@ -407,8 +293,8 @@ def test_handle_program_finds_paths_and_calls_open_pager_with_alias():
subs=subs
)
- default_path = 'test-eg-dir/ln.md'
- custom_path = 'test-custom-dir/ln.md'
+ default_paths = ['test-eg-dir/ln.md']
+ custom_paths = ['test-custom-dir/ln.md']
def return_correct_path(*args, **kwargs):
program_param = args[0]
@@ -421,9 +307,9 @@ def test_handle_program_finds_paths_and_calls_open_pager_with_alias():
program_param
)
if dir_param == examples_dir:
- return default_path
+ return default_paths
elif dir_param == custom_dir:
- return custom_path
+ return custom_paths
else:
raise NameError(
'got ' +
@@ -433,229 +319,104 @@ def test_handle_program_finds_paths_and_calls_open_pager_with_alias():
' or ' +
custom_dir)
- with patch(
- 'eg.util.get_resolved_program',
- return_value=resolved_program
- ) as mock_resolve:
- with patch(
- 'eg.util.has_default_entry_for_program',
- return_value=True
- ) as mock_has_default:
- with patch(
- 'eg.util.has_custom_entry_for_program',
- return_value=True
- ) as mock_has_custom:
- with patch(
- 'eg.util.get_contents_from_files',
- return_value=file_contents
- ) as mock_get_contents:
- with patch(
- 'eg.util.get_file_path_for_program',
- side_effect=return_correct_path
- ) as mock_get_file:
- with patch(
- 'eg.util.get_formatted_contents',
- return_value=formatted_contents
- ) as mock_format:
- with patch('eg.util.page_string') as mock_page:
- util.handle_program(
- alias_for_program,
- test_config
- )
-
- mock_resolve.assert_called_once_with(
- alias_for_program,
- test_config
- )
-
- mock_has_default.assert_called_once_with(
- resolved_program,
- test_config
- )
- mock_has_custom.assert_called_once_with(
- resolved_program,
- test_config
- )
-
- mock_get_file.assert_any_call(
- resolved_program,
- examples_dir
- )
- mock_get_file.assert_any_call(
- resolved_program,
- custom_dir,
- )
-
- mock_get_contents.assert_called_once_with(
- default_path,
- custom_path
- )
-
- mock_format.assert_called_once_with(
- file_contents,
- use_color=test_config.use_color,
- color_config=test_config.color_config,
- squeeze=test_config.squeeze,
- subs=test_config.subs
- )
-
- mock_page.assert_called_once_with(
- formatted_contents,
- test_config.pager_cmd
- )
-
-
-def _helper_assert_list_supported_programs(
- config_obj,
- default_list,
- custom_list,
- alias_dict,
- target_list
-):
- """
- config_obj: Config object to be passed to get_list function
- default_list: the list of default programs
- custom_list: the list of programs with custom programs
- alias_dict: dict of aliases
- target_list: list of string that should be returned
- """
- def give_list(*args, **kwargs):
- dir_name = args[0]
- if dir_name == config_obj.custom_dir:
- return custom_list
- elif dir_name == config_obj.examples_dir:
- return default_list
- else:
- raise NameError('Not the default or custom dir: ' + dir_name)
-
- with patch('os.path.isdir', return_value=True):
- with patch('os.listdir', side_effect=give_list):
- with patch('eg.util.get_alias_dict', return_value=alias_dict):
- actual = util.get_list_of_all_supported_commands(config_obj)
- assert_equal(actual, target_list)
-
+ mock_format.return_value = formatted_contents
+ mock_get_paths.side_effect = return_correct_path
+ mock_get_contents.return_value = file_contents
+ mock_resolve.return_value = resolved_program
-def test_list_supported_programs_only_default():
- example_dir = 'example/dir'
- custom_dir = 'custom/dir'
-
- test_config = _create_config(
- examples_dir=example_dir,
- custom_dir=custom_dir,
+ util.handle_program(
+ alias_for_program,
+ test_config
)
- examples_list = ['aliases', 'cp.md', 'find.md', 'xargs.md']
- custom_list = []
- target = ['cp', 'find', 'xargs']
- _helper_assert_list_supported_programs(
- test_config,
- examples_list,
- custom_list,
- {},
- target
+ mock_resolve.assert_called_once_with(
+ alias_for_program,
+ test_config
)
-
-def test_list_supported_programs_only_custom():
- example_dir = 'example/dir'
- custom_dir = 'custom/dir'
-
- test_config = _create_config(
- examples_dir=example_dir,
- custom_dir=custom_dir,
+ mock_get_paths.assert_any_call(
+ resolved_program,
+ examples_dir
)
- target = ['awk +', 'bar +', 'xor +']
- _helper_assert_list_supported_programs(
- test_config,
- [],
- ['awk.md', 'bar.md', 'xor.md'],
- {},
- target
+ mock_get_paths.assert_any_call(
+ resolved_program,
+ custom_dir,
)
+ mock_get_contents.assert_called_once_with(
+ custom_paths[0],
+ default_paths[0]
+ )
-def test_list_supported_programs_both():
- examples_dir = 'example/dir'
- custom_dir = 'custom/dir'
-
- test_config = _create_config(
- examples_dir=examples_dir,
- custom_dir=custom_dir,
+ mock_format.assert_called_once_with(
+ file_contents,
+ use_color=test_config.use_color,
+ color_config=test_config.color_config,
+ squeeze=test_config.squeeze,
+ subs=test_config.subs
)
- examples_list = ['alpha.md', 'bar.md', 'both.md', 'examples.md']
- custom_list = ['azy.md', 'both.md', 'examples.md', 'zeta.md']
- target = [
- 'alpha',
- 'azy +',
- 'bar',
- 'both *',
- 'examples *',
- 'zeta +'
- ]
- _helper_assert_list_supported_programs(
- test_config,
- examples_list,
- custom_list,
- {},
- target
+
+ mock_page.assert_called_once_with(
+ formatted_contents,
+ test_config.pager_cmd
)
-def test_list_supported_commands_includes_aliases():
- examples_dir = 'examples/dir/for/aliases'
- custom_dir = 'custom/dir/for/aliases'
+def test_get_list_of_all_supported_commands(tmpdir):
+ dir_example = tmpdir.mkdir('examples')
+ dir_custom = tmpdir.mkdir('custom')
- test_config = _create_config(
- examples_dir=examples_dir,
- custom_dir=custom_dir,
+ config = _create_config(
+ examples_dir=str(dir_example),
+ custom_dir=str(dir_custom),
)
- # Things we want to cover:
- # normal alias
- # alias that shadows a custom-only declaration
- # alias that points to a * or + program
-
- examples_list = [
- 'alpha.md',
- 'bar.md',
- 'both.md',
- 'default-only.md',
- 'examples.md',
- 'z-hidden-by-alias.md'
- ]
- custom_list = [
- 'aaa.md',
- 'azy.md',
- 'both.md',
- 'examples.md',
- 'zeta.md'
+
+ expected = [
+ 'a-only-default',
+ 'b-both *',
+ 'c-only-custom +',
+ 'd-only-custom-nested +',
+ 'e-only-default-nested',
+ 'f-default-custom-nested',
+ 'g-both-different-levels *',
+ 't-a-only-default-alias -> a-only-default',
+ 'u-b-both-alias -> b-both *',
+ 'v-c-only-custom-alias -> c-only-custom +'
]
- alias_dict = {
- 'aaa': 'alpha',
- 'y-alias-for-both': 'both',
- 'alias-for-azy': 'azy',
- 'z-hidden-by-alias': 'azy'
+
+ aliases = {
+ 't-a-only-default-alias': 'a-only-default',
+ 'u-b-both-alias': 'b-both',
+ 'v-c-only-custom-alias': 'c-only-custom'
}
- target = [
- 'aaa -> alpha', # shadow the custom file
- 'alias-for-azy -> azy +',
- 'alpha',
- 'azy +',
- 'bar',
- 'both *',
- 'default-only',
- 'examples *',
- 'y-alias-for-both -> both *',
- 'z-hidden-by-alias -> azy +',
- 'zeta +'
- ]
- _helper_assert_list_supported_programs(
- test_config,
- examples_list,
- custom_list,
- alias_dict,
- target
- )
+ # Make the directory structure we expect.
+ dir_example_nested = dir_example.mkdir('default-nested')
+ dir_custom_nested = dir_custom.mkdir('custom-nested')
+
+ dir_example.join('a-only-default.md').write('foo')
+
+ dir_example.join('b-both.md').write('foo')
+ dir_custom.join('b-both.md').write('foo')
+
+ dir_custom.join('c-only-custom.md').write('foo')
+
+ dir_custom_nested.join('d-only-custom-nested.md').write('foo')
+
+ dir_example_nested.join('e-only-default-nested.md').write('foo')
+
+ dir_example_nested.join('f-default-custom-nested.md').write('foo')
+
+ dir_example.join('g-both-different-levels.md').write('foo')
+ dir_custom_nested.join('g-both-different-levels.md').write('foo')
+
+ # Use the 'with' context manager rather than the @decorator, because the
+ # tmpdir fixture doesn't play nice with the decorator.
+ with patch('eg.util.get_alias_dict') as mock_get_alias:
+ mock_get_alias.return_value = aliases
+ actual = util.get_list_of_all_supported_commands(config)
+ assert actual == expected
+ mock_get_alias.assert_called_once_with(config)
def test_list_supported_programs_fails_gracefully_if_no_dirs():
@@ -664,7 +425,7 @@ def test_list_supported_programs_fails_gracefully_if_no_dirs():
actual = util.get_list_of_all_supported_commands(test_config)
target = []
- assert_equal(actual, target)
+ assert actual == target
def test_calls_pipepager_if_not_less():
@@ -702,10 +463,14 @@ def test_calls_fallback_if_cmd_is_flag_string():
)
+@patch('pydoc.pager')
+@patch('pydoc.pipepager')
def _helper_assert_about_pager(
str_to_page,
pager_cmd,
- use_fallback
+ use_fallback,
+ pipepager,
+ default_pager,
):
"""
Help with asserting about pager.
@@ -715,62 +480,17 @@ def _helper_assert_about_pager(
use_default: false if we should actually use pydoc.pipepager, true if we
instead are going to fallback to pydoc.pager
"""
- with patch('pydoc.pager') as default_pager:
- with patch('pydoc.pipepager') as pipepager:
- util.page_string(str_to_page, pager_cmd)
-
- if use_fallback:
- default_pager.assert_called_once_with(str_to_page)
- assert_equal(pipepager.call_count, 0)
- else:
- assert_equal(default_pager.call_count, 0)
- pipepager.assert_called_once_with(
- str_to_page,
- cmd=pager_cmd
- )
-
-
-def _helper_assert_file_contents(
- default_path,
- default_contents,
- custom_path,
- custom_contents,
- target_contents
-):
- """
- Helper method to assert things about the get_contents_from_files method.
- Does not actually hit the disk.
-
- default_path: the path of a default file
- default_contents: the contents of the default file
- custom_path: the path to a custom file
- custom_contents: the contents of the custom file
- target_contents: the final combined contents that should be returned by the
- get_contents_from_files method.
- """
-
- # This method will be used by the mock framework to return the right file
- # contents based on the file name.
- def return_file_contents(*args, **kwargs):
- if args[0] == default_path:
- return default_contents
- elif args[0] == custom_path:
- return custom_contents
- else:
- raise TypeError(
- args[0] +
- ' was an unexpected path--should be ' +
- default_path +
- ' or ' +
- custom_path
- )
+ util.page_string(str_to_page, pager_cmd)
- with patch(
- 'eg.util._get_contents_of_file',
- side_effect=return_file_contents
- ):
- actual = util.get_contents_from_files(default_path, custom_path)
- assert_equal(actual, target_contents)
+ if use_fallback:
+ default_pager.assert_called_once_with(str_to_page)
+ assert pipepager.call_count == 0
+ else:
+ assert default_pager.call_count == 0
+ pipepager.assert_called_once_with(
+ str_to_page,
+ cmd=pager_cmd
+ )
@patch('eg.util.pydoc.pipepager', side_effect=KeyboardInterrupt)
@@ -797,52 +517,87 @@ def test_page_string_excepts_keyboard_interrupt_if_none(pager_mock):
pager_mock.assert_called_once_with('page me plz')
-def test_get_contents_from_files_only_default():
+def test_get_contents_from_files_handles_none():
"""
- Retrieve the correct file contents when only a default file is present.
+ Empty string if no files.
"""
- default_path = 'test/default/path'
- default_contents = 'contents of the default file'
_helper_assert_file_contents(
- default_path,
- default_contents,
- None,
- None,
- default_contents
+ [],
+ ''
)
-def test_get_contents_from_files_only_custom():
- """
- Retrieve only the custom file contents when we only have a custom file
- path.
- """
- custom_path = 'test/custom/path'
- custom_contents = 'contents of the custom file'
+def test_get_contents_from_files_handles_one():
+ file_infos = [
+ {
+ 'path': 'test/path',
+ 'contents': 'contents of file'
+ }
+ ]
+ combined_contents = 'contents of file'
_helper_assert_file_contents(
- None,
- None,
- custom_path,
- custom_contents,
- custom_contents
+ file_infos,
+ combined_contents
)
-def test_get_contents_from_file_both_default_and_custom():
- default_path = 'test/default/path'
- default_contents = 'contents of the default file'
- custom_path = 'test/custom/path'
- custom_contents = 'contents of the custom file'
- combined_contents = custom_contents + default_contents
+def test_get_contents_from_files_handles_multiple():
+ file_infos = [
+ {
+ 'path': 'path/1',
+ 'contents': 'foo\n'
+ },
+ {
+ 'path': 'path/2/foo',
+ 'contents': 'bar\n'
+ },
+ {
+ 'path': 'another/path',
+ 'contents': 'baz'
+ }
+ ]
+
+ combined_contents = 'foo\nbar\nbaz'
+
_helper_assert_file_contents(
- default_path,
- default_contents,
- custom_path,
- custom_contents,
+ file_infos,
combined_contents
)
+@patch('eg.util._get_contents_of_file')
+def _helper_assert_file_contents(
+ file_infos,
+ target_contents,
+ get_contents_mock,
+):
+ """
+ Helper method to assert things about the get_contents_from_files method.
+ Does not actually hit the disk.
+
+ file_infos: array of { path, contents } dicts representing files. Array so
+ that we can assert proper order calling
+ target_contents: the final combined contents that should be returned by the
+ get_contents_from_files method.
+ """
+
+ # This method will be used by the mock framework to return the right file
+ # contents based on the file name.
+ def return_file_contents(*args, **kwargs):
+ for file_info in file_infos:
+ if file_info['path'] == args[0]:
+ return file_info['contents']
+ raise TypeError('did not find path in test obj')
+
+ get_contents_mock.side_effect = return_file_contents
+
+ paths = [el['path'] for el in file_infos]
+ actual = util.get_contents_from_files(*paths)
+ assert actual == target_contents
+
+@patch('eg.util.get_colorized_contents')
+@patch('eg.util.get_squeezed_contents')
+@patch('eg.util.get_substituted_contents')
def _helper_assert_formatted_contents(
starting_contents,
use_color,
@@ -852,7 +607,10 @@ def _helper_assert_formatted_contents(
colorized_contents,
squeezed_contents,
subbed_contents,
- formatted_result
+ formatted_result,
+ sub_method,
+ squeeze_method,
+ color_method,
):
"""
Helper method to assist in asserting things about the
@@ -869,55 +627,47 @@ def _helper_assert_formatted_contents(
subbed_contents: the result of subbed_contents
formatted_result: the final, formatted string that should be returned
"""
- with patch(
- 'eg.util.get_colorized_contents',
- return_value=colorized_contents
- ) as color_method:
- with patch(
- 'eg.util.get_squeezed_contents',
- return_value=squeezed_contents
- ) as squeeze_method:
- with patch(
- 'eg.util.get_substituted_contents',
- return_value=subbed_contents
- ) as sub_method:
- actual = util.get_formatted_contents(
- starting_contents,
- use_color,
- color_config,
- squeeze,
- subs
- )
-
- # We'll update the contents as they get formatted to make sure
- # we pass the right thing to the various methods.
- contents_thus_far = starting_contents
-
- if use_color:
- color_method.assert_called_once_with(
- contents_thus_far,
- color_config
- )
- contents_thus_far = colorized_contents
- else:
- assert_equal(color_method.call_count, 0)
-
- if squeeze:
- squeeze_method.assert_called_once_with(contents_thus_far)
- contents_thus_far = squeezed_contents
- else:
- assert_equal(squeeze_method.call_count, 0)
-
- if subs:
- sub_method.assert_called_once_with(
- contents_thus_far,
- subs
- )
- contents_thus_far = subbed_contents
- else:
- assert_equal(sub_method.call_count, 0)
-
- assert_equal(actual, formatted_result)
+ sub_method.return_value = subbed_contents
+ squeeze_method.return_value = squeezed_contents
+ color_method.return_value = colorized_contents
+
+ actual = util.get_formatted_contents(
+ starting_contents,
+ use_color,
+ color_config,
+ squeeze,
+ subs
+ )
+
+ # We'll update the contents as they get formatted to make sure
+ # we pass the right thing to the various methods.
+ contents_thus_far = starting_contents
+
+ if use_color:
+ color_method.assert_called_once_with(
+ contents_thus_far,
+ color_config
+ )
+ contents_thus_far = colorized_contents
+ else:
+ assert color_method.call_count == 0
+
+ if squeeze:
+ squeeze_method.assert_called_once_with(contents_thus_far)
+ contents_thus_far = squeezed_contents
+ else:
+ assert squeeze_method.call_count == 0
+
+ if subs:
+ sub_method.assert_called_once_with(
+ contents_thus_far,
+ subs
+ )
+ contents_thus_far = subbed_contents
+ else:
+ assert sub_method.call_count == 0
+
+ assert actual == formatted_result
def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
@@ -927,15 +677,15 @@ def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
"""
starting_contents = 'this is where we start'
_helper_assert_formatted_contents(
- starting_contents=starting_contents,
- use_color=False,
- color_config='some color config',
- squeeze=False,
- subs=None,
- colorized_contents='this was colored',
- squeezed_contents='this was squeezed',
- subbed_contents='these contents were subbed',
- formatted_result=starting_contents
+ starting_contents,
+ False,
+ 'some color config',
+ False,
+ None,
+ 'this was colored',
+ 'this was squeezed',
+ 'these contents were subbed',
+ starting_contents
)
@@ -946,15 +696,15 @@ def test_get_formatted_contents_calls_colorize_if_use_color():
starting_contents = 'this is where we start'
colorized_contents = 'COLORIZED: this is where we start'
_helper_assert_formatted_contents(
- starting_contents=starting_contents,
- use_color=True,
- color_config='some color config',
- squeeze=False,
- subs=None,
- colorized_contents=colorized_contents,
- squeezed_contents='this was squeezed',
- subbed_contents='these contents were subbed',
- formatted_result=colorized_contents
+ starting_contents,
+ True,
+ 'some color config',
+ False,
+ None,
+ colorized_contents,
+ 'this was squeezed',
+ 'these contents were subbed',
+ colorized_contents
)
@@ -963,15 +713,15 @@ def test_get_formatted_contents_squeezes():
starting_contents = 'this is where we start'
squeezed_contents = 'this is the result of a squeezing'
_helper_assert_formatted_contents(
- starting_contents=starting_contents,
- use_color=False,
- color_config='some color config',
- squeeze=True,
- subs=None,
- colorized_contents='this was colored',
- squeezed_contents=squeezed_contents,
- subbed_contents='these contents were subbed',
- formatted_result=squeezed_contents
+ starting_contents,
+ False,
+ 'some color config',
+ True,
+ None,
+ 'this was colored',
+ squeezed_contents,
+ 'these contents were subbed',
+ squeezed_contents
)
@@ -980,15 +730,15 @@ def test_get_formatted_contents_subsitutes():
starting_contents = 'this is where we start'
subbed_contents = 'substituted like a teacher'
_helper_assert_formatted_contents(
- starting_contents=starting_contents,
- use_color=False,
- color_config='some color config',
- squeeze=False,
- subs=['truthy', 'list'],
- colorized_contents='this was colored',
- squeezed_contents='this was squeezed',
- subbed_contents=subbed_contents,
- formatted_result=subbed_contents
+ starting_contents,
+ False,
+ 'some color config',
+ False,
+ ['truthy', 'list'],
+ 'this was colored',
+ 'this was squeezed',
+ subbed_contents,
+ subbed_contents
)
@@ -1000,15 +750,15 @@ def test_perform_all_formatting():
starting_contents = 'the starting point for grand formatting'
subbed_contents = 'subbed is the last thing called so should be the result'
_helper_assert_formatted_contents(
- starting_contents=starting_contents,
- use_color=True,
- color_config='some color config',
- squeeze=True,
- subs=['truthy', 'list'],
- colorized_contents='this was colored',
- squeezed_contents='this was squeezed',
- subbed_contents=subbed_contents,
- formatted_result=subbed_contents
+ starting_contents,
+ True,
+ 'some color config',
+ True,
+ ['truthy', 'list'],
+ 'this was colored',
+ 'this was squeezed',
+ subbed_contents,
+ subbed_contents
)
@@ -1031,14 +781,14 @@ def test_get_squeezed_contents_correctly_squeezes():
target = _get_file_as_string(PATH_SQUEEZED_FILE)
actual = util.get_squeezed_contents(unsqueezed)
- assert_equal(actual, target)
+ assert actual == target
def test_get_substituted_contents_handles_empty_subs():
"""Nothing should be formatted if there are no substitutions."""
raw_contents = 'this should not be subbed'
actual = util.get_substituted_contents(raw_contents, [])
- assert_equal(actual, raw_contents)
+ assert actual == raw_contents
def test_get_substituted_contents_substitutes_calls_correct_methods():
@@ -1062,7 +812,7 @@ def test_get_substituted_contents_substitutes_calls_correct_methods():
sub_one.apply_and_get_result.assert_called_once_with(starting_contents)
sub_two.apply_and_get_result.assert_called_once_with(sub_one_result)
- assert_equal(actual, target)
+ assert actual == target
def test_get_substituted_contents_substitutes_correctly():
@@ -1078,10 +828,11 @@ def test_get_substituted_contents_substitutes_correctly():
subs = [sub_one, sub_two]
actual = util.get_substituted_contents(start, subs)
- assert_equal(actual, target)
+ assert actual == target
-def test_get_colorized_contents_calls_methods():
+@patch('eg.color.EgColorizer')
+def test_get_colorized_contents_calls_methods(patched_colorizer_class):
"""
We should call the correct methods on the EgColorizer objects when we color
a file.
@@ -1089,22 +840,24 @@ def test_get_colorized_contents_calls_methods():
raw_contents = 'these are uncolored contents'
colored_contents = 'COLORED: ' + raw_contents
color_config = 'some color config'
- with patch('eg.color.EgColorizer') as patched_colorizer_class:
- # The actual instance created by these calls is stored at return_value.
- colorizer_instance = patched_colorizer_class.return_value
- colorizer_instance.colorize_text.return_value = colored_contents
- actual = util.get_colorized_contents(raw_contents, color_config)
+ # The actual instance created by these calls is stored at return_value.
+ colorizer_instance = patched_colorizer_class.return_value
+ colorizer_instance.colorize_text.return_value = colored_contents
+
+ actual = util.get_colorized_contents(raw_contents, color_config)
- assert_equal(actual, colored_contents)
- colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
+ assert actual == colored_contents
+ colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
+@patch('eg.util.get_alias_dict')
def _helper_assert_get_resolved_program(
program,
resolved_program,
config_obj,
- alias_dict
+ alias_dict,
+ mock_dict,
):
"""
program: the program to resolved for as an alias
@@ -1112,10 +865,11 @@ def _helper_assert_get_resolved_program(
config_obj: the config_obj to use toe resolve the alias path
alias_dict: the dict of aliases to be returned
"""
- with patch('eg.util.get_alias_dict', return_value=alias_dict) as mock_dict:
- actual = util.get_resolved_program(program, config_obj)
- assert_equal(actual, resolved_program)
- mock_dict.assert_called_once_with(config_obj)
+ mock_dict.return_value = alias_dict
+
+ actual = util.get_resolved_program(program, config_obj)
+ assert actual == resolved_program
+ mock_dict.assert_called_once_with(config_obj)
def test_get_resolved_program_no_alias():
@@ -1184,12 +938,18 @@ def test_get_alias_dict_fails_gracefully_if_not_file():
)
+@patch('eg.util._get_contents_of_file')
+@patch('eg.util._get_alias_file_path')
+@patch('os.path.isfile')
def _helper_assert_get_alias_dict(
contents_of_alias_dict_file,
target_alias_dict,
config_obj,
alias_file_path,
- alias_file_path_is_file
+ alias_file_path_is_file,
+ mock_is_file,
+ mock_get_alias_file_path,
+ mock_get_contents,
):
"""
contents_of_alias_dict_file: the string contents of the file storing the
@@ -1199,32 +959,25 @@ def _helper_assert_get_alias_dict(
alias_file_path: the path to be returned by _get_alias_file_path
alias_file_path_is_file: True if the alias path is a file, else False
"""
- with patch(
- 'eg.util._get_contents_of_file',
- return_value=contents_of_alias_dict_file
- ) as mock_get_contents:
- with patch(
- 'eg.util._get_alias_file_path',
- return_value=alias_file_path
- ) as mock_get_alias_file_path:
- with patch(
- 'os.path.isfile',
- return_value=alias_file_path_is_file
- ) as mock_is_file:
- actual = util.get_alias_dict(config_obj)
+ mock_is_file.return_value = alias_file_path_is_file
+ mock_get_alias_file_path.return_value = alias_file_path
+ mock_get_contents.return_value = contents_of_alias_dict_file
- assert_equal(actual, target_alias_dict)
+ actual = util.get_alias_dict(config_obj)
- mock_get_alias_file_path.assert_called_once_with(config_obj)
- mock_is_file.assert_called_once_with(alias_file_path)
+ assert actual == target_alias_dict
- if alias_file_path_is_file:
- mock_get_contents.assert_called_once_with(alias_file_path)
- else:
- assert_equal(mock_get_contents.call_count, 0)
+ mock_get_alias_file_path.assert_called_once_with(config_obj)
+ mock_is_file.assert_called_once_with(alias_file_path)
+ if alias_file_path_is_file:
+ mock_get_contents.assert_called_once_with(alias_file_path)
+ else:
+ assert mock_get_contents.call_count == 0
-def test_get_alias_file_path():
+
+@patch('os.path.join')
+def test_get_alias_file_path(mock_join):
"""
_get_alias_file_path should just join the example dir and the alias file
name, to make sure we look in the right place for the file.
@@ -1234,13 +987,14 @@ def test_get_alias_file_path():
)
join_result = 'joined path'
- with patch('os.path.join', return_value=join_result) as mock_join:
- actual = util._get_alias_file_path(config_obj)
- assert_equal(actual, join_result)
- mock_join.assert_called_once_with(
- config_obj.examples_dir,
- util.ALIAS_FILE_NAME
- )
+ mock_join.return_value = join_result
+
+ actual = util._get_alias_file_path(config_obj)
+ assert actual == join_result
+ mock_join.assert_called_once_with(
+ config_obj.examples_dir,
+ util.ALIAS_FILE_NAME
+ )
def test_is_example_file_true_if_has_suffix():
@@ -1249,7 +1003,7 @@ def test_is_example_file_true_if_has_suffix():
"""
file_name = 'find.md'
actual = util._is_example_file(file_name)
- assert_equal(actual, True)
+ assert actual == True
def test_is_example_file_true_if_not_suffix():
@@ -1258,7 +1012,7 @@ def test_is_example_file_true_if_not_suffix():
"""
file_name = 'aliases.json'
actual = util._is_example_file(file_name)
- assert_equal(actual, False)
+ assert actual == False
def test_can_parse_alias_file():
@@ -1275,17 +1029,17 @@ def test_can_parse_alias_file():
alias_file_contents = util._get_contents_of_file(alias_file_path)
alias_dict = json.loads(alias_file_contents)
# We'll check that link goes to ln, as we know that one will be present.
- assert_equal(alias_dict['link'], 'ln')
+ assert alias_dict['link'] == 'ln'
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
-@patch('eg.util.get_file_path_for_program')
+@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_correct_with_custom_dir(
mock_call,
- mock_get_path,
+ mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
@@ -1296,28 +1050,59 @@ def test_edit_custom_examples_correct_with_custom_dir(
program = 'du'
resolved_program = 'alias for du'
config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
- path = 'path/to/custom/du.md'
+ paths = ['path/to/custom/du.md', 'foo.md']
+
+ mock_get_program.return_value = resolved_program
+ mock_get_paths.return_value = paths
+ mock_exists.return_value = True
+
+ util.edit_custom_examples(program, config)
+
+ mock_get_program.assert_called_once_with(program, config)
+ mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
+ mock_call.assert_called_once_with([config.editor_cmd, paths[0]])
+ assert mock_inform.call_count == 0
+
+
+@patch('os.path.exists')
+@patch('eg.util._inform_cannot_edit_no_custom_dir')
+@patch('eg.util.get_resolved_program')
+@patch('eg.util.get_file_paths_for_program')
+@patch('subprocess.call')
+def test_edit_custom_examples_creates_file_if_none_exist(
+ mock_call,
+ mock_get_paths,
+ mock_get_program,
+ mock_inform,
+ mock_exists,
+):
+ program = 'du'
+ resolved_program = 'alias-for-du'
+ config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
+ paths = []
mock_get_program.return_value = resolved_program
- mock_get_path.return_value = path
+ mock_get_paths.return_value = paths
mock_exists.return_value = True
util.edit_custom_examples(program, config)
mock_get_program.assert_called_once_with(program, config)
- mock_get_path.assert_called_once_with(resolved_program, config.custom_dir)
- mock_call.assert_called_once_with([config.editor_cmd, path])
- assert_equal(mock_inform.call_count, 0)
+ mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
+ mock_call.assert_called_once_with(
+ [config.editor_cmd, 'path/to/custom/alias-for-du.md'])
+ assert mock_inform.call_count == 0
+
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
-@patch('eg.util.get_file_path_for_program')
+@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_informs_if_no_custom_dir(
mock_call,
- mock_get_path,
+ mock_get_paths,
mock_get_program,
mock_inform,
mock_exists,
@@ -1333,14 +1118,14 @@ def test_edit_custom_examples_informs_if_no_custom_dir(
config = _create_config(editor_cmd='vi -e')
mock_exists.return_value = True
util.edit_custom_examples(program, config)
- assert_equal(mock_inform.call_count, 1)
+ assert mock_inform.call_count == 1
# And now with it set but a nonexistent path.
config = _create_config(custom_dir='/path/to/custom', editor_cmd='vi -e')
mock_exists.return_value = False
util.edit_custom_examples(program, config)
- assert_equal(mock_inform.call_count, 2)
+ assert mock_inform.call_count == 2
- assert_equal(mock_call.call_count, 0)
- assert_equal(mock_get_path.call_count, 0)
- assert_equal(mock_get_program.call_count, 0)
+ assert mock_call.call_count == 0
+ assert mock_get_paths.call_count == 0
+ assert mock_get_program.call_count == 0
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 5
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/srsudar/eg.git@8efa97140cab25eac23129104c5e60bd45d34679#egg=eg
exceptiongroup==1.2.2
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: eg
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/eg
| [
"test/util_test.py::test_get_file_paths_for_program_with_single",
"test/util_test.py::test_get_file_paths_for_program_with_nested",
"test/util_test.py::test_get_file_paths_for_program_with_none",
"test/util_test.py::test_get_file_paths_for_program_with_no_dir",
"test/util_test.py::test_handle_program_finds_paths_and_calls_open_pager_no_alias",
"test/util_test.py::test_handle_program_finds_paths_and_calls_open_pager_with_alias",
"test/util_test.py::test_get_list_of_all_supported_commands",
"test/util_test.py::test_get_contents_from_files_handles_none",
"test/util_test.py::test_get_contents_from_files_handles_one",
"test/util_test.py::test_get_contents_from_files_handles_multiple",
"test/util_test.py::test_edit_custom_examples_correct_with_custom_dir",
"test/util_test.py::test_edit_custom_examples_creates_file_if_none_exist",
"test/util_test.py::test_edit_custom_examples_informs_if_no_custom_dir"
]
| []
| [
"test/color_test.py::test_colorize_heading",
"test/color_test.py::test_colorize_block_indents",
"test/color_test.py::test_colorize_backticks",
"test/color_test.py::test_colorize_text_calls_all_sub_methods",
"test/config_test.py::test_config_returns_egrc_values_if_present",
"test/config_test.py::test_inform_if_paths_invalid_selectively_informs",
"test/config_test.py::test_get_resolved_config_uses_custom_egrc_path",
"test/config_test.py::test_get_egrc_config_reads_from_command_line",
"test/config_test.py::test_get_egrc_config_uses_default",
"test/config_test.py::test_get_egrc_returns_empty_if_no_egrc",
"test/config_test.py::test_get_resolved_config_calls_expand_paths",
"test/config_test.py::test_get_resolved_config_prioritizes_cli",
"test/config_test.py::test_get_resolved_config_defaults_to_egrc",
"test/config_test.py::test_get_resolved_config_falls_back_to_defaults",
"test/config_test.py::test_get_config_tuple_from_egrc_all_none_when_not_present",
"test/config_test.py::test_get_config_tuple_from_egrc_when_present",
"test/config_test.py::test_merge_color_configs_first_all_none",
"test/config_test.py::test_merge_color_configs_take_all_first",
"test/config_test.py::test_merge_color_configs_mixed",
"test/config_test.py::test_default_color_config",
"test/config_test.py::test_parse_bool_true_for_truthy_values",
"test/config_test.py::test_parse_bool_false_for_non_truthy_values",
"test/config_test.py::test_get_priority_first",
"test/config_test.py::test_get_priority_second",
"test/config_test.py::test_get_priority_third",
"test/config_test.py::test_get_priority_respect_false",
"test/config_test.py::test_parse_substitution_from_list_without_is_multiline",
"test/config_test.py::test_parse_substitution_from_list_with_is_multiline",
"test/config_test.py::test_parse_substitution_error_if_not_list",
"test/config_test.py::test_parse_substitution_error_if_wrong_length",
"test/config_test.py::test_parse_substitution_error_if_third_element_not_bool",
"test/config_test.py::test_get_substitution_from_config_finds_single_substitution",
"test/config_test.py::test_get_substitution_from_config_finds_multiple_substitutions",
"test/core_test.py::test_parse_args_fewer_than_two_args_fails",
"test/core_test.py::test_parse_args_requires_version_list_or_program",
"test/core_test.py::test_parses_correctly_if_just_program",
"test/core_test.py::test_parses_version_correctly",
"test/core_test.py::test_parses_config_file_correctly",
"test/core_test.py::test_parses_examples_dir_correctly",
"test/core_test.py::test_parses_custom_dir_correctly",
"test/core_test.py::test_parses_pager_cmd_correctly",
"test/core_test.py::test_parses_edit_correctly",
"test/core_test.py::test_parses_list_correctly",
"test/core_test.py::test_parses_use_color_correctly",
"test/core_test.py::test_parses_no_color_correctly",
"test/core_test.py::test_parses_squeeze_correctly",
"test/core_test.py::test_parses_all_valid_options_simultaneously",
"test/core_test.py::test_shows_version",
"test/core_test.py::test_shows_list",
"test/core_test.py::test_calls_handle_program",
"test/core_test.py::test_run_eg_informs_if_no_editor",
"test/core_test.py::test_run_eg_opens_editor",
"test/substitute_test.py::test_equality",
"test/substitute_test.py::test_not_equal",
"test/substitute_test.py::test_applies_multiline_substitution",
"test/substitute_test.py::test_applies_normal_mode_substitution",
"test/substitute_test.py::test_calls_correct_re_methods_for_multiline",
"test/substitute_test.py::test_calls_correct_re_methods_without_multiline",
"test/util_test.py::test_handle_program_no_entries",
"test/util_test.py::test_list_supported_programs_fails_gracefully_if_no_dirs",
"test/util_test.py::test_calls_pipepager_if_not_less",
"test/util_test.py::test_calls_fallback_pager_if_none",
"test/util_test.py::test_calls_pipepager_if_less",
"test/util_test.py::test_calls_fallback_if_cmd_is_flag_string",
"test/util_test.py::test_page_string_excepts_keyboard_interrupt_if_not_less",
"test/util_test.py::test_page_string_excepts_keyboard_interrupt_if_none",
"test/util_test.py::test_get_formatted_contents_does_not_format_methods_if_all_falsey",
"test/util_test.py::test_get_formatted_contents_calls_colorize_if_use_color",
"test/util_test.py::test_get_formatted_contents_squeezes",
"test/util_test.py::test_get_formatted_contents_subsitutes",
"test/util_test.py::test_perform_all_formatting",
"test/util_test.py::test_get_squeezed_contents_correctly_squeezes",
"test/util_test.py::test_get_substituted_contents_handles_empty_subs",
"test/util_test.py::test_get_substituted_contents_substitutes_calls_correct_methods",
"test/util_test.py::test_get_substituted_contents_substitutes_correctly",
"test/util_test.py::test_get_colorized_contents_calls_methods",
"test/util_test.py::test_get_resolved_program_no_alias",
"test/util_test.py::test_get_resolved_program_is_alias",
"test/util_test.py::test_get_alias_dict_returns_contents_of_correct_file",
"test/util_test.py::test_get_alias_dict_fails_gracefully_if_not_file",
"test/util_test.py::test_get_alias_file_path",
"test/util_test.py::test_is_example_file_true_if_has_suffix",
"test/util_test.py::test_is_example_file_true_if_not_suffix",
"test/util_test.py::test_can_parse_alias_file"
]
| []
| MIT License | 2,312 | [
"setup.py",
".gitignore",
".travis.yml",
"README.md",
"eg/util.py"
]
| [
"setup.py",
".gitignore",
".travis.yml",
"README.md",
"eg/util.py"
]
|
|
cekit__cekit-203 | 4b1e853c7718adcc5cc98f380cc8067fa30c8d24 | 2018-03-20 12:21:10 | c871246da5035e070cf5f79f486283fabd5bfc46 | diff --git a/cekit/builders/docker.py b/cekit/builders/docker.py
index 12abd71..25fbd22 100644
--- a/cekit/builders/docker.py
+++ b/cekit/builders/docker.py
@@ -13,6 +13,7 @@ class DockerBuilder(Builder):
"""This class wraps docker build command to build and image"""
def __init__(self, build_engine, target, params={}):
+ self._tags = params.get('tags')
super(DockerBuilder, self).__init__(build_engine, target, params)
def check_prerequisities(self):
@@ -20,20 +21,15 @@ class DockerBuilder(Builder):
subprocess.check_output(['docker', 'info'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
raise CekitError("Docker build engine needs docker installed and configured, error: %s"
- % ex.output)
+ % ex.output)
except Exception as ex:
raise CekitError("Docker build engine needs docker installed and configured!", ex)
- def build(self, build_args):
+ def build(self):
"""After the source siles are generated, the container image can be built.
We're using Docker to build the image currently.
-
- This can be changed by specifying the tags in CLI using --build-tags option.
-
- Args:
- build_tags - a list of image tags
"""
- tags = build_args.tags
+ tags = self._tags
cmd = ["docker", "build"]
# Custom tags for the container image
diff --git a/cekit/builders/osbs.py b/cekit/builders/osbs.py
index c517029..993c171 100644
--- a/cekit/builders/osbs.py
+++ b/cekit/builders/osbs.py
@@ -16,20 +16,28 @@ class OSBSBuilder(Builder):
"""Class representing OSBS builder."""
def __init__(self, build_engine, target, params={}):
+ self._user = params.get('user')
+ self._nowait = params.get('nowait', False)
+ self._release = params.get('release', False)
+
+ self._stage = params.get('stage', False)
+ if params.get('stage'):
+ self._rhpkg = 'rhpkg-stage'
+ else:
+ self._rhpkg = 'rhpkg'
+
super(OSBSBuilder, self).__init__(build_engine, target, params={})
- self.user = params.get('user')
- self.nowait = params.get('nowait')
def check_prerequisities(self):
try:
subprocess.check_output(
- ['rhpkg', 'help'], stderr=subprocess.STDOUT)
+ [self._rhpkg, 'help'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
raise CekitError("OSBS build engine needs 'rhpkg' tools installed, error: %s"
- % ex.output)
+ % ex.output)
except Exception as ex:
raise CekitError(
- "OSBS build engine needs 'rhpkg' tools installed!", ex)
+ "OSBS build engine needs '%s' tools installed!" % self._rhpkg, ex)
def prepare(self, descriptor):
"""Prepares dist-git repository for OSBS build."""
@@ -42,8 +50,13 @@ class OSBSBuilder(Builder):
raise CekitError(
"OSBS builder needs repostiory and branch provided!")
+ if self._stage:
+ osbs_dir = 'osbs-stage'
+ else:
+ osbs_dir = 'osbs'
+
self.dist_git_dir = os.path.join(os.path.expanduser(tools.cfg['common']['work_dir']),
- 'osbs',
+ osbs_dir,
repository)
if not os.path.exists(os.path.dirname(self.dist_git_dir)):
os.makedirs(os.path.dirname(self.dist_git_dir))
@@ -53,7 +66,7 @@ class OSBSBuilder(Builder):
repository,
branch)
- self.dist_git.prepare(self.user)
+ self.dist_git.prepare(self._stage, self._user)
self.dist_git.clean()
self.artifacts = [a['name'] for a in descriptor.get('artifacts', [])]
@@ -80,9 +93,9 @@ class OSBSBuilder(Builder):
logger.info("Updating lookaside cache...")
if not self.artifacts:
return
- cmd = ["rhpkg"]
- if self.user:
- cmd += ['--user', self.user]
+ cmd = [self._rhpkg]
+ if self._user:
+ cmd += ['--user', self._user]
cmd += ["new-sources"] + self.artifacts
logger.debug("Executing '%s'" % cmd)
@@ -95,15 +108,15 @@ class OSBSBuilder(Builder):
logger.info("Update finished.")
- def build(self, build_args):
- cmd = ["rhpkg"]
- if self.user:
- cmd += ['--user', self.user]
+ def build(self):
+ cmd = [self._rhpkg]
+ if self._user:
+ cmd += ['--user', self._user]
cmd.append("container-build")
- if self.nowait:
+ if self._nowait:
cmd += ['--nowait']
- if not build_args.build_osbs_release:
+ if not self._release:
cmd.append("--scratch")
with Chdir(self.dist_git_dir):
@@ -117,7 +130,7 @@ class OSBSBuilder(Builder):
logger.info("No changes made to the code, committing skipped")
if decision("Do you want to build the image in OSBS?"):
- build_type = "release" if build_args.build_osbs_release else "scratch"
+ build_type = "release" if self._release else "scratch"
logger.info("Executing %s container build in OSBS..." % build_type)
logger.debug("Executing '%s'." % ' '.join(cmd))
@@ -163,7 +176,7 @@ class DistGit(object):
return False
- def prepare(self, user=None):
+ def prepare(self, stage, user=None):
if os.path.exists(self.output):
with Chdir(self.output):
logger.info("Pulling latest changes in repo %s..." % self.repo)
@@ -177,11 +190,15 @@ class DistGit(object):
logger.info("Cloning %s git repository (%s branch)..." %
(self.repo, self.branch))
- cmd = ['rhpkg']
+ if stage:
+ cmd = ['rhpkg-stage']
+ else:
+ cmd = ['rhpkg']
+
if user:
cmd += ['--user', user]
cmd += ["-q", "clone", "-b", self.branch, self.repo, self.output]
-
+ logger.debug("Cloning: '%s'" % ' '.join(cmd))
subprocess.check_output(cmd)
logger.debug("Repository %s cloned" % self.repo)
diff --git a/cekit/cli.py b/cekit/cli.py
index 8b828f4..ce64beb 100644
--- a/cekit/cli.py
+++ b/cekit/cli.py
@@ -90,6 +90,11 @@ class Cekit(object):
action='store_true',
help='run rhpkg container build with --nowait option')
+ build_group.add_argument('--build-osbs-stage',
+ dest='build_osbs_stage',
+ action='store_true',
+ help='use rhpkg-stage instead of rhpkg')
+
build_group.add_argument('--build-tech-preview',
action='store_true',
help='perform tech preview build')
@@ -172,12 +177,17 @@ class Cekit(object):
if 'build' in self.args.commands:
params = {'user': self.args.build_osbs_user,
- 'nowait': self.args.build_osbs_nowait}
+ 'nowait': self.args.build_osbs_nowait,
+ 'stage': self.args.build_osbs_stage,
+ 'release': self.args.build_osbs_release,
+ 'tags': self.args.tags,
+ }
+
builder = Builder(self.args.build_engine,
self.args.target,
params)
builder.prepare(generator.image)
- builder.build(self.args)
+ builder.build()
if 'test' in self.args.commands:
diff --git a/docs/build.rst b/docs/build.rst
index 809bf4d..9503ece 100644
--- a/docs/build.rst
+++ b/docs/build.rst
@@ -19,6 +19,7 @@ You can execute an container image build by running:
* ``--tag`` -- an image tag used to build image (can be specified multiple times)
* ``--build-engine`` -- a builder engine to use ``osbs`` or ``docker`` [#f1]_
+* ``--build-osbs-stage`` -- use ``rhpkg-stage`` tool instead of ``rhpkg``
* ``--build-osbs-release`` [#f2]_ -- perform a OSBS release build
* ``--build-osbs-user`` -- alternative user passed to `rhpkg --user`
* ``--build-osbs-nowait`` -- run `rhpkg container-build` with `--nowait` option specified
| Add support for rhpkg-stage
We currently have support only for `rhpkg` command. We need to support `rhpkg-stage` too. This will help us using the stage environment to test cekit. This should be done probably using a switch, maybe `--build-osbs-stage`? | cekit/cekit | diff --git a/tests/test_builder.py b/tests/test_builder.py
new file mode 100644
index 0000000..c341b06
--- /dev/null
+++ b/tests/test_builder.py
@@ -0,0 +1,118 @@
+import subprocess
+
+from cekit.builder import Builder
+
+
+def test_osbs_builder_defaults(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+
+ builder = Builder('osbs', 'tmp', {})
+
+ assert builder._release is False
+ assert builder._rhpkg == 'rhpkg'
+ assert builder._nowait is False
+
+
+def test_osbs_builder_use_rhpkg_staget(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+
+ params = {'stage': True}
+ builder = Builder('osbs', 'tmp', params)
+
+ assert builder._rhpkg == 'rhpkg-stage'
+
+
+def test_osbs_builder_nowait(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+
+ params = {'nowait': True}
+ builder = Builder('osbs', 'tmp', params)
+
+ assert builder._nowait is True
+
+
+def test_osbs_builder_user(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+
+ params = {'user': 'UserFoo'}
+ builder = Builder('osbs', 'tmp', params)
+
+ assert builder._user == 'UserFoo'
+
+
+class DistGitMock(object):
+ def add(self):
+ pass
+
+ def stage_modified(self):
+ pass
+
+
+def create_osbs_build_object(mocker, builder_type, params):
+ mocker.patch.object(subprocess, 'check_output')
+ mocker.patch('cekit.builders.osbs.decision')
+
+ builder = Builder(builder_type, 'tmp', params)
+ builder.dist_git_dir = '/tmp'
+ builder.dist_git = DistGitMock()
+ builder.artifacts = []
+ return builder
+
+
+def test_osbs_builder_run_rhpkg_stage(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+
+ params = {'stage': True}
+
+ check_call = mocker.patch.object(subprocess, 'check_call')
+ builder = create_osbs_build_object(mocker, 'osbs', params)
+ builder.build()
+
+ check_call.assert_called_once_with(['rhpkg-stage', 'container-build', '--scratch'])
+
+
+def test_osbs_builder_run_rhpkg(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+
+ check_call = mocker.patch.object(subprocess, 'check_call')
+ builder = create_osbs_build_object(mocker, 'osbs', {})
+ builder.build()
+
+ check_call.assert_called_once_with(['rhpkg', 'container-build', '--scratch'])
+
+
+def test_osbs_builder_run_rhpkg_nowait(mocker):
+ mocker.patch.object(subprocess, 'check_output')
+ params = {'nowait': True}
+
+ check_call = mocker.patch.object(subprocess, 'check_call')
+ builder = create_osbs_build_object(mocker, 'osbs', params)
+ builder.build()
+
+ check_call.assert_called_once_with(['rhpkg', 'container-build', '--nowait', '--scratch'])
+
+
+def test_osbs_builder_run_rhpkg_user(mocker):
+ params = {'user': 'Foo'}
+
+ check_call = mocker.patch.object(subprocess, 'check_call')
+ builder = create_osbs_build_object(mocker, 'osbs', params)
+ builder.build()
+
+ check_call.assert_called_once_with(['rhpkg', '--user', 'Foo', 'container-build', '--scratch'])
+
+
+def test_docker_builder_defaults():
+ params = {'tags': ['foo', 'bar']}
+ builder = Builder('docker', 'tmp', params)
+
+ assert builder._tags == ['foo', 'bar']
+
+
+def test_docker_builder_run(mocker):
+ params = {'tags': ['foo', 'bar']}
+ check_call = mocker.patch.object(subprocess, 'check_call')
+ builder = create_osbs_build_object(mocker, 'docker', params)
+ builder.build()
+
+ check_call.assert_called_once_with(['docker', 'build', '-t', 'foo', '-t', 'bar', 'tmp/image'])
diff --git a/tests/test_unit_args.py b/tests/test_unit_args.py
index daf0cd1..e41516a 100644
--- a/tests/test_unit_args.py
+++ b/tests/test_unit_args.py
@@ -38,6 +38,18 @@ def test_args_build_engine(mocker, engine):
assert Cekit().parse().args.build_engine == engine
+def test_args_osbs_stage(mocker):
+ mocker.patch.object(sys, 'argv', ['cekit', 'build', '--build-osbs-stage'])
+
+ assert Cekit().parse().args.build_osbs_stage is True
+
+
+def test_args_osbs_stage_false(mocker):
+ mocker.patch.object(sys, 'argv', ['cekit', 'build'])
+
+ assert Cekit().parse().args.build_osbs_stage is False
+
+
def test_args_invalid_build_engine(mocker):
mocker.patch.object(sys, 'argv', ['cekit', 'build', '--build-engine', 'rkt'])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"behave",
"docker",
"future",
"lxml",
"mock",
"pytest",
"pytest-cov",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | behave==1.2.6
-e git+https://github.com/cekit/cekit.git@4b1e853c7718adcc5cc98f380cc8067fa30c8d24#egg=cekit
certifi==2025.1.31
charset-normalizer==3.4.1
colorlog==6.9.0
coverage==7.8.0
docker==7.1.0
docopt==0.6.2
exceptiongroup==1.2.2
future==1.0.0
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
lxml==5.3.1
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
parse==1.20.2
parse_type==0.6.4
pluggy==1.5.0
pykwalify==1.8.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: cekit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- behave==1.2.6
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorlog==6.9.0
- coverage==7.8.0
- docker==7.1.0
- docopt==0.6.2
- exceptiongroup==1.2.2
- future==1.0.0
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- parse==1.20.2
- parse-type==0.6.4
- pluggy==1.5.0
- pykwalify==1.8.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/cekit
| [
"tests/test_builder.py::test_osbs_builder_defaults",
"tests/test_builder.py::test_osbs_builder_use_rhpkg_staget",
"tests/test_builder.py::test_osbs_builder_nowait",
"tests/test_builder.py::test_osbs_builder_user",
"tests/test_builder.py::test_osbs_builder_run_rhpkg_stage",
"tests/test_builder.py::test_osbs_builder_run_rhpkg",
"tests/test_builder.py::test_osbs_builder_run_rhpkg_nowait",
"tests/test_builder.py::test_osbs_builder_run_rhpkg_user",
"tests/test_builder.py::test_docker_builder_run",
"tests/test_unit_args.py::test_args_osbs_stage",
"tests/test_unit_args.py::test_args_osbs_stage_false"
]
| [
"tests/test_builder.py::test_docker_builder_defaults"
]
| [
"tests/test_unit_args.py::test_args_command[generate]",
"tests/test_unit_args.py::test_args_command[build]",
"tests/test_unit_args.py::test_args_command[test]",
"tests/test_unit_args.py::test_args_not_valid_command",
"tests/test_unit_args.py::test_args_tags[tags0-build_tags0-expected0]",
"tests/test_unit_args.py::test_args_tags[tags1-build_tags1-expected1]",
"tests/test_unit_args.py::test_args_tags[tags2-build_tags2-expected2]",
"tests/test_unit_args.py::test_args_tags[tags3-build_tags3-expected3]",
"tests/test_unit_args.py::test_args_build_engine[osbs]",
"tests/test_unit_args.py::test_args_build_engine[docker]",
"tests/test_unit_args.py::test_args_invalid_build_engine",
"tests/test_unit_args.py::test_args_osbs_user",
"tests/test_unit_args.py::test_args_config_default",
"tests/test_unit_args.py::test_args_config",
"tests/test_unit_args.py::test_args_osbs_nowait",
"tests/test_unit_args.py::test_args_osbs_no_nowait"
]
| []
| MIT License | 2,313 | [
"cekit/builders/osbs.py",
"docs/build.rst",
"cekit/builders/docker.py",
"cekit/cli.py"
]
| [
"cekit/builders/osbs.py",
"docs/build.rst",
"cekit/builders/docker.py",
"cekit/cli.py"
]
|
|
pydata__sparse-128 | 5f19f0aeea17c1d523e4a2e0e091cd376dde68c5 | 2018-03-20 16:16:36 | b03b6b9a480a10a3cf59d7994292b9c5d3015cd5 | codecov-io: # [Codecov](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=h1) Report
> Merging [#128](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=desc) into [master](https://codecov.io/gh/pydata/sparse/commit/250d7c6effb5438614400a004d9469f234e4f5a0?src=pr&el=desc) will **decrease** coverage by `2.38%`.
> The diff coverage is `62.62%`.
[](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #128 +/- ##
==========================================
- Coverage 92.03% 89.65% -2.39%
==========================================
Files 13 12 -1
Lines 1256 1295 +39
==========================================
+ Hits 1156 1161 +5
- Misses 100 134 +34
```
| Flag | Coverage Δ | |
|---|---|---|
| #python27 | `88.95% <62.62%> (-2.33%)` | :arrow_down: |
| #python36 | `89.34% <62.62%> (-2.38%)` | :arrow_down: |
| [Impacted Files](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [benchmarks/benchmark\_coo.py](https://codecov.io/gh/pydata/sparse/pull/128/diff?src=pr&el=tree#diff-YmVuY2htYXJrcy9iZW5jaG1hcmtfY29vLnB5) | `0% <0%> (ø)` | :arrow_up: |
| [sparse/coo/core.py](https://codecov.io/gh/pydata/sparse/pull/128/diff?src=pr&el=tree#diff-c3BhcnNlL2Nvby9jb3JlLnB5) | `87.34% <54.16%> (-6.37%)` | :arrow_down: |
| [sparse/slicing.py](https://codecov.io/gh/pydata/sparse/pull/128/diff?src=pr&el=tree#diff-c3BhcnNlL3NsaWNpbmcucHk=) | `98.18% <95.83%> (+1.69%)` | :arrow_up: |
| [sparse/\_\_init\_\_.py](https://codecov.io/gh/pydata/sparse/pull/128/diff?src=pr&el=tree#diff-c3BhcnNlL19faW5pdF9fLnB5) | | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=footer). Last update [250d7c6...0fdfdc1](https://codecov.io/gh/pydata/sparse/pull/128?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
hameerabbasi: I will note, however, that this indexing is geared towards integer or tuple of integer. Large slices WILL make this slow as molasses. Maybe a fallback if the slices are too large?
hameerabbasi: Latest benchmarks:
```
In[2]: import sparse
In[3]: x = sparse.random((1000, 1000, 1000), density=0.01)
In[4]: %timeit x[:500]
78.2 ms ± 613 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
In[5]: %timeit x[:500, :500]
223 ms ± 6.73 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In[6]: %timeit x[:500, :500, :500]
230 ms ± 6.07 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In[7]: %timeit x[5]
157 µs ± 423 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
```
Faster in all cases than old. Falls back to simple matching based on a "guesstimate" of which one will be faster.
hameerabbasi: cc @mrocklin Ready for review. :-)
mrocklin: Cool. I'm looking forward to it. I took a brief glance at it and noticed that it was fairly involved, including many new routines with complex logic. This will probably take some time to review properly and so I may not get to this today.
If you're inclined I suspect that you could make reviewing (and thus reading by future devs) easier by including small and simple doctests within each utility function that show what it does with a minimal example.
hameerabbasi: I had already partially done that, but I made it more complete.
hameerabbasi: A couple of replies.
hameerabbasi: I added a bit of comments to the main function.
hameerabbasi: Rebased onto infrastructure fix commits.
hameerabbasi: @mrocklin I'm done working on this. Do you have time to review in the near future?
hameerabbasi: I think your idea of moving this to a separate module and adding a module-level docstring is probably best. I'll request you to hold off reviewing further until I make that change (it'll make things easier). I'll hopefully do this tomorrow at some point.
The algorithm itself probably isn't that difficult, but I can see the need for terminology being clarified.
mrocklin: Thanks. In general I'm excited about the performance gains here. Another option would just be to merge and move ahead without review. Ideally in the future we'll have more people who have time to both write and review code.
hameerabbasi: I've clarified everything, hopefully. :-) Feel free to review and/or merge.
mrocklin: It seems like a lot of the logic here is based around performance considerations. Do we want to add benchmarks for these various cases?
hameerabbasi: > It seems like a lot of the logic here is based around performance considerations. Do we want to add benchmarks for these various cases?
Seems like an excellent idea!
hameerabbasi: I noticed there were a few cases we supported but didn't actually test for. That isn't additional in this PR, but it was nice to add tests for those cases as well.
mrocklin: > I'm fairly certain how it will be done, but I haven't mapped it out in code yet. However, it requires list[list[int]] on Numba's end.
How confident are we that this will be implemented soon and, if implemented, that it will be fast enough? If neither of those are true then do we have a backup plan?
hameerabbasi: > How confident are we that this will be implemented soon and, if implemented, that it will be fast enough? If neither of those are true then do we have a backup plan?
numba/numba#2560 is the issue we're looking for. It has a milestone of 0.38 (RC), which is due out April 11. Unless it's pushed to the next release, we should be okay.
There may be a way with flat data structures as well, but it's slightly more complicated. May be more performant, though. I'm shooting for next weekend. It requires some Numba trickery in any case (I think that trickery should be possible in `nopython` mode and if so, it'll be fast, roughly equivalent to `n` integer indexes where `n` is the size of the advanced index).
If nothing works out we can simply loop over the relevant indices in Python.
Do you need it urgently in your work?
mrocklin: No, I just want to make sure that we don't over-engineer into a solution that can not be extended. I suspect that accepting lists and arrays of boolean and integer values will be important for XArray support, which is a nice use case to target.
hameerabbasi: Good thinking! But don't worry, I'm fairly sure that with integer indexing and some transposing and reshaping, we'll be good to go.
mrocklin: I still haven't gone through all of the logic here, but I've probably gone as far as I'm likely to. In general things seem fine to me. +1
Thanks for slogging through this @hameerabbasi . I hope you're enjoying yourself :) | diff --git a/.gitignore b/.gitignore
index e408e4a..71b3ae3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,6 +45,10 @@ htmlcov/
nosetests.xml
coverage.xml
*,cover
+.pytest_cache/
+
+# Airspeed velocity
+.asv/
# Translations
*.mo
diff --git a/benchmarks/benchmark_coo.py b/benchmarks/benchmark_coo.py
index 5cb9f74..4bf760f 100644
--- a/benchmarks/benchmark_coo.py
+++ b/benchmarks/benchmark_coo.py
@@ -12,12 +12,17 @@ class ElemwiseSuite(object):
self.x.sum_duplicates()
self.y.sum_duplicates()
+ self.x + self.y # Numba compilation
+
def time_add(self):
self.x + self.y
def time_mul(self):
self.x * self.y
+ def time_index(self):
+ self.x[5]
+
class ElemwiseBroadcastingSuite(object):
def setup(self):
@@ -33,3 +38,24 @@ class ElemwiseBroadcastingSuite(object):
def time_mul(self):
self.x * self.y
+
+
+class IndexingSuite(object):
+ def setup(self):
+ np.random.seed(0)
+ self.x = sparse.random((100, 100, 100), density=0.01)
+ self.x.sum_duplicates()
+
+ self.x[5] # Numba compilation
+
+ def time_index_scalar(self):
+ self.x[5]
+
+ def time_index_slice(self):
+ self.x[:50]
+
+ def time_index_slice2(self):
+ self.x[:50, :50]
+
+ def time_index_slice3(self):
+ self.x[:50, :50, :50]
diff --git a/sparse/coo/core.py b/sparse/coo/core.py
index 6fbfdd9..cbc9612 100644
--- a/sparse/coo/core.py
+++ b/sparse/coo/core.py
@@ -1,4 +1,3 @@
-import numbers
from collections import Iterable, defaultdict, deque
import numpy as np
@@ -6,9 +5,9 @@ import scipy.sparse
from numpy.lib.mixins import NDArrayOperatorsMixin
from .common import dot
+from .indexing import getitem
from .umath import elemwise, broadcast_to
from ..compatibility import int, range
-from ..slicing import normalize_index
from ..sparse_array import SparseArray
from ..utils import _zero_of_dtype
@@ -487,93 +486,7 @@ class COO(SparseArray, NDArrayOperatorsMixin):
def __sizeof__(self):
return self.nbytes
- def __getitem__(self, index):
- if not isinstance(index, tuple):
- if isinstance(index, str):
- data = self.data[index]
- idx = np.where(data)
- coords = list(self.coords[:, idx[0]])
- coords.extend(idx[1:])
-
- return COO(coords, data[idx].flatten(),
- shape=self.shape + self.data.dtype[index].shape,
- has_duplicates=self.has_duplicates,
- sorted=self.sorted)
- else:
- index = (index,)
-
- last_ellipsis = len(index) > 0 and index[-1] is Ellipsis
- index = normalize_index(index, self.shape)
- if len(index) != 0 and all(not isinstance(ind, Iterable) and ind == slice(None) for ind in index):
- return self
- mask = np.ones(self.nnz, dtype=np.bool)
- for i, ind in enumerate([i for i in index if i is not None]):
- if not isinstance(ind, Iterable) and ind == slice(None):
- continue
- mask &= _mask(self.coords[i], ind, self.shape[i])
-
- n = mask.sum()
- coords = []
- shape = []
- i = 0
- for ind in index:
- if isinstance(ind, numbers.Integral):
- i += 1
- continue
- elif isinstance(ind, slice):
- step = ind.step if ind.step is not None else 1
- if step > 0:
- start = ind.start if ind.start is not None else 0
- start = max(start, 0)
- stop = ind.stop if ind.stop is not None else self.shape[i]
- stop = min(stop, self.shape[i])
- if start > stop:
- start = stop
- shape.append((stop - start + step - 1) // step)
- else:
- start = ind.start or self.shape[i] - 1
- stop = ind.stop if ind.stop is not None else -1
- start = min(start, self.shape[i] - 1)
- stop = max(stop, -1)
- if start < stop:
- start = stop
- shape.append((start - stop - step - 1) // (-step))
-
- dt = np.min_scalar_type(min(-(dim - 1) if dim != 0 else -1 for dim in shape))
- coords.append((self.coords[i, mask].astype(dt) - start) // step)
- i += 1
- elif isinstance(ind, Iterable):
- old = self.coords[i][mask]
- new = np.empty(shape=old.shape, dtype=old.dtype)
- for j, item in enumerate(ind):
- new[old == item] = j
- coords.append(new)
- shape.append(len(ind))
- i += 1
- elif ind is None:
- coords.append(np.zeros(n))
- shape.append(1)
-
- for j in range(i, self.ndim):
- coords.append(self.coords[j][mask])
- shape.append(self.shape[j])
-
- if coords:
- coords = np.stack(coords, axis=0)
- else:
- if last_ellipsis:
- coords = np.empty((0, np.sum(mask)), dtype=np.uint8)
- else:
- if np.sum(mask) != 0:
- return self.data[mask][0]
- else:
- return _zero_of_dtype(self.dtype)[()]
- shape = tuple(shape)
- data = self.data[mask]
-
- return COO(coords, data, shape=shape,
- has_duplicates=self.has_duplicates,
- sorted=self.sorted)
+ __getitem__ = getitem
def __str__(self):
return "<COO: shape=%s, dtype=%s, nnz=%d, sorted=%s, duplicates=%s>" % (
@@ -1572,28 +1485,6 @@ def _keepdims(original, new, axis):
return new.reshape(shape)
-def _mask(coords, idx, shape):
- if isinstance(idx, numbers.Integral):
- return coords == idx
- elif isinstance(idx, slice):
- step = idx.step if idx.step is not None else 1
- if step > 0:
- start = idx.start if idx.start is not None else 0
- stop = idx.stop if idx.stop is not None else shape
- return (coords >= start) & (coords < stop) & \
- (coords % step == start % step)
- else:
- start = idx.start if idx.start is not None else (shape - 1)
- stop = idx.stop if idx.stop is not None else -1
- return (coords <= start) & (coords > stop) & \
- (coords % step == start % step)
- elif isinstance(idx, Iterable):
- mask = np.zeros(len(coords), dtype=np.bool)
- for item in idx:
- mask |= _mask(coords, item, shape)
- return mask
-
-
def _grouped_reduce(x, groups, method, **kwargs):
"""
Performs a :code:`ufunc` grouped reduce.
diff --git a/sparse/coo/indexing.py b/sparse/coo/indexing.py
new file mode 100644
index 0000000..f172412
--- /dev/null
+++ b/sparse/coo/indexing.py
@@ -0,0 +1,452 @@
+from collections import Iterable
+from numbers import Integral
+
+import numba
+import numpy as np
+
+from ..compatibility import range, zip_longest
+from ..slicing import normalize_index
+from ..utils import _zero_of_dtype
+
+
+def getitem(x, index):
+ """
+ This function implements the indexing functionality for COO.
+
+ The overall algorithm has three steps:
+
+ 1. Normalize the index to canonical form. Function: normalize_index
+ 2. Get the mask, which is a list of integers corresponding to
+ the indices in coords/data for the output data. Function: _mask
+ 3. Transform the coordinates to what they will be in the output.
+
+ Parameters
+ ----------
+ x : COO
+ The array to apply the indexing operation on.
+ index : {tuple, str}
+ The index into the array.
+ """
+ from .core import COO
+
+ x.sum_duplicates()
+
+ # If string, this is an index into an np.void
+ # Custom dtype.
+ if isinstance(index, str):
+ data = x.data[index]
+ idx = np.where(data)
+ coords = list(x.coords[:, idx[0]])
+ coords.extend(idx[1:])
+
+ return COO(coords, data[idx].flatten(),
+ shape=x.shape + x.data.dtype[index].shape,
+ has_duplicates=x.has_duplicates,
+ sorted=x.sorted)
+
+ # Otherwise, convert into a tuple.
+ if not isinstance(index, tuple):
+ index = (index,)
+
+ # Check if the last index is an ellipsis.
+ last_ellipsis = len(index) > 0 and index[-1] is Ellipsis
+
+ # Normalize the index into canonical form.
+ index = normalize_index(index, x.shape)
+
+ # zip_longest so things like x[..., None] are picked up.
+ if len(index) != 0 and all(ind == slice(0, dim, 1) for ind, dim in zip_longest(index, x.shape)):
+ return x
+
+ # Get the mask
+ mask = _mask(x.coords, index, x.shape)
+
+ # Get the length of the mask
+ if isinstance(mask, slice):
+ n = len(range(mask.start, mask.stop, mask.step))
+ else:
+ n = len(mask)
+
+ coords = []
+ shape = []
+ i = 0
+ for ind in index:
+ # Nothing is added to shape or coords if the index is an integer.
+ if isinstance(ind, Integral):
+ i += 1
+ continue
+ # Add to the shape and transform the coords in the case of a slice.
+ elif isinstance(ind, slice):
+ shape.append(len(range(ind.start, ind.stop, ind.step)))
+ dt = np.min_scalar_type(min(-(dim - 1) if dim != 0 else -1 for dim in shape))
+ coords.append((x.coords[i, mask].astype(dt) - ind.start) // ind.step)
+ i += 1
+ elif isinstance(ind, Iterable):
+ raise NotImplementedError('Advanced indexing is not yet supported.')
+ # Add a dimension for None.
+ elif ind is None:
+ coords.append(np.zeros(n))
+ shape.append(1)
+
+ # Join all the transformed coords.
+ if coords:
+ coords = np.stack(coords, axis=0)
+ else:
+ # If index result is a scalar, return a 0-d COO or
+ # a scalar depending on whether the last index is an ellipsis.
+ if last_ellipsis:
+ coords = np.empty((0, n), dtype=np.uint8)
+ else:
+ if n != 0:
+ return x.data[mask][0]
+ else:
+ return _zero_of_dtype(x.dtype)[()]
+
+ shape = tuple(shape)
+ data = x.data[mask]
+
+ return COO(coords, data, shape=shape,
+ has_duplicates=x.has_duplicates,
+ sorted=x.sorted)
+
+
+def _mask(coords, indices, shape):
+ indices = _prune_indices(indices, shape)
+
+ ind_ar = np.empty((len(indices), 3), dtype=np.intp)
+
+ for i, idx in enumerate(indices):
+ if isinstance(idx, slice):
+ ind_ar[i] = [idx.start, idx.stop, idx.step]
+ else: # idx is an integer
+ ind_ar[i] = [idx, idx + 1, 1]
+
+ mask, is_slice = _compute_mask(coords, ind_ar)
+
+ if is_slice:
+ return slice(mask[0], mask[1], 1)
+ else:
+ return mask
+
+
+def _prune_indices(indices, shape, prune_none=True):
+ """
+ Gets rid of the indices that do not contribute to the
+ overall mask, e.g. None and full slices.
+
+ Parameters
+ ----------
+ indices : tuple
+ The indices to the array.
+ shape : tuple[int]
+ The shape of the array.
+
+ Returns
+ -------
+ indices : tuple
+ The filtered indices.
+
+ Examples
+ --------
+ >>> _prune_indices((None, 5), (10,)) # None won't affect the mask
+ [5]
+ >>> _prune_indices((slice(0, 10, 1),), (10,)) # Full slices don't affect the mask
+ []
+ """
+ if prune_none:
+ indices = [idx for idx in indices if idx is not None]
+
+ i = 0
+ for idx, l in zip(indices[::-1], shape[::-1]):
+ if not isinstance(idx, slice):
+ break
+
+ if idx.start == 0 and idx.stop == l and idx.step == 1:
+ i += 1
+ continue
+
+ if idx.start == l - 1 and idx.stop == -1 and idx.step == -1:
+ i += 1
+ continue
+
+ break
+ if i != 0:
+ indices = indices[:-i]
+ return indices
+
+
[email protected](nopython=True)
+def _compute_mask(coords, indices): # pragma: no cover
+ """
+ Gets the mask for the coords given the indices in slice format.
+
+ Works with either start-stop ranges of matching indices into coords
+ called "pairs" (start-stop pairs) or filters the mask directly, based
+ on which is faster.
+
+ Exploits the structure in sorted coords, which is that for a constant
+ value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.
+ Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``
+ is always sorted. It uses this sortedness to find sub-pairs for each
+ dimension given the previous, and so on. This is efficient for small
+ slices or ints, but not for large ones.
+
+ After it detects that working with pairs is rather inefficient (or after
+ going through each possible index), it constructs a filtered mask from the
+ start-stop pairs.
+
+ Parameters
+ ----------
+ coords : np.ndarray
+ The coordinates of the array.
+ indices : np.ndarray
+ The indices in the form of slices such that indices[:, 0] are starts,
+ indices[:, 1] are stops and indices[:, 2] are steps.
+
+ Returns
+ -------
+ mask : np.ndarray
+ The starts and stops in the mask.
+ is_slice : bool
+ Whether or not the array represents a continuous slice.
+
+ Examples
+ --------
+ Let's create some mock coords and indices
+
+ >>> import numpy as np
+ >>> coords = np.array([[0, 0, 1, 1, 2, 2]])
+ >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)
+
+ Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched.
+
+ >>> _compute_mask(coords, indices)
+ (array([0, 1, 4, 5]), False)
+
+ Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``.
+
+ >>> indices = np.array([[0, 2, 1]])
+ >>> _compute_mask(coords, indices)
+ (array([0, 4]), True)
+
+ This is equivalent to mask being ``slice(0, 4, 1)``.
+ """
+ # Set the initial mask to be the entire range of coordinates.
+ starts = [0]
+ stops = [coords.shape[1]]
+ n_matches = coords.shape[1]
+
+ i = 0
+ while i < len(indices):
+ # Guesstimate whether working with pairs is more efficient or
+ # working with the mask directly.
+ # One side is the estimate of time taken for binary searches
+ # (n_searches * log(avg_length))
+ # The other is an estimated time of a linear filter for the mask.
+ n_pairs = len(starts)
+ n_current_slices = _get_slice_len(indices[i]) * n_pairs + 2
+ if n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > \
+ n_matches + n_pairs:
+ break
+
+ # For each of the pairs, search inside the coordinates for other
+ # matching sub-pairs.
+ # This gets the start-end coordinates in coords for each 'sub-array'
+ # Which would come out of indexing a single integer.
+ starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i])
+
+ i += 1
+
+ # Combine adjacent pairs
+ starts, stops = _join_adjacent_pairs(starts, stops)
+
+ # If just one pair is left over, treat it as a slice.
+ if i == len(indices) and len(starts) == 1:
+ return np.array([starts[0], stops[0]]), True
+
+ # Convert start-stop pairs into mask, filtering by remaining
+ # coordinates.
+ mask = _filter_pairs(starts, stops, coords[i:], indices[i:])
+
+ return np.array(mask, dtype=np.intp), False
+
+
[email protected](nopython=True)
+def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover
+ """
+ Gets the pairs for a following dimension given the pairs for
+ a dimension.
+
+ For each pair, it searches in the following dimension for
+ matching coords and returns those.
+
+ The total combined length of all pairs is returned to
+ help with the performance guesstimate.
+
+ Parameters
+ ----------
+ starts_old, stops_old : list[int]
+ The starts and stops from the previous index.
+ c : np.ndarray
+ The coords for this index's dimension.
+ idx : np.ndarray
+ The index in the form of a slice.
+ idx[0], idx[1], idx[2] = start, stop, step
+
+ Returns
+ -------
+ starts, stops: list
+ The starts and stops after applying the current index.
+ n_matches : int
+ The sum of elements in all ranges.
+
+ Examples
+ --------
+ >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])
+ >>> starts_old = [4]
+ >>> stops_old = [8]
+ >>> idx = np.array([1, 2, 1])
+ >>> _get_mask_pairs(starts_old, stops_old, c, idx)
+ ([4], [6], 2)
+ """
+ starts = []
+ stops = []
+ n_matches = 0
+
+ for j in range(len(starts_old)):
+ # For each matching "integer" in the slice, search within the "sub-coords"
+ # Using binary search.
+ for p_match in range(idx[0], idx[1], idx[2]):
+ start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j]
+ stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match + 1) + starts_old[j]
+
+ if start != stop:
+ starts.append(start)
+ stops.append(stop)
+ n_matches += stop - start
+
+ return starts, stops, n_matches
+
+
[email protected](nopython=True)
+def _get_slice_len(idx): # pragma: no cover
+ """
+ Get the number of elements in a slice.
+
+ Parameters
+ ----------
+ idx : np.ndarray
+ A (3,) shaped array containing start, stop, step
+
+ Returns
+ -------
+ n : int
+ The length of the slice.
+
+ Examples
+ --------
+ >>> idx = np.array([5, 15, 5])
+ >>> _get_slice_len(idx)
+ 2
+ """
+ start, stop, step = idx[0], idx[1], idx[2]
+
+ if step > 0:
+ return (stop - start + step - 1) // step
+ else:
+ return (start - stop - step - 1) // (-step)
+
+
[email protected](nopython=True)
+def _filter_pairs(starts, stops, coords, indices): # pragma: no cover
+ """
+ Converts all the pairs into a single integer mask, additionally filtering
+ by the indices.
+
+ Parameters
+ ----------
+ starts, stops : list[int]
+ The starts and stops to convert into an array.
+ coords : np.ndarray
+ The coordinates to filter by.
+ indices : np.ndarray
+ The indices in the form of slices such that indices[:, 0] are starts,
+ indices[:, 1] are stops and indices[:, 2] are steps.
+
+ Returns
+ -------
+ mask : list
+ The output integer mask.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> starts = [2]
+ >>> stops = [7]
+ >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])
+ >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs
+ >>> _filter_pairs(starts, stops, coords, indices)
+ [2, 4, 6]
+ """
+ mask = []
+
+ # For each pair,
+ for i in range(len(starts)):
+ # For each element match within the pair range
+ for j in range(starts[i], stops[i]):
+ match = True
+
+ # Check if it matches all indices
+ for k in range(len(indices)):
+ idx = indices[k]
+ elem = coords[k, j]
+
+ match &= ((elem - idx[0]) % idx[2] == 0 and
+ ((idx[2] > 0 and idx[0] <= elem < idx[1])
+ or (idx[2] < 0 and idx[0] >= elem > idx[1])))
+
+ # and append to the mask if so.
+ if match:
+ mask.append(j)
+
+ return mask
+
+
[email protected](nopython=True)
+def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover
+ """
+ Joins adjacent pairs into one. For example, 2-5 and 5-7
+ will reduce to 2-7 (a single pair). This may help in
+ returning a slice in the end which could be faster.
+
+ Parameters
+ ----------
+ starts_old, stops_old : list[int]
+ The input starts and stops
+
+ Returns
+ -------
+ starts, stops : list[int]
+ The reduced starts and stops.
+
+ Examples
+ --------
+ >>> starts = [2, 5]
+ >>> stops = [5, 7]
+ >>> _join_adjacent_pairs(starts, stops)
+ ([2], [7])
+ """
+ if len(starts_old) <= 1:
+ return starts_old, stops_old
+
+ starts = [starts_old[0]]
+ stops = []
+
+ for i in range(1, len(starts_old)):
+ if starts_old[i] != stops_old[i - 1]:
+ starts.append(starts_old[i])
+ stops.append(stops_old[i - 1])
+
+ stops.append(stops_old[-1])
+
+ return starts, stops
diff --git a/sparse/slicing.py b/sparse/slicing.py
index 962e792..56e5b25 100644
--- a/sparse/slicing.py
+++ b/sparse/slicing.py
@@ -3,6 +3,7 @@
import math
from numbers import Integral, Number
+
import numpy as np
@@ -12,7 +13,7 @@ def normalize_index(idx, shape):
2. Adds full slices to end of index
3. Checks bounding conditions
4. Replaces numpy arrays with lists
- 5. Posify's integers and lists
+    5. Posify's slices, integers and lists
6. Normalizes slices to canonical form
Examples
--------
@@ -23,9 +24,9 @@ def normalize_index(idx, shape):
>>> normalize_index([-1], (10,))
(array([9]),)
>>> normalize_index(slice(-3, 10, 1), (10,))
- (slice(7, None, None),)
+ (slice(7, 10, 1),)
>>> normalize_index((Ellipsis, None), (10,))
- (slice(None, None, None), None)
+ (slice(0, 10, 1), None)
"""
if not isinstance(idx, tuple):
idx = (idx,)
@@ -55,8 +56,9 @@ def normalize_index(idx, shape):
if d is not None:
check_index(i, d)
idx = tuple(map(sanitize_index, idx))
- idx = tuple(map(normalize_slice, idx, none_shape))
+ idx = tuple(map(replace_none, idx, none_shape))
idx = posify_index(none_shape, idx)
+ idx = tuple(map(clip_slice, idx, none_shape))
return idx
@@ -176,50 +178,6 @@ def _sanitize_index_element(ind):
return int(ind)
-def normalize_slice(idx, dim):
- """ Normalize slices to canonical form
- Parameters
- ----------
- idx: slice or other index
- dim: dimension length
- Examples
- --------
- >>> normalize_slice(slice(0, 10, 1), 10)
- slice(None, None, None)
- """
-
- if isinstance(idx, slice):
- start, stop, step = idx.start, idx.stop, idx.step
- if start is not None:
- if start < 0 and not math.isnan(dim):
- start = max(0, start + dim)
- elif start > dim:
- start = dim
- if stop is not None:
- if stop < 0 and not math.isnan(dim):
- stop = max(0, stop + dim)
- elif stop > dim:
- stop = dim
-
- step = 1 if step is None else step
-
- if step > 0:
- if start == 0:
- start = None
- if stop == dim:
- stop = None
- else:
- if start == dim - 1:
- start = None
- if stop == -1:
- stop = None
-
- if step == 1:
- step = None
- return slice(start, stop, step)
- return idx
-
-
def posify_index(shape, ind):
""" Flip negative indices around to positive ones
>>> posify_index(10, 3)
@@ -243,5 +201,93 @@ def posify_index(shape, ind):
if isinstance(ind, (np.ndarray, list)) and not math.isnan(shape):
ind = np.asanyarray(ind)
return np.where(ind < 0, ind + shape, ind)
+ if isinstance(ind, slice):
+ start, stop, step = ind.start, ind.stop, ind.step
+
+ if start < 0:
+ start += shape
+
+ if not (0 > stop >= step) and stop < 0:
+ stop += shape
+
+ return slice(start, stop, ind.step)
return ind
+
+
+def clip_slice(idx, dim):
+ """
+ Clip slice to its effective size given the shape.
+
+ Parameters
+ ----------
+ idx : The index.
+ dim : The size along the corresponding dimension.
+
+ Returns
+ -------
+ idx : slice
+
+ Examples
+ --------
+ >>> clip_slice(slice(0, 20, 1), 10)
+ slice(0, 10, 1)
+ """
+ if not isinstance(idx, slice):
+ return idx
+
+ start, stop, step = idx.start, idx.stop, idx.step
+
+ if step > 0:
+ start = max(start, 0)
+ stop = min(stop, dim)
+
+ if start > stop:
+ start = stop
+ else:
+ start = min(start, dim - 1)
+ stop = max(stop, -1)
+
+ if start < stop:
+ start = stop
+
+ return slice(start, stop, step)
+
+
+def replace_none(idx, dim):
+ """
+ Normalize slices to canonical form, i.e.
+ replace ``None`` with the appropriate integers.
+
+ Parameters
+ ----------
+ idx: slice or other index
+ dim: dimension length
+
+ Examples
+ --------
+ >>> replace_none(slice(None, None, None), 10)
+ slice(0, 10, 1)
+ """
+ if not isinstance(idx, slice):
+ return idx
+
+ start, stop, step = idx.start, idx.stop, idx.step
+
+ if step is None:
+ step = 1
+
+ if step > 0:
+ if start is None:
+ start = 0
+
+ if stop is None:
+ stop = dim
+ else:
+ if start is None:
+ start = dim - 1
+
+ if stop is None:
+ stop = -1
+
+ return slice(start, stop, step)
| Indexing is O(n) when it can be made O(log n)
If we call `COO.sum_duplicates()` beforehand, it may be possible, with some trickery, to make indexing `O(log n)`. I'm still looking into how this can be done, and how to generalize it to multiple indices, but I know it's possible.
One concern I have is that I do not want it to become `O(n log n)` for multiple indices, so this needs to be thought out carefully. | pydata/sparse | diff --git a/sparse/tests/test_coo.py b/sparse/tests/test_coo.py
index d903683..6d83d93 100644
--- a/sparse/tests/test_coo.py
+++ b/sparse/tests/test_coo.py
@@ -1,14 +1,12 @@
-import pytest
-
import operator
import numpy as np
+import pytest
import scipy.sparse
import scipy.stats
-from sparse import COO
-
import sparse
+from sparse import COO
from sparse.utils import assert_eq, is_lexsorted, random_value_array
@@ -772,36 +770,27 @@ def test_gt():
@pytest.mark.parametrize('index', [
+ # Integer
0,
1,
-1,
+ (1, 1, 1),
+ # Pure slices
(slice(0, 2),),
(slice(None, 2), slice(None, 2)),
(slice(1, None), slice(1, None)),
(slice(None, None),),
+ (slice(None, None, -1),),
(slice(None, 2, -1), slice(None, 2, -1)),
(slice(1, None, 2), slice(1, None, 2)),
(slice(None, None, 2),),
(slice(None, 2, -1), slice(None, 2, -2)),
(slice(1, None, 2), slice(1, None, 1)),
(slice(None, None, -2),),
+ # Combinations
(0, slice(0, 2),),
(slice(0, 1), 0),
- ([1, 0], 0),
- (1, [0, 2]),
- (0, [1, 0], 0),
- (1, [2, 0], 0),
(None, slice(1, 3), 0),
- (Ellipsis, slice(1, 3)),
- (1, Ellipsis, slice(1, 3)),
- (slice(0, 1), Ellipsis),
- (Ellipsis, None),
- (None, Ellipsis),
- (1, Ellipsis),
- (1, Ellipsis, None),
- (1, 1, 1),
- (1, 1, 1, Ellipsis),
- (Ellipsis, 1, None),
(slice(0, 3), None, 0),
(slice(1, 2), slice(2, 4)),
(slice(1, 2), slice(None, None)),
@@ -815,10 +804,42 @@ def test_gt():
(slice(2, 0, -1), slice(None, None), -1),
(slice(-2, None, None),),
(slice(-1, None, None), slice(-2, None, None)),
+ # With ellipsis
+ (Ellipsis, slice(1, 3)),
+ (1, Ellipsis, slice(1, 3)),
+ (slice(0, 1), Ellipsis),
+ (Ellipsis, None),
+ (None, Ellipsis),
+ (1, Ellipsis),
+ (1, Ellipsis, None),
+ (1, 1, 1, Ellipsis),
+ (Ellipsis, 1, None),
+ # Pathological - Slices larger than array
+ (slice(None, 1000)),
+ (slice(None), slice(None, 1000)),
+ (slice(None), slice(1000, -1000, -1)),
+ (slice(None), slice(1000, -1000, -50)),
+ # Pathological - Wrong ordering of start/stop
+ (slice(5, 0),),
+ (slice(0, 5, -1),),
+])
+def test_slicing(index):
+ s = sparse.random((2, 3, 4), density=0.5)
+ x = s.todense()
+
+ assert_eq(x[index], s[index])
+
+
[email protected]('index', [
+ ([1, 0], 0),
+ (1, [0, 2]),
+ (0, [1, 0], 0),
+ (1, [2, 0], 0),
([True, False], slice(1, None), slice(-2, None)),
(slice(1, None), slice(-2, None), [True, False, True, False]),
])
-def test_slicing(index):
[email protected](reason='Advanced indexing is temporarily broken.')
+def test_advanced_indexing(index):
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
asv==0.5.1
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
distlib==0.3.9
docutils==0.17.1
filelock==3.4.1
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
llvmlite==0.36.0
MarkupSafe==2.0.1
mccabe==0.7.0
numba==0.53.1
numpy==1.19.5
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-flake8==1.1.1
pytz==2025.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
-e git+https://github.com/pydata/sparse.git@5f19f0aeea17c1d523e4a2e0e091cd376dde68c5#egg=sparse
Sphinx==4.3.2
sphinx-rtd-theme==1.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
zipp==3.6.0
| name: sparse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- asv==0.5.1
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- distlib==0.3.9
- docutils==0.17.1
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- llvmlite==0.36.0
- markupsafe==2.0.1
- mccabe==0.7.0
- numba==0.53.1
- numpy==1.19.5
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-flake8==1.1.1
- pytz==2025.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinx-rtd-theme==1.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/sparse
| [
"sparse/slicing.py::sparse.slicing.clip_slice",
"sparse/slicing.py::sparse.slicing.replace_none",
"sparse/coo/indexing.py::sparse.coo.indexing._compute_mask",
"sparse/coo/indexing.py::sparse.coo.indexing._filter_pairs",
"sparse/coo/indexing.py::sparse.coo.indexing._get_mask_pairs",
"sparse/coo/indexing.py::sparse.coo.indexing._get_slice_len",
"sparse/coo/indexing.py::sparse.coo.indexing._join_adjacent_pairs",
"sparse/coo/indexing.py::sparse.coo.indexing._prune_indices",
"sparse/tests/test_coo.py::test_slicing[index42]",
"sparse/tests/test_coo.py::test_slicing[index43]",
"sparse/tests/test_dok.py::test_setitem[shape0-index0-0.3677514566770569]",
"sparse/tests/test_dok.py::test_setitem[shape1-index1-0.5428743048014814]",
"sparse/tests/test_dok.py::test_setitem[shape3-1-0.03577115120288721]",
"sparse/tests/test_dok.py::test_setitem[shape4-index4-0.23294320117337797]",
"sparse/tests/test_dok.py::test_setitem[shape5-index5-0.6335989177833085]",
"sparse/tests/test_dok.py::test_setitem[shape9-index9-0.8394366614692292]",
"sparse/tests/test_dok.py::test_setitem[shape11-index11-0.2550012585313681]",
"sparse/tests/test_dok.py::test_setitem[shape13-index13-0.14924941670578296]"
]
| [
"sparse/tests/test_coo.py::test_op_scipy_sparse_left[func2]",
"sparse/tests/test_coo.py::test_op_scipy_sparse_left[func3]",
"sparse/tests/test_coo.py::test_op_scipy_sparse_left[func4]",
"sparse/tests/test_coo.py::test_op_scipy_sparse_left[func5]"
]
| [
"sparse/dok.py::sparse.dok.DOK",
"sparse/dok.py::sparse.dok.DOK.from_coo",
"sparse/dok.py::sparse.dok.DOK.from_numpy",
"sparse/dok.py::sparse.dok.DOK.nnz",
"sparse/dok.py::sparse.dok.DOK.to_coo",
"sparse/dok.py::sparse.dok.DOK.todense",
"sparse/slicing.py::sparse.slicing.check_index",
"sparse/slicing.py::sparse.slicing.normalize_index",
"sparse/slicing.py::sparse.slicing.posify_index",
"sparse/slicing.py::sparse.slicing.replace_ellipsis",
"sparse/slicing.py::sparse.slicing.sanitize_index",
"sparse/sparse_array.py::sparse.sparse_array.SparseArray.density",
"sparse/sparse_array.py::sparse.sparse_array.SparseArray.ndim",
"sparse/sparse_array.py::sparse.sparse_array.SparseArray.nnz",
"sparse/sparse_array.py::sparse.sparse_array.SparseArray.size",
"sparse/utils.py::sparse.utils.random",
"sparse/coo/core.py::sparse.coo.core.COO",
"sparse/coo/core.py::sparse.coo.core.COO.T",
"sparse/coo/core.py::sparse.coo.core.COO.__len__",
"sparse/coo/core.py::sparse.coo.core.COO.dot",
"sparse/coo/core.py::sparse.coo.core.COO.dtype",
"sparse/coo/core.py::sparse.coo.core.COO.from_numpy",
"sparse/coo/core.py::sparse.coo.core.COO.from_scipy_sparse",
"sparse/coo/core.py::sparse.coo.core.COO.linear_loc",
"sparse/coo/core.py::sparse.coo.core.COO.max",
"sparse/coo/core.py::sparse.coo.core.COO.maybe_densify",
"sparse/coo/core.py::sparse.coo.core.COO.min",
"sparse/coo/core.py::sparse.coo.core.COO.nbytes",
"sparse/coo/core.py::sparse.coo.core.COO.nnz",
"sparse/coo/core.py::sparse.coo.core.COO.prod",
"sparse/coo/core.py::sparse.coo.core.COO.reduce",
"sparse/coo/core.py::sparse.coo.core.COO.reshape",
"sparse/coo/core.py::sparse.coo.core.COO.sort_indices",
"sparse/coo/core.py::sparse.coo.core.COO.sum",
"sparse/coo/core.py::sparse.coo.core.COO.sum_duplicates",
"sparse/coo/core.py::sparse.coo.core.COO.todense",
"sparse/coo/core.py::sparse.coo.core.COO.transpose",
"sparse/tests/test_coo.py::test_reductions[True-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[True-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[True-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[True-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[True-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[True--3-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True--3-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True--3-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True--3-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True--3-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[True-axis6-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[True-axis6-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[True-axis6-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[True-axis6-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[True-axis6-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False--3-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False--3-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False--3-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False--3-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False--3-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_reductions[False-axis6-max-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_reductions[False-axis6-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_reductions[False-axis6-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_reductions[False-axis6-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_reductions[False-axis6-min-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[amax-kwargs0]",
"sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[sum-kwargs1]",
"sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[prod-kwargs2]",
"sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs3]",
"sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs4]",
"sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs5]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanmin]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nansum]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanprod]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanmax]",
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanmin]",
"sparse/tests/test_coo.py::test_all_nan_reduction_warning[None-nanmax]",
"sparse/tests/test_coo.py::test_all_nan_reduction_warning[None-nanmin]",
"sparse/tests/test_coo.py::test_all_nan_reduction_warning[0-nanmax]",
"sparse/tests/test_coo.py::test_all_nan_reduction_warning[0-nanmin]",
"sparse/tests/test_coo.py::test_all_nan_reduction_warning[1-nanmax]",
"sparse/tests/test_coo.py::test_all_nan_reduction_warning[1-nanmin]",
"sparse/tests/test_coo.py::test_transpose[None]",
"sparse/tests/test_coo.py::test_transpose[axis1]",
"sparse/tests/test_coo.py::test_transpose[axis2]",
"sparse/tests/test_coo.py::test_transpose[axis3]",
"sparse/tests/test_coo.py::test_transpose[axis4]",
"sparse/tests/test_coo.py::test_transpose[axis5]",
"sparse/tests/test_coo.py::test_transpose[axis6]",
"sparse/tests/test_coo.py::test_transpose_error[axis0]",
"sparse/tests/test_coo.py::test_transpose_error[axis1]",
"sparse/tests/test_coo.py::test_transpose_error[axis2]",
"sparse/tests/test_coo.py::test_transpose_error[axis3]",
"sparse/tests/test_coo.py::test_transpose_error[axis4]",
"sparse/tests/test_coo.py::test_transpose_error[axis5]",
"sparse/tests/test_coo.py::test_reshape[a0-b0]",
"sparse/tests/test_coo.py::test_reshape[a1-b1]",
"sparse/tests/test_coo.py::test_reshape[a2-b2]",
"sparse/tests/test_coo.py::test_reshape[a3-b3]",
"sparse/tests/test_coo.py::test_reshape[a4-b4]",
"sparse/tests/test_coo.py::test_reshape[a5-b5]",
"sparse/tests/test_coo.py::test_reshape[a6-b6]",
"sparse/tests/test_coo.py::test_reshape[a7-b7]",
"sparse/tests/test_coo.py::test_reshape[a8-b8]",
"sparse/tests/test_coo.py::test_reshape[a9-b9]",
"sparse/tests/test_coo.py::test_large_reshape",
"sparse/tests/test_coo.py::test_reshape_same",
"sparse/tests/test_coo.py::test_to_scipy_sparse",
"sparse/tests/test_coo.py::test_tensordot[a_shape0-b_shape0-axes0]",
"sparse/tests/test_coo.py::test_tensordot[a_shape1-b_shape1-axes1]",
"sparse/tests/test_coo.py::test_tensordot[a_shape2-b_shape2-axes2]",
"sparse/tests/test_coo.py::test_tensordot[a_shape3-b_shape3-axes3]",
"sparse/tests/test_coo.py::test_tensordot[a_shape4-b_shape4-axes4]",
"sparse/tests/test_coo.py::test_tensordot[a_shape5-b_shape5-axes5]",
"sparse/tests/test_coo.py::test_tensordot[a_shape6-b_shape6-axes6]",
"sparse/tests/test_coo.py::test_tensordot[a_shape7-b_shape7-axes7]",
"sparse/tests/test_coo.py::test_tensordot[a_shape8-b_shape8-axes8]",
"sparse/tests/test_coo.py::test_tensordot[a_shape9-b_shape9-0]",
"sparse/tests/test_coo.py::test_dot[a_shape0-b_shape0]",
"sparse/tests/test_coo.py::test_dot[a_shape1-b_shape1]",
"sparse/tests/test_coo.py::test_dot[a_shape2-b_shape2]",
"sparse/tests/test_coo.py::test_dot[a_shape3-b_shape3]",
"sparse/tests/test_coo.py::test_dot[a_shape4-b_shape4]",
"sparse/tests/test_coo.py::test_elemwise[expm1]",
"sparse/tests/test_coo.py::test_elemwise[log1p]",
"sparse/tests/test_coo.py::test_elemwise[sin]",
"sparse/tests/test_coo.py::test_elemwise[tan]",
"sparse/tests/test_coo.py::test_elemwise[sinh]",
"sparse/tests/test_coo.py::test_elemwise[tanh]",
"sparse/tests/test_coo.py::test_elemwise[floor]",
"sparse/tests/test_coo.py::test_elemwise[ceil]",
"sparse/tests/test_coo.py::test_elemwise[sqrt]",
"sparse/tests/test_coo.py::test_elemwise[conjugate0]",
"sparse/tests/test_coo.py::test_elemwise[round_]",
"sparse/tests/test_coo.py::test_elemwise[rint]",
"sparse/tests/test_coo.py::test_elemwise[<lambda>0]",
"sparse/tests/test_coo.py::test_elemwise[conjugate1]",
"sparse/tests/test_coo.py::test_elemwise[conjugate2]",
"sparse/tests/test_coo.py::test_elemwise[<lambda>1]",
"sparse/tests/test_coo.py::test_elemwise[abs]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape0-mul]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape0-add]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape0-sub]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape0-gt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape0-lt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape0-ne]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape1-mul]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape1-add]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape1-sub]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape1-gt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape1-lt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape1-ne]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape2-mul]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape2-add]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape2-sub]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape2-gt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape2-lt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape2-ne]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-mul]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-add]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-sub]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-gt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-lt]",
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-ne]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>0]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>1]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>2]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>3]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>0]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>1]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>2]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>3]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>0]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>1]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>2]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>3]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>0]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>1]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>2]",
"sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>3]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape10-shape20-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape10-shape20-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape11-shape21-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape11-shape21-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape12-shape22-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape12-shape22-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape13-shape23-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape13-shape23-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape14-shape24-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape14-shape24-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape15-shape25-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape15-shape25-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape16-shape26-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape16-shape26-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape17-shape27-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape17-shape27-mul]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape18-shape28-add]",
"sparse/tests/test_coo.py::test_binary_broadcasting[shape18-shape28-mul]",
"sparse/tests/test_coo.py::test_broadcast_to[shape10-shape20]",
"sparse/tests/test_coo.py::test_broadcast_to[shape11-shape21]",
"sparse/tests/test_coo.py::test_broadcast_to[shape12-shape22]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes0]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes1]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes2]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes3]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes4]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes5]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes6]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes7]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes0]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes1]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes2]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes3]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes4]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes5]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes6]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes7]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes0]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes1]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes2]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes3]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes4]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes5]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes6]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes7]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes0]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes1]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes2]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes3]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes4]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes5]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes6]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes7]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes0]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes1]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes2]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes3]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes4]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes5]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes6]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes7]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes0]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes1]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes2]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes3]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes4]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes5]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes6]",
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes7]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes0-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes1-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes2-<lambda>]",
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes3-<lambda>]",
"sparse/tests/test_coo.py::test_sparse_broadcasting",
"sparse/tests/test_coo.py::test_dense_broadcasting",
"sparse/tests/test_coo.py::test_sparsearray_elemwise[coo]",
"sparse/tests/test_coo.py::test_sparsearray_elemwise[dok]",
"sparse/tests/test_coo.py::test_ndarray_densification_fails",
"sparse/tests/test_coo.py::test_elemwise_noargs",
"sparse/tests/test_coo.py::test_auto_densification_fails[pow]",
"sparse/tests/test_coo.py::test_auto_densification_fails[truediv]",
"sparse/tests/test_coo.py::test_auto_densification_fails[floordiv]",
"sparse/tests/test_coo.py::test_auto_densification_fails[ge]",
"sparse/tests/test_coo.py::test_auto_densification_fails[le]",
"sparse/tests/test_coo.py::test_auto_densification_fails[eq]",
"sparse/tests/test_coo.py::test_auto_densification_fails[mod]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-mul-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-add-0]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-sub-0]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-pow-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-truediv-3]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-floordiv-4]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-gt-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-lt--5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-ne-0]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-ge-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-le--3]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-eq-1]",
"sparse/tests/test_coo.py::test_elemwise_scalar[True-mod-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-mul-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-add-0]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-sub-0]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-pow-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-truediv-3]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-floordiv-4]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-gt-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-lt--5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-ne-0]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-ge-5]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-le--3]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-eq-1]",
"sparse/tests/test_coo.py::test_elemwise_scalar[False-mod-5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-mul-5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-add-0]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-sub-0]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-gt--5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-lt-5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-ne-0]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-ge--5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-le-3]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-eq-1]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-mul-5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-add-0]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-sub-0]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-gt--5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-lt-5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-ne-0]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-ge--5]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-le-3]",
"sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-eq-1]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[add-5]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[sub--5]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[pow--3]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[truediv-0]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[floordiv-0]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[gt--5]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[lt-5]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[ne-1]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[ge--3]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[le-3]",
"sparse/tests/test_coo.py::test_scalar_densification_fails[eq-0]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape0-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape0-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape0-xor]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape1-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape1-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape1-xor]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape2-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape2-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape2-xor]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape3-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape3-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary[shape3-xor]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape0-lshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape0-rshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape1-lshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape1-rshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape2-lshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape2-rshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape3-lshift]",
"sparse/tests/test_coo.py::test_bitshift_binary[shape3-rshift]",
"sparse/tests/test_coo.py::test_bitwise_scalar[shape0-and_]",
"sparse/tests/test_coo.py::test_bitwise_scalar[shape1-and_]",
"sparse/tests/test_coo.py::test_bitwise_scalar[shape2-and_]",
"sparse/tests/test_coo.py::test_bitwise_scalar[shape3-and_]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape0-lshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape0-rshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape1-lshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape1-rshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape2-lshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape2-rshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape3-lshift]",
"sparse/tests/test_coo.py::test_bitshift_scalar[shape3-rshift]",
"sparse/tests/test_coo.py::test_unary_bitwise_densification_fails[shape0-invert]",
"sparse/tests/test_coo.py::test_unary_bitwise_densification_fails[shape1-invert]",
"sparse/tests/test_coo.py::test_unary_bitwise_densification_fails[shape2-invert]",
"sparse/tests/test_coo.py::test_unary_bitwise_densification_fails[shape3-invert]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape0-or_]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape0-xor]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape1-or_]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape1-xor]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape2-or_]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape2-xor]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape3-or_]",
"sparse/tests/test_coo.py::test_binary_bitwise_densification_fails[shape3-xor]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape0-lshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape0-rshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape1-lshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape1-rshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape2-lshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape2-rshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape3-lshift]",
"sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape3-rshift]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-xor]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-xor]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-xor]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-and_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-or_]",
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-xor]",
"sparse/tests/test_coo.py::test_elemwise_binary_empty",
"sparse/tests/test_coo.py::test_gt",
"sparse/tests/test_coo.py::test_slicing[0]",
"sparse/tests/test_coo.py::test_slicing[1]",
"sparse/tests/test_coo.py::test_slicing[-1]",
"sparse/tests/test_coo.py::test_slicing[index3]",
"sparse/tests/test_coo.py::test_slicing[index4]",
"sparse/tests/test_coo.py::test_slicing[index5]",
"sparse/tests/test_coo.py::test_slicing[index6]",
"sparse/tests/test_coo.py::test_slicing[index7]",
"sparse/tests/test_coo.py::test_slicing[index8]",
"sparse/tests/test_coo.py::test_slicing[index9]",
"sparse/tests/test_coo.py::test_slicing[index10]",
"sparse/tests/test_coo.py::test_slicing[index11]",
"sparse/tests/test_coo.py::test_slicing[index12]",
"sparse/tests/test_coo.py::test_slicing[index13]",
"sparse/tests/test_coo.py::test_slicing[index14]",
"sparse/tests/test_coo.py::test_slicing[index15]",
"sparse/tests/test_coo.py::test_slicing[index16]",
"sparse/tests/test_coo.py::test_slicing[index17]",
"sparse/tests/test_coo.py::test_slicing[index18]",
"sparse/tests/test_coo.py::test_slicing[index19]",
"sparse/tests/test_coo.py::test_slicing[index20]",
"sparse/tests/test_coo.py::test_slicing[index21]",
"sparse/tests/test_coo.py::test_slicing[index22]",
"sparse/tests/test_coo.py::test_slicing[index23]",
"sparse/tests/test_coo.py::test_slicing[index24]",
"sparse/tests/test_coo.py::test_slicing[index25]",
"sparse/tests/test_coo.py::test_slicing[index26]",
"sparse/tests/test_coo.py::test_slicing[index27]",
"sparse/tests/test_coo.py::test_slicing[index28]",
"sparse/tests/test_coo.py::test_slicing[index29]",
"sparse/tests/test_coo.py::test_slicing[index30]",
"sparse/tests/test_coo.py::test_slicing[index31]",
"sparse/tests/test_coo.py::test_slicing[index32]",
"sparse/tests/test_coo.py::test_slicing[index33]",
"sparse/tests/test_coo.py::test_slicing[index34]",
"sparse/tests/test_coo.py::test_slicing[index35]",
"sparse/tests/test_coo.py::test_slicing[index36]",
"sparse/tests/test_coo.py::test_slicing[index37]",
"sparse/tests/test_coo.py::test_slicing[index38]",
"sparse/tests/test_coo.py::test_slicing[index39]",
"sparse/tests/test_coo.py::test_slicing[index40]",
"sparse/tests/test_coo.py::test_slicing[index41]",
"sparse/tests/test_coo.py::test_slicing[index44]",
"sparse/tests/test_coo.py::test_slicing[index45]",
"sparse/tests/test_coo.py::test_custom_dtype_slicing",
"sparse/tests/test_coo.py::test_slicing_errors[index0]",
"sparse/tests/test_coo.py::test_slicing_errors[index1]",
"sparse/tests/test_coo.py::test_slicing_errors[index2]",
"sparse/tests/test_coo.py::test_slicing_errors[5]",
"sparse/tests/test_coo.py::test_slicing_errors[-5]",
"sparse/tests/test_coo.py::test_slicing_errors[foo]",
"sparse/tests/test_coo.py::test_slicing_errors[index6]",
"sparse/tests/test_coo.py::test_slicing_errors[0.5]",
"sparse/tests/test_coo.py::test_slicing_errors[index8]",
"sparse/tests/test_coo.py::test_slicing_errors[index9]",
"sparse/tests/test_coo.py::test_canonical",
"sparse/tests/test_coo.py::test_concatenate",
"sparse/tests/test_coo.py::test_concatenate_mixed[stack-0]",
"sparse/tests/test_coo.py::test_concatenate_mixed[stack-1]",
"sparse/tests/test_coo.py::test_concatenate_mixed[concatenate-0]",
"sparse/tests/test_coo.py::test_concatenate_mixed[concatenate-1]",
"sparse/tests/test_coo.py::test_stack[0-shape0]",
"sparse/tests/test_coo.py::test_stack[0-shape1]",
"sparse/tests/test_coo.py::test_stack[0-shape2]",
"sparse/tests/test_coo.py::test_stack[1-shape0]",
"sparse/tests/test_coo.py::test_stack[1-shape1]",
"sparse/tests/test_coo.py::test_stack[1-shape2]",
"sparse/tests/test_coo.py::test_stack[-1-shape0]",
"sparse/tests/test_coo.py::test_stack[-1-shape1]",
"sparse/tests/test_coo.py::test_stack[-1-shape2]",
"sparse/tests/test_coo.py::test_large_concat_stack",
"sparse/tests/test_coo.py::test_coord_dtype",
"sparse/tests/test_coo.py::test_addition",
"sparse/tests/test_coo.py::test_addition_not_ok_when_large_and_sparse",
"sparse/tests/test_coo.py::test_scalar_multiplication[2]",
"sparse/tests/test_coo.py::test_scalar_multiplication[2.5]",
"sparse/tests/test_coo.py::test_scalar_multiplication[scalar2]",
"sparse/tests/test_coo.py::test_scalar_multiplication[scalar3]",
"sparse/tests/test_coo.py::test_scalar_exponentiation",
"sparse/tests/test_coo.py::test_create_with_lists_of_tuples",
"sparse/tests/test_coo.py::test_sizeof",
"sparse/tests/test_coo.py::test_scipy_sparse_interface",
"sparse/tests/test_coo.py::test_scipy_sparse_interaction[coo]",
"sparse/tests/test_coo.py::test_scipy_sparse_interaction[csr]",
"sparse/tests/test_coo.py::test_scipy_sparse_interaction[dok]",
"sparse/tests/test_coo.py::test_scipy_sparse_interaction[csc]",
"sparse/tests/test_coo.py::test_op_scipy_sparse[mul]",
"sparse/tests/test_coo.py::test_op_scipy_sparse[add]",
"sparse/tests/test_coo.py::test_op_scipy_sparse[sub]",
"sparse/tests/test_coo.py::test_op_scipy_sparse[gt]",
"sparse/tests/test_coo.py::test_op_scipy_sparse[lt]",
"sparse/tests/test_coo.py::test_op_scipy_sparse[ne]",
"sparse/tests/test_coo.py::test_op_scipy_sparse_left[add]",
"sparse/tests/test_coo.py::test_op_scipy_sparse_left[sub]",
"sparse/tests/test_coo.py::test_cache_csr",
"sparse/tests/test_coo.py::test_empty_shape",
"sparse/tests/test_coo.py::test_single_dimension",
"sparse/tests/test_coo.py::test_raise_dense",
"sparse/tests/test_coo.py::test_large_sum",
"sparse/tests/test_coo.py::test_add_many_sparse_arrays",
"sparse/tests/test_coo.py::test_caching",
"sparse/tests/test_coo.py::test_scalar_slicing",
"sparse/tests/test_coo.py::test_triul[shape0-0]",
"sparse/tests/test_coo.py::test_triul[shape1-1]",
"sparse/tests/test_coo.py::test_triul[shape2--1]",
"sparse/tests/test_coo.py::test_triul[shape3--2]",
"sparse/tests/test_coo.py::test_triul[shape4-1000]",
"sparse/tests/test_coo.py::test_empty_reduction",
"sparse/tests/test_coo.py::test_random_shape[0.1-shape0]",
"sparse/tests/test_coo.py::test_random_shape[0.1-shape1]",
"sparse/tests/test_coo.py::test_random_shape[0.1-shape2]",
"sparse/tests/test_coo.py::test_random_shape[0.3-shape0]",
"sparse/tests/test_coo.py::test_random_shape[0.3-shape1]",
"sparse/tests/test_coo.py::test_random_shape[0.3-shape2]",
"sparse/tests/test_coo.py::test_random_shape[0.5-shape0]",
"sparse/tests/test_coo.py::test_random_shape[0.5-shape1]",
"sparse/tests/test_coo.py::test_random_shape[0.5-shape2]",
"sparse/tests/test_coo.py::test_random_shape[0.7-shape0]",
"sparse/tests/test_coo.py::test_random_shape[0.7-shape1]",
"sparse/tests/test_coo.py::test_random_shape[0.7-shape2]",
"sparse/tests/test_coo.py::test_two_random_unequal",
"sparse/tests/test_coo.py::test_two_random_same_seed",
"sparse/tests/test_coo.py::test_random_sorted",
"sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-<lambda>-bool]",
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-None-float64]",
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-rvs-int]",
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-<lambda>-bool]",
"sparse/tests/test_coo.py::test_scalar_shape_construction",
"sparse/tests/test_coo.py::test_len",
"sparse/tests/test_coo.py::test_density",
"sparse/tests/test_coo.py::test_size",
"sparse/tests/test_coo.py::test_np_array",
"sparse/tests/test_coo.py::test_three_arg_where[shapes0]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes1]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes2]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes3]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes4]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes5]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes6]",
"sparse/tests/test_coo.py::test_three_arg_where[shapes7]",
"sparse/tests/test_coo.py::test_one_arg_where",
"sparse/tests/test_coo.py::test_one_arg_where_dense",
"sparse/tests/test_coo.py::test_two_arg_where",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape0]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape1]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape2]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape0]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape1]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape2]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape0]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape1]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape2]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape0]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape1]",
"sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape2]",
"sparse/tests/test_dok.py::test_convert_to_coo",
"sparse/tests/test_dok.py::test_convert_from_coo",
"sparse/tests/test_dok.py::test_convert_from_numpy",
"sparse/tests/test_dok.py::test_convert_to_numpy",
"sparse/tests/test_dok.py::test_construct[2-data0]",
"sparse/tests/test_dok.py::test_construct[shape1-data1]",
"sparse/tests/test_dok.py::test_construct[shape2-data2]",
"sparse/tests/test_dok.py::test_getitem[0.1-shape0]",
"sparse/tests/test_dok.py::test_getitem[0.1-shape1]",
"sparse/tests/test_dok.py::test_getitem[0.1-shape2]",
"sparse/tests/test_dok.py::test_getitem[0.3-shape0]",
"sparse/tests/test_dok.py::test_getitem[0.3-shape1]",
"sparse/tests/test_dok.py::test_getitem[0.3-shape2]",
"sparse/tests/test_dok.py::test_getitem[0.5-shape0]",
"sparse/tests/test_dok.py::test_getitem[0.5-shape1]",
"sparse/tests/test_dok.py::test_getitem[0.5-shape2]",
"sparse/tests/test_dok.py::test_getitem[0.7-shape0]",
"sparse/tests/test_dok.py::test_getitem[0.7-shape1]",
"sparse/tests/test_dok.py::test_getitem[0.7-shape2]",
"sparse/tests/test_dok.py::test_setitem[shape2-index2-value2]",
"sparse/tests/test_dok.py::test_setitem[shape6-index6-value6]",
"sparse/tests/test_dok.py::test_setitem[shape7-index7-value7]",
"sparse/tests/test_dok.py::test_setitem[shape8-index8-value8]",
"sparse/tests/test_dok.py::test_setitem[shape10-index10-value10]",
"sparse/tests/test_dok.py::test_setitem[shape12-index12-value12]",
"sparse/tests/test_dok.py::test_default_dtype",
"sparse/tests/test_dok.py::test_int_dtype",
"sparse/tests/test_dok.py::test_float_dtype",
"sparse/tests/test_dok.py::test_set_zero"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,314 | [
"benchmarks/benchmark_coo.py",
"sparse/slicing.py",
"sparse/coo/indexing.py",
".gitignore",
"sparse/coo/core.py"
]
| [
"benchmarks/benchmark_coo.py",
"sparse/slicing.py",
"sparse/coo/indexing.py",
".gitignore",
"sparse/coo/core.py"
]
|
conan-io__conan-2646 | 01b3d70da53db12cab2ca5d2c68464bf27652745 | 2018-03-20 18:26:01 | 0f8b143c43d0354c6a75da94a1374d5ce39b7f96 | memsharded: Good news are that no package in conan-center uses this setting. | diff --git a/.ci/jenkins/runner.py b/.ci/jenkins/runner.py
index cf747a165..513de3284 100644
--- a/.ci/jenkins/runner.py
+++ b/.ci/jenkins/runner.py
@@ -65,6 +65,7 @@ def run_tests(module_path, pyver, source_folder, tmp_folder,
env = get_environ(tmp_folder)
env["PYTHONPATH"] = source_folder
+ env["CONAN_RECIPE_LINTER"] = "False"
env["CONAN_LOGGING_LEVEL"] = "50" if platform.system() == "Darwin" else "50"
env["CHANGE_AUTHOR_DISPLAY_NAME"] = ""
with chdir(source_folder):
diff --git a/conans/client/build/compiler_flags.py b/conans/client/build/compiler_flags.py
index 6763defaa..8a483fb63 100644
--- a/conans/client/build/compiler_flags.py
+++ b/conans/client/build/compiler_flags.py
@@ -64,9 +64,9 @@ def libcxx_flag(compiler, libcxx):
return '-stdlib=libc++'
elif str(compiler) == 'sun-cc':
return ({"libCstd": "-library=Cstd",
- "libstdcxx": "-library=stdcxx4",
- "libstlport": "-library=stlport4",
- "libstdc++": "-library=stdcpp"}.get(libcxx, ""))
+ "libstdcxx": "-library=stdcxx4",
+ "libstlport": "-library=stlport4",
+ "libstdc++": "-library=stdcpp"}.get(libcxx, ""))
return ""
diff --git a/conans/client/build/cppstd_flags.py b/conans/client/build/cppstd_flags.py
index 407cca443..e5ca936c3 100644
--- a/conans/client/build/cppstd_flags.py
+++ b/conans/client/build/cppstd_flags.py
@@ -24,16 +24,18 @@ def cppstd_flag(compiler, compiler_version, cppstd):
def cppstd_default(compiler, compiler_version):
-
default = {"gcc": _gcc_cppstd_default(compiler_version),
- "clang": "gnu++98",
- "apple-clang": "gnu++98",
+ "clang": _clang_cppstd_default(compiler_version),
+ "apple-clang": "gnu98",
"Visual Studio": _visual_cppstd_default(compiler_version)}.get(str(compiler), None)
return default
-def _gcc_cppstd_default(compiler_version):
+def _clang_cppstd_default(compiler_version):
+ return "gnu98" if Version(compiler_version) < "6.0" else "gnu14"
+
+def _gcc_cppstd_default(compiler_version):
return "gnu98" if Version(compiler_version) < "6.1" else "gnu14"
@@ -113,12 +115,17 @@ def _cppstd_clang(clang_version, cppstd):
if Version(clang_version) >= "3.5":
v14 = "c++14"
vgnu14 = "gnu++14"
- v17 = "c++1z"
- vgnu17 = "gnu++1z"
elif Version(clang_version) >= "3.4":
v14 = "c++1y"
vgnu14 = "gnu++1y"
+ if Version(clang_version) >= "5":
+ v17 = "c++17"
+ vgnu17 = "gnu++17"
+ elif Version(clang_version) >= "3.5":
+ v17 = "c++1z"
+ vgnu17 = "gnu++1z"
+
flag = {"98": v98, "gnu98": vgnu98,
"11": v11, "gnu11": vgnu11,
"14": v14, "gnu14": vgnu14,
diff --git a/conans/client/cmd/export_linter.py b/conans/client/cmd/export_linter.py
index 2205b56e2..20f63976e 100644
--- a/conans/client/cmd/export_linter.py
+++ b/conans/client/cmd/export_linter.py
@@ -1,14 +1,12 @@
import os
import json
import sys
-import six
-from six import StringIO
-
-from pylint.reporters.json import JSONReporter
-from pylint.lint import Run
+import platform
from conans.client.output import Color
from conans.errors import ConanException
+from subprocess import PIPE, Popen
+from conans import __path__ as root_path
def conan_linter(conanfile_path, out):
@@ -18,76 +16,48 @@ def conan_linter(conanfile_path, out):
apply_lint = os.environ.get("CONAN_RECIPE_LINTER", True)
if not apply_lint or apply_lint == "False":
return
+
+ dir_path = os.path.dirname(root_path[0])
+ dirname = os.path.dirname(conanfile_path)
+ hook = '--init-hook="import sys;sys.path.extend([\'%s\', \'%s\'])"' % (dirname, dir_path)
+
try:
- dirname = os.path.dirname(conanfile_path)
- sys.path.append(dirname)
- py3_msgs = _lint_py3(conanfile_path)
+ py3_msgs = None
+ msgs, py3_msgs = _normal_linter(conanfile_path, hook)
+ except Exception as e:
+ out.warn("Failed pylint: %s" % e)
+ else:
if py3_msgs:
out.writeln("Python 3 incompatibilities\n ERROR: %s"
% "\n ERROR: ".join(py3_msgs),
front=Color.BRIGHT_MAGENTA)
- msgs = _normal_linter(conanfile_path)
if msgs:
out.writeln("Linter warnings\n WARN: %s" % "\n WARN: ".join(msgs),
front=Color.MAGENTA)
pylint_werr = os.environ.get("CONAN_PYLINT_WERR", None)
if pylint_werr and (py3_msgs or msgs):
raise ConanException("Package recipe has linter errors. Please fix them.")
- finally:
- sys.path.pop()
-
-
-class _WritableObject(object):
- def __init__(self):
- self.content = []
-
- def write(self, st):
- self.content.append(st)
def _runner(args):
- try:
- output = _WritableObject()
- stdout_ = sys.stderr
- stream = StringIO()
- sys.stderr = stream
- Run(args, reporter=JSONReporter(output), exit=False)
- finally:
- sys.stderr = stdout_
- try:
- output = "".join(output.content)
- return json.loads(output)
- except ValueError:
- return []
+ command = ["pylint", "--output-format=json"] + args
+ command = " ".join(command)
+ shell = True if platform.system() != "Windows" else False
+ proc = Popen(command, shell=shell, bufsize=10, stdout=PIPE, stderr=PIPE)
+ stdout, _ = proc.communicate()
+ return json.loads(stdout) if stdout else {}
-def _lint_py3(conanfile_path):
- if six.PY3:
- return
-
- args = ['--py3k', "--reports=no", "--disable=no-absolute-import", "--persistent=no",
- conanfile_path]
-
- output_json = _runner(args)
-
- result = []
- for msg in output_json:
- if msg.get("type") in ("warning", "error"):
- result.append("Py3 incompatibility. Line %s: %s"
- % (msg.get("line"), msg.get("message")))
- return result
-
-
-def _normal_linter(conanfile_path):
- args = ["--reports=no", "--disable=no-absolute-import", "--persistent=no", conanfile_path]
+def _normal_linter(conanfile_path, hook):
+ args = ['--py3k', "--enable=all", "--reports=no", "--disable=no-absolute-import", "--persistent=no",
+ hook, '"%s"' % conanfile_path]
pylintrc = os.environ.get("CONAN_PYLINTRC", None)
if pylintrc:
if not os.path.exists(pylintrc):
raise ConanException("File %s defined by PYLINTRC doesn't exist" % pylintrc)
- args.append('--rcfile=%s' % pylintrc)
+ args.append('--rcfile="%s"' % pylintrc)
output_json = _runner(args)
-
dynamic_fields = ("source_folder", "build_folder", "package_folder", "info_build",
"build_requires", "info")
@@ -107,9 +77,14 @@ def _normal_linter(conanfile_path):
return True
result = []
+ py3msgs = []
for msg in output_json:
if msg.get("type") in ("warning", "error"):
- if _accept_message(msg):
+ message_id = msg.get("symbol")
+ if message_id in ("print-statement", "dict-iter-method"):
+ py3msgs.append("Py3 incompatibility. Line %s: %s"
+ % (msg.get("line"), msg.get("message")))
+ elif _accept_message(msg):
result.append("Linter. Line %s: %s" % (msg.get("line"), msg.get("message")))
- return result
+ return result, py3msgs
diff --git a/conans/client/command.py b/conans/client/command.py
index b27c913c5..df9034e9a 100644
--- a/conans/client/command.py
+++ b/conans/client/command.py
@@ -35,10 +35,13 @@ class Extender(argparse.Action):
# share this destination.
parser.set_defaults(**{self.dest: None})
- try:
- dest.extend(values)
- except ValueError:
+ if isinstance(values, str):
dest.append(values)
+ elif values:
+ try:
+ dest.extend(values)
+ except ValueError:
+ dest.append(values)
class OnceArgument(argparse.Action):
@@ -1235,7 +1238,7 @@ def _add_common_install_arguments(parser, build_help):
'-e CXX=/usr/bin/clang++',
nargs=1, action=Extender)
if build_help:
- parser.add_argument("-b", "--build", action=Extender, nargs="*", help=build_help)
+ parser.add_argument("-b", "--build", action=Extender, nargs="?", help=build_help)
_help_build_policies = '''Optional, use it to choose if you want to build from sources:
diff --git a/conans/client/manager.py b/conans/client/manager.py
index 6fb69bba0..d5415ace3 100644
--- a/conans/client/manager.py
+++ b/conans/client/manager.py
@@ -538,7 +538,9 @@ class ConanManager(object):
if not remote:
remote = remote_proxy.registry.default_remote.name
name, password = self._user_io.request_login(remote_name=remote, username=name)
- return remote_proxy.authenticate(name, password)
+
+ all_remotes = True if remote is None else False
+ return remote_proxy.authenticate(name, password, all_remotes=all_remotes)
def get_path(self, reference, package_id=None, path=None, remote=None):
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
diff --git a/conans/client/proxy.py b/conans/client/proxy.py
index 19f114f3e..bdfd72041 100644
--- a/conans/client/proxy.py
+++ b/conans/client/proxy.py
@@ -395,13 +395,22 @@ class ConanProxy(object):
output.warn('Binary for %s not in remote: %s' % (package_id, str(e)))
return False
- def authenticate(self, name, password):
- if not name: # List all users, from all remotes
- remotes = self._registry.remotes
- if not remotes:
- self._out.error("No remotes defined")
- for remote in remotes:
- self._remote_manager.authenticate(remote, None, None)
- return
- remote, _ = self._get_remote()
- return self._remote_manager.authenticate(remote, name, password)
+ def authenticate(self, name, password, all_remotes=False):
+ """
+ Manage user auth against remote.
+ Also displays a list of authenticated users against remote(s) if user name is evaluated to False.
+
+ :param name: user name string
+ :param password: password string
+ :param all_remotes: True/False to use all available remotes to display a list of authenticated users if
+ user name is evaluated to False.
+ """
+ current_remote, _ = self._get_remote()
+
+ if name:
+ return self._remote_manager.authenticate(current_remote, name, password)
+
+ # List all users from required remotes
+ remotes = self._registry.remotes if all_remotes else [current_remote]
+ for remote in remotes:
+ self._remote_manager.authenticate(remote, None, None)
diff --git a/conans/client/tools/oss.py b/conans/client/tools/oss.py
index cd7aa79b3..d0713c0e5 100644
--- a/conans/client/tools/oss.py
+++ b/conans/client/tools/oss.py
@@ -123,7 +123,7 @@ class OSInfo(object):
def with_zypper(self):
return self.is_linux and self.linux_distro in \
("opensuse", "sles")
-
+
@staticmethod
def get_win_os_version():
"""
diff --git a/pyinstaller.py b/pyinstaller.py
index 485d832a4..368963699 100644
--- a/pyinstaller.py
+++ b/pyinstaller.py
@@ -87,7 +87,7 @@ def pyinstall(source_folder):
conan_path = os.path.join(source_folder, 'conans', 'conan.py')
conan_server_path = os.path.join(source_folder, 'conans', 'conan_server.py')
conan_build_info_path = os.path.join(source_folder, "conans/build_info/command.py")
- hidden = "--hidden-import=glob --hidden-import=pylint.reporters.text"
+ hidden = "--hidden-import=glob"
if platform.system() != "Windows":
hidden += " --hidden-import=setuptools.msvc"
win_ver = ""
| Clang 6.0 implications in cppstd flags
We have added to the `settings.yml` clang 6.0, but we have to review the `cppstd` flags and `package_id` to adjust it correctly and knowing the default one of the compiler to keep the compatibility of packages that do not specify the `cppstd` setting.
| conan-io/conan | diff --git a/conans/test/build_helpers/cpp_std_flags_test.py b/conans/test/build_helpers/cpp_std_flags_test.py
index a8448b6c2..b110bb3f4 100644
--- a/conans/test/build_helpers/cpp_std_flags_test.py
+++ b/conans/test/build_helpers/cpp_std_flags_test.py
@@ -78,26 +78,26 @@ class CompilerFlagsTest(unittest.TestCase):
self.assertEquals(cppstd_flag("clang", "5", "11"), '-std=c++11')
self.assertEquals(cppstd_flag("clang", "5", "14"), '-std=c++14')
self.assertEquals(cppstd_flag("clang", "5", "gnu14"), '-std=gnu++14')
- self.assertEquals(cppstd_flag("clang", "5", "17"), '-std=c++1z')
+ self.assertEquals(cppstd_flag("clang", "5", "17"), '-std=c++17')
self.assertEquals(cppstd_flag("clang", "5.1", "11"), '-std=c++11')
self.assertEquals(cppstd_flag("clang", "5.1", "14"), '-std=c++14')
- self.assertEquals(cppstd_flag("clang", "5.1", "17"), '-std=c++1z')
+ self.assertEquals(cppstd_flag("clang", "5.1", "17"), '-std=c++17')
self.assertEquals(cppstd_flag("clang", "7", "11"), '-std=c++11')
self.assertEquals(cppstd_flag("clang", "7", "14"), '-std=c++14')
- self.assertEquals(cppstd_flag("clang", "7", "17"), '-std=c++1z')
+ self.assertEquals(cppstd_flag("clang", "7", "17"), '-std=c++17')
def test_clang_cppstd_defaults(self):
- self.assertEquals(cppstd_default("clang", "2"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "2.1"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "3.0"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "3.1"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "3.4"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "3.5"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "5"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "5.1"), "gnu++98")
- self.assertEquals(cppstd_default("clang", "7"), "gnu++98")
+ self.assertEquals(cppstd_default("clang", "2"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "2.1"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "3.0"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "3.1"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "3.4"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "3.5"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "5"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "5.1"), "gnu98")
+ self.assertEquals(cppstd_default("clang", "7"), "gnu14")
def test_apple_clang_cppstd_flags(self):
self.assertEquals(cppstd_flag("apple-clang", "3.9", "98"), None)
@@ -137,14 +137,14 @@ class CompilerFlagsTest(unittest.TestCase):
self.assertEquals(cppstd_flag("apple-clang", "9", "17"), "-std=c++1z")
def test_apple_clang_cppstd_defaults(self):
- self.assertEquals(cppstd_default("apple-clang", "2"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "3"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "4"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "5"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "6"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "7"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "8"), "gnu++98")
- self.assertEquals(cppstd_default("apple-clang", "9"), "gnu++98")
+ self.assertEquals(cppstd_default("apple-clang", "2"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "3"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "4"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "5"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "6"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "7"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "8"), "gnu98")
+ self.assertEquals(cppstd_default("apple-clang", "9"), "gnu98")
def test_visual_cppstd_flags(self):
self.assertEquals(cppstd_flag("Visual Studio", "12", "11"), None)
diff --git a/conans/test/command/export_linter_test.py b/conans/test/command/export_linter_test.py
index 3fd7b0da6..54b320739 100644
--- a/conans/test/command/export_linter_test.py
+++ b/conans/test/command/export_linter_test.py
@@ -3,6 +3,7 @@ from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient
import six
import os
+from conans import tools
conanfile = """
@@ -19,6 +20,14 @@ class TestConan(ConanFile):
class ExportLinterTest(unittest.TestCase):
+ def setUp(self):
+ self.old_env = dict(os.environ)
+ os.environ["CONAN_RECIPE_LINTER"] = "True"
+
+ def tearDown(self):
+ os.environ.clear()
+ os.environ.update(self.old_env)
+
def test_basic(self):
client = TestClient()
client.save({CONANFILE: conanfile})
@@ -36,10 +45,10 @@ class ExportLinterTest(unittest.TestCase):
def test_disable_linter(self):
client = TestClient()
client.save({CONANFILE: conanfile})
- client.run("config set general.recipe_linter=False")
- client.run("export . lasote/stable")
- self.assertNotIn("ERROR: Py3 incompatibility", client.user_io.out)
- self.assertNotIn("WARN: Linter", client.user_io.out)
+ with tools.environment_append({"CONAN_RECIPE_LINTER": "False"}):
+ client.run("export . lasote/stable")
+ self.assertNotIn("ERROR: Py3 incompatibility", client.user_io.out)
+ self.assertNotIn("WARN: Linter", client.user_io.out)
def test_custom_rc_linter(self):
client = TestClient()
@@ -127,7 +136,6 @@ class BaseConan(ConanFile):
client.user_io.out)
def export_deploy_test(self):
-
conanfile = """
from conans import ConanFile
class BaseConan(ConanFile):
diff --git a/conans/test/command/install_test.py b/conans/test/command/install_test.py
index 9cc8437e9..543241c62 100644
--- a/conans/test/command/install_test.py
+++ b/conans/test/command/install_test.py
@@ -453,3 +453,42 @@ class Pkg(ConanFile):
self.assertTrue(error)
self.assertIn("ERROR: Unable to find 'Hello/0.1@lasote/stable' in remotes",
client.out)
+
+ def install_argument_order_test(self):
+ # https://github.com/conan-io/conan/issues/2520
+
+ conanfile_boost = """from conans import ConanFile
+class BoostConan(ConanFile):
+ name = "boost"
+ version = "0.1"
+ options = {"shared": [True, False]}
+ default_options = "shared=True"
+"""
+ conanfile = """from conans import ConanFile
+class TestConan(ConanFile):
+ name = "Hello"
+ version = "0.1"
+ requires = "boost/0.1@conan/stable"
+"""
+ client = TestClient()
+ client.save({"conanfile.py": conanfile,
+ "conanfile_boost.py": conanfile_boost})
+ client.run("create conanfile_boost.py conan/stable")
+ client.run("install . -o boost:shared=True --build=missing")
+ output_0 = "%s" % client.out
+ client.run("install . -o boost:shared=True --build missing")
+ output_1 = "%s" % client.out
+ client.run("install -o boost:shared=True . --build missing")
+ output_2 = "%s" % client.out
+ client.run("install -o boost:shared=True --build missing .")
+ output_3 = "%s" % client.out
+ self.assertNotIn("ERROR", output_3)
+ self.assertEqual(output_0, output_1)
+ self.assertEqual(output_1, output_2)
+ self.assertEqual(output_2, output_3)
+
+ client.run("install -o boost:shared=True --build boost . --build missing")
+ output_4 = "%s" % client.out
+ client.run("install -o boost:shared=True --build missing --build boost .")
+ output_5 = "%s" % client.out
+ self.assertEqual(output_4, output_5)
diff --git a/conans/test/command/user_test.py b/conans/test/command/user_test.py
index a0a6a8a60..c247b910f 100644
--- a/conans/test/command/user_test.py
+++ b/conans/test/command/user_test.py
@@ -4,13 +4,44 @@ from conans.test.utils.tools import TestClient, TestServer
class UserTest(unittest.TestCase):
- def test_command_user(self):
- """ Test that the user can be shown and changed, and it is reflected in the
- user cache localdb
+ def test_command_user_no_remotes(self):
+ """ Test that proper error is reported when no remotes are defined and conan user is executed
"""
client = TestClient()
- client.run('user')
- self.assertIn("ERROR: No remotes defined", client.user_io.out)
+ with self.assertRaises(Exception):
+ client.run("user")
+ self.assertIn("ERROR: No default remote defined", client.user_io.out)
+
+ with self.assertRaises(Exception):
+ client.run("user -r wrong_remote")
+ self.assertIn("ERROR: No remote 'wrong_remote' defined", client.user_io.out)
+
+ def test_command_user_list(self):
+ """ Test list of user is reported for all remotes or queried remote
+ """
+ servers = {
+ "default": TestServer(),
+ "test_remote_1": TestServer(),
+ }
+ client = TestClient(servers=servers)
+
+ # Test with wrong remote right error is reported
+ with self.assertRaises(Exception):
+ client.run("user -r Test_Wrong_Remote")
+ self.assertIn("ERROR: No remote 'Test_Wrong_Remote' defined", client.user_io.out)
+
+ # Test user list for requested remote reported
+ client.run("user -r test_remote_1")
+ self.assertIn("Current 'test_remote_1' user: None (anonymous)", client.user_io.out)
+ self.assertNotIn("Current 'default' user: None (anonymous)", client.user_io.out)
+
+ # Test user list for all remotes is reported
+ client.run("user")
+ self.assertIn(
+ ("Current 'default' user: None (anonymous)\n"
+ "Current 'test_remote_1' user: None (anonymous)"),
+ client.user_io.out
+ )
def test_with_remote_no_connect(self):
test_server = TestServer()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 9
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@01b3d70da53db12cab2ca5d2c68464bf27652745#egg=conan
coverage==4.2
cryptography==2.1.4
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- cryptography==2.1.4
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_cppstd_flags",
"conans/test/command/export_linter_test.py::ExportLinterTest::test_catch_em_all",
"conans/test/command/user_test.py::UserTest::test_clean",
"conans/test/command/user_test.py::UserTest::test_command_user_list",
"conans/test/command/user_test.py::UserTest::test_command_user_no_remotes"
]
| [
"conans/test/command/export_linter_test.py::ExportLinterTest::test_basic",
"conans/test/command/export_linter_test.py::ExportLinterTest::test_custom_rc_linter",
"conans/test/command/export_linter_test.py::ExportLinterTest::test_dynamic_fields",
"conans/test/command/export_linter_test.py::ExportLinterTest::test_warning_as_errors"
]
| [
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_flags",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_gcc_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_gcc_cppstd_flags",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_visual_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_visual_cppstd_flags",
"conans/test/command/export_linter_test.py::ExportLinterTest::test_disable_linter",
"conans/test/command/user_test.py::UserTest::test_command_interactive_only",
"conans/test/command/user_test.py::UserTest::test_command_user_with_interactive_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password_spaces",
"conans/test/command/user_test.py::UserTest::test_with_remote_no_connect"
]
| []
| MIT License | 2,315 | [
"pyinstaller.py",
"conans/client/build/compiler_flags.py",
"conans/client/command.py",
"conans/client/manager.py",
"conans/client/build/cppstd_flags.py",
"conans/client/cmd/export_linter.py",
"conans/client/proxy.py",
"conans/client/tools/oss.py",
".ci/jenkins/runner.py"
]
| [
"pyinstaller.py",
"conans/client/build/compiler_flags.py",
"conans/client/command.py",
"conans/client/manager.py",
"conans/client/build/cppstd_flags.py",
"conans/client/cmd/export_linter.py",
"conans/client/proxy.py",
"conans/client/tools/oss.py",
".ci/jenkins/runner.py"
]
|
nipy__nipype-2502 | 27b33ef128f761cc7166205fc42ef7c04a8d4e4f | 2018-03-20 21:19:03 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py
index 13a065c27..9ede8d13e 100644
--- a/nipype/interfaces/afni/preprocess.py
+++ b/nipype/interfaces/afni/preprocess.py
@@ -218,7 +218,9 @@ class AllineateInputSpec(AFNICommandInputSpec):
out_file = File(
desc='output file from 3dAllineate',
argstr='-prefix %s',
- genfile=True,
+ name_template='%s_allineate',
+ name_source='in_file',
+ hash_files=False,
xor=['allcostx'])
out_param_file = File(
argstr='-1Dparam_save %s',
@@ -424,11 +426,11 @@ class AllineateInputSpec(AFNICommandInputSpec):
_dirs = ['X', 'Y', 'Z', 'I', 'J', 'K']
nwarp_fixmot = traits.List(
traits.Enum(*_dirs),
- argstr='-nwarp_fixmot%s',
+ argstr='-nwarp_fixmot%s...',
desc='To fix motion along directions.')
nwarp_fixdep = traits.List(
traits.Enum(*_dirs),
- argstr='-nwarp_fixdep%s',
+ argstr='-nwarp_fixdep%s...',
desc='To fix non-linear warp dependency along directions.')
verbose = traits.Bool(
argstr='-verb', desc='Print out verbose progress reports.')
@@ -465,7 +467,6 @@ class Allineate(AFNICommand):
'3dAllineate -source functional.nii -prefix functional_allineate.nii -1Dmatrix_apply cmatrix.mat'
>>> res = allineate.run() # doctest: +SKIP
- >>> from nipype.interfaces import afni
>>> allineate = afni.Allineate()
>>> allineate.inputs.in_file = 'functional.nii'
>>> allineate.inputs.reference = 'structural.nii'
@@ -473,23 +474,22 @@ class Allineate(AFNICommand):
>>> allineate.cmdline
'3dAllineate -source functional.nii -base structural.nii -allcostx |& tee out.allcostX.txt'
>>> res = allineate.run() # doctest: +SKIP
+
+ >>> allineate = afni.Allineate()
+ >>> allineate.inputs.in_file = 'functional.nii'
+ >>> allineate.inputs.reference = 'structural.nii'
+ >>> allineate.inputs.nwarp_fixmot = ['X', 'Y']
+ >>> allineate.cmdline
+ '3dAllineate -source functional.nii -nwarp_fixmotX -nwarp_fixmotY -prefix functional_allineate -base structural.nii'
+ >>> res = allineate.run() # doctest: +SKIP
"""
_cmd = '3dAllineate'
input_spec = AllineateInputSpec
output_spec = AllineateOutputSpec
- def _format_arg(self, name, trait_spec, value):
- if name == 'nwarp_fixmot' or name == 'nwarp_fixdep':
- arg = ' '.join([trait_spec.argstr % v for v in value])
- return arg
- return super(Allineate, self)._format_arg(name, trait_spec, value)
-
def _list_outputs(self):
- outputs = self.output_spec().get()
-
- if self.inputs.out_file:
- outputs['out_file'] = op.abspath(self.inputs.out_file)
+ outputs = super(Allineate, self)._list_outputs()
if self.inputs.out_weight_file:
outputs['out_weight_file'] = op.abspath(
@@ -512,16 +512,10 @@ class Allineate(AFNICommand):
outputs['out_param_file'] = op.abspath(
self.inputs.out_param_file)
- if isdefined(self.inputs.allcostx):
- outputs['allcostX'] = os.path.abspath(
- os.path.join(os.getcwd(), self.inputs.allcostx))
+ if self.inputs.allcostx:
+ outputs['allcostX'] = os.path.abspath(self.inputs.allcostx)
return outputs
- def _gen_filename(self, name):
- if name == 'out_file':
- return self._list_outputs()[name]
- return None
-
class AutoTcorrelateInputSpec(AFNICommandInputSpec):
in_file = File(
diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py
index c199af3ca..d15628fd6 100644
--- a/nipype/interfaces/base/core.py
+++ b/nipype/interfaces/base/core.py
@@ -1072,6 +1072,12 @@ class CommandLine(BaseInterface):
if not isdefined(retval) or "%s" in retval:
if not trait_spec.name_source:
return retval
+
+ # Do not generate filename when excluded by other inputs
+ if trait_spec.xor and any(isdefined(getattr(self.inputs, field))
+ for field in trait_spec.xor):
+ return retval
+
if isdefined(retval) and "%s" in retval:
name_template = retval
else:
| issues with afni.allineate w/ solution (and possible issue with genfile parameter)
### Summary
Hi all. Just started learning nipype, and it's been pretty great so far. I've only run into issues with the `afni.allineate` interface. I was able to fix it on my own, but I thought I should post an issue here for awareness (plus I'm not sure how to do a pull request...)
There are 2 problems I've encountered. The first is that `afni.allineate` consistently fails the hash check when placed in a workflow and always reruns. I think this is because the `hash_files` parameter needs to be set to `False`.
```
out_file = File(
desc='output file from 3dAllineate',
argstr='-prefix %s',
genfile=True,
hash_files=False, # <-- when this is added it starts working again
xor=['allcostx'])
```
Referencing the docs, I think this is an appropriate change:
> `hash_files`
To be used with inputs that are defining output filenames. When this flag is set to false any Nipype will not try to hash any files described by this input. This is useful to avoid rerunning when the specified output file already exists and has changed.
The second issue is that the prefix (`out_file`) argument does not get generated when used in a workflow. This issue is also referenced here: #2216, but with a temporary workaround.
My understanding is that when `genfile` is set to `True`, the `_gen_filename()` method for the interface gets called when the user does not specify a value. Looking at the code, the `_gen_filename()` does seems to implemented, but not correctly:
```
def _gen_filename(self, name): # this gets called when `out_file` is not defined
if name == 'out_file':
return self._list_outputs()[name] # <-- we are returning the value of `out_file`, which is undefined...
return None
```
So I changed it to this:
```
def _gen_filename(self, name): # this gets called when `out_file` is not defined
if name == 'out_file':
return self._gen_fname(self.inputs.in_file,op.dirname(self.inputs.in_file),suffix='_allineate')
return None
```
Which should define the `out_file` to be in the same location and have the same name as the `in_file`, but with the '_allineate' suffix.
When I run this, the problem seems to be fixed, as the prefix is now defined:
```
[Node] Running "3dallineate_orig" ("nipype.interfaces.afni.preprocess.Allineate"), a CommandLine Interface with command:
3dAllineate -source /home/vana/Projects/p3/tmp/P3/_subject_id_sub-CTS200/3dallineate_orig/orig_out.nii.gz -prefix /home/vana/Projects/p3/tmp/P3/_subject_id_sub-CTS200/3dallineate_orig/orig_out_allineate.nii.gz -1Dmatrix_save FSorig.XFM.FS2MPR.aff12.1D -overwrite -base /home/vana/Projects/p3/dataset/sub-CTS200/anat/sub-CTS200_T1w.nii.gz
```
But downstream nodes don't seem to notice that the `out_file` is actually defined now... (Note: I have a node whose in_file is connected to the out_file of the allineate node)
```
[Node] Error on "P3.3drefit1" (/home/vana/Projects/p3/tmp/P3/_subject_id_sub-CTS200/3drefit1)
Traceback (most recent call last):
File "./preproc.py", line 293, in <module>
wf.run()
File "/home/vana/.local/lib/python3.6/site-packages/nipype/pipeline/engine/workflows.py", line 602, in run
runner.run(execgraph, updatehash=updatehash, config=self.config)
File "/home/vana/.local/lib/python3.6/site-packages/nipype/pipeline/plugins/linear.py", line 44, in run
node.run(updatehash=updatehash)
File "/home/vana/.local/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py", line 487, in run
result = self._run_interface(execute=True)
File "/home/vana/.local/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py", line 571, in _run_interface
return self._run_command(execute)
File "/home/vana/.local/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py", line 638, in _run_command
cmd = self._interface.cmdline
File "/home/vana/.local/lib/python3.6/site-packages/nipype/interfaces/base/core.py", line 935, in cmdline
self._check_mandatory_inputs()
File "/home/vana/.local/lib/python3.6/site-packages/nipype/interfaces/base/core.py", line 389, in _check_mandatory_inputs
raise ValueError(msg)
ValueError: Refit requires a value for input 'in_file'. For a list of required inputs, see Refit.help()
```
After looking at some of the fsl interfaces (and some trial-and-error), I figured out that the downstream nodes seem to be getting the information for their inputs from the `_list_outputs` method of the upstream node, and ignore `_gen_filename`. So when I do this:
```
def _gen_outfilename(self):
out_file = self.inputs.out_file
if not isdefined(out_file) and isdefined(self.inputs.in_file) and not isdefined(self.inputs.allcostx):
out_file = op.abspath(self._gen_fname(self.inputs.in_file,op.dirname(self.inputs.in_file),suffix='_allineate'))
return out_file
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self._gen_outfilename()
# other stuff below this ...
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
```
Everything works! But then the question becomes, what's the point of the `genfile` parameter if I have to define a method that checks if the input is defined anyway?
I'm not sure how to do a pull request to this repo, so I've attached my git patch below. (This is on nipype 1.0.1)
[allineate.patch.tar.gz](https://github.com/nipy/nipype/files/1826031/allineate.patch.tar.gz) | nipy/nipype | diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py
index 59b1929fb..1c5476a42 100644
--- a/nipype/interfaces/afni/tests/test_auto_Allineate.py
+++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py
@@ -61,12 +61,14 @@ def test_Allineate_inputs():
usedefault=True,
),
nwarp=dict(argstr='-nwarp %s', ),
- nwarp_fixdep=dict(argstr='-nwarp_fixdep%s', ),
- nwarp_fixmot=dict(argstr='-nwarp_fixmot%s', ),
+ nwarp_fixdep=dict(argstr='-nwarp_fixdep%s...', ),
+ nwarp_fixmot=dict(argstr='-nwarp_fixmot%s...', ),
one_pass=dict(argstr='-onepass', ),
out_file=dict(
argstr='-prefix %s',
- genfile=True,
+ hash_files=False,
+ name_source='in_file',
+ name_template='%s_allineate',
xor=['allcostx'],
),
out_matrix=dict(
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
configparser==5.2.0
coverage==6.2
cycler==0.11.0
decorator==4.4.2
docutils==0.18.1
execnet==1.9.0
funcsigs==1.0.2
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
kiwisolver==1.3.1
lxml==5.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@27b33ef128f761cc7166205fc42ef7c04a8d4e4f#egg=nipype
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-env==0.6.2
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
rdflib==5.0.0
requests==2.27.1
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- configparser==5.2.0
- coverage==6.2
- cycler==0.11.0
- decorator==4.4.2
- docutils==0.18.1
- execnet==1.9.0
- funcsigs==1.0.2
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- lxml==5.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-env==0.6.2
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rdflib==5.0.0
- requests==2.27.1
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/interfaces/afni/tests/test_auto_Allineate.py::test_Allineate_inputs"
]
| []
| [
"nipype/interfaces/afni/tests/test_auto_Allineate.py::test_Allineate_outputs"
]
| []
| Apache License 2.0 | 2,317 | [
"nipype/interfaces/afni/preprocess.py",
"nipype/interfaces/base/core.py"
]
| [
"nipype/interfaces/afni/preprocess.py",
"nipype/interfaces/base/core.py"
]
|
|
Sygic__sygic-maps-services-python-2 | 936371adf4a71c6cde5751af7fe9bbead1a470b7 | 2018-03-21 13:09:35 | 936371adf4a71c6cde5751af7fe9bbead1a470b7 | diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..6410fb5
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,11 @@
+language: python
+
+matrix:
+ include:
+ - { python: '3.6', env: TOXENV=py36 }
+
+install:
+ - pip install tox
+
+script:
+ - tox
\ No newline at end of file
diff --git a/README.md b/README.md
index 856e6d1..c6255e4 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ Example of using Sygic [Geocoding API](https://www.sygic.com/developers/maps-api
### Client initialization
```python
-import sygicmaps as s
+import sygicmaps.client as s
# Create a client with your API key
client = s.Client(key='Your API key')
diff --git a/sygicmaps/client.py b/sygicmaps/client.py
index 9e93c78..fe95913 100644
--- a/sygicmaps/client.py
+++ b/sygicmaps/client.py
@@ -6,16 +6,23 @@ import requests
from sygicmaps.input import Input
-SERVICES_URL = "https://eu-geocoding.api.sygic.com/v0/api/"
+SERVICES_URL = "https://{}-geocoding.api.sygic.com"
-GEOCODE_BATCH_URL_PATH = "batch/geocode"
+GEOCODE_URL_PATH = "/v0/api/geocode"
+GEOCODE_BATCH_URL_PATH = "/v0/api/batch/geocode"
+REVERSE_GEOCODE_URL_PATH = "/v0/api/reversegeocode"
class Client(object):
- def __init__(self, key=None):
+ def __init__(self, key=None, region='eu', custom_url=None):
if not key:
raise ValueError("API key is not set.")
+ if not custom_url:
+ self.services_url = SERVICES_URL.format(region)
+ else:
+ self.services_url = custom_url
+
self.session = requests.Session()
self.key = key
@@ -51,7 +58,7 @@ class Client(object):
requests_method = self.session.get
- url = SERVICES_URL + "/geocode"
+ url = self.services_url + GEOCODE_URL_PATH
response = requests_method(url, params=params)
body = response.json()
@@ -68,7 +75,7 @@ class Client(object):
requests_method = self.session.get
- url = SERVICES_URL + "/reversegeocode"
+ url = self.services_url + REVERSE_GEOCODE_URL_PATH
response = requests_method(url, params=params)
body = response.json()
@@ -90,7 +97,7 @@ class Client(object):
post_data = list(json.loads(json_string))
post_data = list(map(self.__remove_nulls, post_data))
- url = SERVICES_URL + GEOCODE_BATCH_URL_PATH
+ url = self.services_url + GEOCODE_BATCH_URL_PATH
params = {"key": self.key}
post_body = json.dumps(post_data)
r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'})
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..c8233f7
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,6 @@
+[tox]
+envlist = py36
+
+[testenv]
+deps = -rrequirements_tests.txt
+commands = nosetests
\ No newline at end of file
| There is no way to specify NAM / EU regions for geocoding. | Sygic/sygic-maps-services-python | diff --git a/requirements_tests.txt b/requirements_tests.txt
new file mode 100644
index 0000000..aa5d1ac
--- /dev/null
+++ b/requirements_tests.txt
@@ -0,0 +1,2 @@
+nose
+requests_mock
\ No newline at end of file
diff --git a/test/client_tests.py b/test/client_tests.py
new file mode 100644
index 0000000..86f7ce5
--- /dev/null
+++ b/test/client_tests.py
@@ -0,0 +1,66 @@
+import sygicmaps as c
+import requests_mock
+import unittest as ut
+import json
+
+
+class ClientTests(ut.TestCase):
+ def __get_sample_response_json(self):
+ sample_response_json = """ {
+ "results": [
+ {
+ "location": {
+ "country": "Germany",
+ "city": "Berlin",
+ "street": "Bernauer Straße",
+ "house_number": "36",
+ "zip": "13355",
+ "admin_level_1": "Berlin"
+ },
+ "geometry": {
+ "lat": 52.53874,
+ "lon": 13.39849
+ },
+ "confidence": 0.25454903381642513,
+ "distance": 0
+ }
+ ],
+ "status": "OK",
+ "copyright": "© 2018 Sygic a.s."
+ } """
+ return json.loads(sample_response_json)
+
+
+ @requests_mock.mock()
+ def test_default_call(self, m):
+ response_json = self.__get_sample_response_json()
+
+ print(response_json)
+ response_json_str = json.dumps(response_json)
+ response_results_json_str = json.dumps(response_json['results'])
+
+ m.get('https://eu-geocoding.api.sygic.com/v0/api/geocode', text=response_json_str)
+
+ client = c.Client(key="test-key")
+ result = client.geocode("Zochova 10 Bratislava")
+
+ result_json = json.dumps(result)
+
+ self.assertEqual(response_results_json_str, result_json)
+
+ @requests_mock.mock()
+ def test_region_parameter(self, m):
+ response_json = self.__get_sample_response_json()
+
+ print(response_json)
+ response_json_str = json.dumps(response_json)
+ response_results_json_str = json.dumps(response_json['results'])
+
+ m.get('https://na-geocoding.api.sygic.com/v0/api/geocode', text=response_json_str)
+
+ client = c.Client(key="test-key", region="na")
+ result = client.geocode("Zochova 10 Bratislava")
+
+ result_json = json.dumps(result)
+
+ self.assertEqual(response_results_json_str, result_json)
\ No newline at end of file
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
requests-mock==1.12.1
-e git+https://github.com/Sygic/sygic-maps-services-python.git@936371adf4a71c6cde5751af7fe9bbead1a470b7#egg=sygicmaps
tomli==2.2.1
urllib3==2.3.0
| name: sygic-maps-services-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- requests-mock==1.12.1
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/sygic-maps-services-python
| [
"test/client_tests.py::ClientTests::test_default_call",
"test/client_tests.py::ClientTests::test_region_parameter"
]
| []
| []
| []
| MIT License | 2,318 | [
".travis.yml",
"sygicmaps/client.py",
"README.md",
"tox.ini"
]
| [
".travis.yml",
"sygicmaps/client.py",
"README.md",
"tox.ini"
]
|
|
oasis-open__cti-python-stix2-145 | 33cfc4bb2786e6da0960bdaf9c8d19e4daebd938 | 2018-03-21 14:32:58 | 2d689815d743611a8f3ccd48ce5e2d1ec70695e5 | diff --git a/docs/conf.py b/docs/conf.py
index 49416a0..e53829b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -74,7 +74,7 @@ def get_property_type(prop):
return prop_class
-class STIXPropertyDocumenter(ClassDocumenter):
+class STIXAttributeDocumenter(ClassDocumenter):
"""Custom Sphinx extension to auto-document STIX properties.
Needed because descendants of _STIXBase use `_properties` dictionaries
@@ -97,23 +97,16 @@ class STIXPropertyDocumenter(ClassDocumenter):
obj = self.object
self.add_line(':Properties:', '<stixattr>')
for prop_name, prop in obj._properties.items():
- # Skip 'type'
- if prop_name == 'type':
- continue
-
# Add metadata about the property
prop_type = get_property_type(prop)
if prop_type == 'List':
prop_type = 'List of %ss' % get_property_type(prop.contained)
if prop.required:
prop_type += ', required'
- if 'Timestamp' in prop_type and hasattr(prop, 'default'):
- prop_type += ', default: current date/time'
prop_str = '**%s** (*%s*)' % (prop_name, prop_type)
self.add_line(' - %s' % prop_str, '<stixattr>')
-
self.add_line('', '<stixattr>')
def setup(app):
- app.add_autodocumenter(STIXPropertyDocumenter)
+ app.add_autodocumenter(STIXAttributeDocumenter)
diff --git a/docs/guide/taxii.ipynb b/docs/guide/taxii.ipynb
index 4045a98..ad06093 100644
--- a/docs/guide/taxii.ipynb
+++ b/docs/guide/taxii.ipynb
@@ -261,65 +261,25 @@
"\n",
"tc_store.add(ind)\n"
]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Bug and Workaround\n",
- "\n",
- "You may get an error similar to the following when adding STIX objects to a TAXIICollectionStore or TAXIICollectionSink:\n",
- "\n",
- "```\n",
- "TypeError: Object of type ThreatActor is not JSON serializable\n",
- "```\n",
- "\n",
- "This is a known bug and we are working to fix it. For more information, see [this GitHub issue](https://github.com/oasis-open/cti-python-stix2/issues/125) In the meantime, try this workaround:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "tc_sink.add(json.loads(Bundle(ta).serialize()))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Or bypass the TAXIICollection altogether and interact with the collection itself:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "collection.add_objects(json.loads(Bundle(ta).serialize()))"
- ]
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 2",
"language": "python",
- "name": "python3"
+ "name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
- "version": 3
+ "version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.3"
+ "pygments_lexer": "ipython2",
+ "version": "2.7.12"
}
},
"nbformat": 4,
diff --git a/stix2/base.py b/stix2/base.py
index 898f489..6bc3dd4 100644
--- a/stix2/base.py
+++ b/stix2/base.py
@@ -191,7 +191,7 @@ class _STIXBase(collections.Mapping):
**kwargs: The arguments for a json.dumps() call.
Returns:
- str: The serialized JSON object.
+ dict: The serialized JSON object.
Note:
The argument ``pretty=True`` will output the STIX object following
diff --git a/stix2/datastore/filesystem.py b/stix2/datastore/filesystem.py
index 26d0c58..b525932 100644
--- a/stix2/datastore/filesystem.py
+++ b/stix2/datastore/filesystem.py
@@ -301,11 +301,27 @@ class FileSystemSource(DataSource):
for path in include_paths:
for root, dirs, files in os.walk(path):
for file_ in files:
+ if not file_.endswith(".json"):
+ # skip non '.json' files as more likely to be random non-STIX files
+ continue
+
if not id_ or id_ == file_.split(".")[0]:
# have to load into memory regardless to evaluate other filters
- stix_obj = json.load(open(os.path.join(root, file_)))
- if stix_obj.get('type', '') == 'bundle':
- stix_obj = stix_obj['objects'][0]
+ try:
+ stix_obj = json.load(open(os.path.join(root, file_)))
+
+ if stix_obj["type"] == "bundle":
+ stix_obj = stix_obj["objects"][0]
+
+ # naive STIX type checking
+ stix_obj["type"]
+ stix_obj["id"]
+
+ except (ValueError, KeyError): # likely not a JSON file
+ print("filesytem TypeError raised")
+ raise TypeError("STIX JSON object at '{0}' could either not be parsed to "
+ "JSON or was not valid STIX JSON".format(os.path.join(root, file_)))
+
# check against other filters, add if match
all_data.extend(apply_common_filters([stix_obj], query))
diff --git a/stix2/datastore/taxii.py b/stix2/datastore/taxii.py
index cb136d9..5af4354 100644
--- a/stix2/datastore/taxii.py
+++ b/stix2/datastore/taxii.py
@@ -67,31 +67,27 @@ class TAXIICollectionSink(DataSink):
"""
if isinstance(stix_data, _STIXBase):
# adding python STIX object
- if stix_data["type"] == "bundle":
- bundle = stix_data.serialize(encoding="utf-8")
- else:
- bundle = Bundle(stix_data, allow_custom=self.allow_custom).serialize(encoding="utf-8")
+ bundle = dict(Bundle(stix_data, allow_custom=self.allow_custom))
elif isinstance(stix_data, dict):
# adding python dict (of either Bundle or STIX obj)
if stix_data["type"] == "bundle":
- bundle = parse(stix_data, allow_custom=self.allow_custom, version=version).serialize(encoding="utf-8")
+ bundle = stix_data
else:
- bundle = Bundle(stix_data, allow_custom=self.allow_custom).serialize(encoding="utf-8")
+ bundle = dict(Bundle(stix_data, allow_custom=self.allow_custom))
elif isinstance(stix_data, list):
# adding list of something - recurse on each
for obj in stix_data:
self.add(obj, version=version)
- return
elif isinstance(stix_data, str):
# adding json encoded string of STIX content
stix_data = parse(stix_data, allow_custom=self.allow_custom, version=version)
if stix_data["type"] == "bundle":
- bundle = stix_data.serialize(encoding="utf-8")
+ bundle = dict(stix_data)
else:
- bundle = Bundle(stix_data, allow_custom=self.allow_custom).serialize(encoding="utf-8")
+ bundle = dict(Bundle(stix_data, allow_custom=self.allow_custom))
else:
raise TypeError("stix_data must be as STIX object(or list of),json formatted STIX (or list of), or a json formatted STIX bundle")
| Rundown status of existing issue: FileSystem query bug
Bug: When querying FileSystem, it came back with JSON parse error. Specifically had to use a filter with “type” field to work. @mbastian1135 was trying to reproduce this, but having problems | oasis-open/cti-python-stix2 | diff --git a/stix2/test/test_filesystem.py b/stix2/test/test_filesystem.py
index 020fee5..f59136e 100644
--- a/stix2/test/test_filesystem.py
+++ b/stix2/test/test_filesystem.py
@@ -1,3 +1,4 @@
+import json
import os
import shutil
@@ -45,6 +46,41 @@ def fs_sink():
shutil.rmtree(os.path.join(FS_PATH, "campaign"), True)
[email protected]
+def bad_json_files():
+ # create erroneous JSON files for tests to make sure handled gracefully
+
+ with open(os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-non-json.txt"), "w+") as f:
+ f.write("Im not a JSON file")
+
+ with open(os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-bad-json.json"), "w+") as f:
+ f.write("Im not a JSON formatted file")
+
+ yield True # dummy yield so can have teardown
+
+ os.remove(os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-non-json.txt"))
+ os.remove(os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-bad-json.json"))
+
+
[email protected]
+def bad_stix_files():
+ # create erroneous STIX JSON files for tests to make sure handled correctly
+
+ # bad STIX object
+ stix_obj = {
+ "id": "intrusion-set--test-bad-stix",
+ "spec_version": "2.0"
+ # no "type" field
+ }
+
+ with open(os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-non-stix.json"), "w+") as f:
+ f.write(json.dumps(stix_obj))
+
+ yield True # dummy yield so can have teardown
+
+ os.remove(os.path.join(FS_PATH, "intrusion-set", "intrusion-set--test-non-stix.json"))
+
+
@pytest.fixture(scope='module')
def rel_fs_store():
cam = Campaign(id=CAMPAIGN_ID, **CAMPAIGN_KWARGS)
@@ -76,6 +112,26 @@ def test_filesystem_sink_nonexistent_folder():
assert "for STIX data does not exist" in str(excinfo)
+def test_filesystem_source_bad_json_file(fs_source, bad_json_files):
+ # this tests the handling of two bad json files
+ # - one file should just be skipped (silently) as its a ".txt" extension
+ # - one file should be parsed and raise Exception bc its not JSON
+ try:
+ fs_source.get("intrusion-set--test-bad-json")
+ except TypeError as e:
+ assert "intrusion-set--test-bad-json" in str(e)
+ assert "could either not be parsed to JSON or was not valid STIX JSON" in str(e)
+
+
+def test_filesystem_source_bad_stix_file(fs_source, bad_stix_files):
+ # this tests handling of bad STIX json object
+ try:
+ fs_source.get("intrusion-set--test-non-stix")
+ except TypeError as e:
+ assert "intrusion-set--test-non-stix" in str(e)
+ assert "could either not be parsed to JSON or was not valid STIX JSON" in str(e)
+
+
def test_filesytem_source_get_object(fs_source):
# get object
mal = fs_source.get("malware--6b616fc1-1505-48e3-8b2c-0d19337bff38")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 5
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pre-commit"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
entrypoints==0.4
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
nodeenv==1.6.0
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/oasis-open/cti-python-stix2.git@33cfc4bb2786e6da0960bdaf9c8d19e4daebd938#egg=stix2
stix2-patterns==2.0.0
taxii2-client==2.3.0
testpath==0.6.0
toml==0.10.2
tomli==1.2.3
tornado==6.1
tox==3.28.0
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- entrypoints==0.4
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- nodeenv==1.6.0
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stix2-patterns==2.0.0
- taxii2-client==2.3.0
- testpath==0.6.0
- toml==0.10.2
- tomli==1.2.3
- tornado==6.1
- tox==3.28.0
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_filesystem.py::test_filesystem_source_bad_json_file"
]
| []
| [
"stix2/test/test_filesystem.py::test_filesystem_source_nonexistent_folder",
"stix2/test/test_filesystem.py::test_filesystem_sink_nonexistent_folder",
"stix2/test/test_filesystem.py::test_filesystem_source_bad_stix_file",
"stix2/test/test_filesystem.py::test_filesytem_source_get_object",
"stix2/test/test_filesystem.py::test_filesytem_source_get_nonexistent_object",
"stix2/test/test_filesystem.py::test_filesytem_source_all_versions",
"stix2/test/test_filesystem.py::test_filesytem_source_query_single",
"stix2/test/test_filesystem.py::test_filesytem_source_query_multiple",
"stix2/test/test_filesystem.py::test_filesystem_sink_add_python_stix_object",
"stix2/test/test_filesystem.py::test_filesystem_sink_add_stix_object_dict",
"stix2/test/test_filesystem.py::test_filesystem_sink_add_stix_bundle_dict",
"stix2/test/test_filesystem.py::test_filesystem_sink_add_json_stix_object",
"stix2/test/test_filesystem.py::test_filesystem_sink_json_stix_bundle",
"stix2/test/test_filesystem.py::test_filesystem_sink_add_objects_list",
"stix2/test/test_filesystem.py::test_filesystem_store_get_stored_as_bundle",
"stix2/test/test_filesystem.py::test_filesystem_store_get_stored_as_object",
"stix2/test/test_filesystem.py::test_filesystem_store_all_versions",
"stix2/test/test_filesystem.py::test_filesystem_store_query",
"stix2/test/test_filesystem.py::test_filesystem_store_query_single_filter",
"stix2/test/test_filesystem.py::test_filesystem_store_empty_query",
"stix2/test/test_filesystem.py::test_filesystem_store_query_multiple_filters",
"stix2/test/test_filesystem.py::test_filesystem_store_query_dont_include_type_folder",
"stix2/test/test_filesystem.py::test_filesystem_store_add",
"stix2/test/test_filesystem.py::test_filesystem_store_add_as_bundle",
"stix2/test/test_filesystem.py::test_filesystem_add_bundle_object",
"stix2/test/test_filesystem.py::test_filesystem_store_add_invalid_object",
"stix2/test/test_filesystem.py::test_filesystem_object_with_custom_property",
"stix2/test/test_filesystem.py::test_filesystem_object_with_custom_property_in_bundle",
"stix2/test/test_filesystem.py::test_filesystem_custom_object",
"stix2/test/test_filesystem.py::test_relationships",
"stix2/test/test_filesystem.py::test_relationships_by_type",
"stix2/test/test_filesystem.py::test_relationships_by_source",
"stix2/test/test_filesystem.py::test_relationships_by_target",
"stix2/test/test_filesystem.py::test_relationships_by_target_and_type",
"stix2/test/test_filesystem.py::test_relationships_by_target_and_source",
"stix2/test/test_filesystem.py::test_related_to",
"stix2/test/test_filesystem.py::test_related_to_by_source",
"stix2/test/test_filesystem.py::test_related_to_by_target"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,319 | [
"docs/conf.py",
"stix2/base.py",
"docs/guide/taxii.ipynb",
"stix2/datastore/filesystem.py",
"stix2/datastore/taxii.py"
]
| [
"docs/conf.py",
"stix2/base.py",
"docs/guide/taxii.ipynb",
"stix2/datastore/filesystem.py",
"stix2/datastore/taxii.py"
]
|
|
spencerahill__aospy-254 | 389961acd046396b3b237b5b2749d70b345b1e6b | 2018-03-21 18:32:13 | f8af04e7e9deec9fccd0337a074e9da174e83504 | spencerahill: Travis failures are both kernel timeouts on our test of the tutorial notebook: https://travis-ci.org/spencerahill/aospy/jobs/356502217#L1082
These seem unrelated to this commit. | diff --git a/.stickler.yml b/.stickler.yml
new file mode 100644
index 0000000..8745306
--- /dev/null
+++ b/.stickler.yml
@@ -0,0 +1,3 @@
+linters:
+ flake8:
+ fixer: true
diff --git a/.travis.yml b/.travis.yml
index bef0ec4..b773e28 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,8 +10,6 @@ matrix:
env: CONDA_ENV=py27
- python: 2.7
env: CONDA_ENV=py27-min
- - python: 3.4
- env: CONDA_ENV=py34
- python: 3.5
env: CONDA_ENV=py35
- python: 3.6
diff --git a/aospy/automate.py b/aospy/automate.py
index 0f3a0d7..b7a3aae 100644
--- a/aospy/automate.py
+++ b/aospy/automate.py
@@ -24,6 +24,11 @@ _VARIABLES_STR = 'variables'
_TAG_ATTR_MODIFIERS = dict(all='', default='default_')
+class AospyException(Exception):
+ """Base exception class for the aospy package."""
+ pass
+
+
def _get_attr_by_tag(obj, tag, attr_name):
"""Get attribute from an object via a string tag.
@@ -84,11 +89,6 @@ def _input_func_py2_py3():
return input
-class AospyException(Exception):
- """Base exception class for the aospy package."""
- pass
-
-
def _user_verify(input_func=_input_func_py2_py3(),
prompt='Perform these computations? [y/n] '):
"""Prompt the user for verification."""
@@ -487,4 +487,10 @@ def submit_mult_calcs(calc_suite_specs, exec_options=None):
_user_verify()
calc_suite = CalcSuite(calc_suite_specs)
calcs = calc_suite.create_calcs()
+ if not calcs:
+ raise AospyException(
+ "The specified combination of parameters yielded zero "
+ "calculations. Most likely, one of the parameters is "
+ "inadvertently empty."
+ )
return _exec_calcs(calcs, **exec_options)
diff --git a/appveyor.yml b/appveyor.yml
index 71ea082..5ee68d9 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -10,11 +10,6 @@ environment:
PYTHON_ARCH: "32"
CONDA_ENV: "py27"
- - PYTHON: "C:\\Python34-conda64"
- PYTHON_VERSION: "3.4"
- PYTHON_ARCH: "64"
- CONDA_ENV: "py34"
-
- PYTHON: "C:\\Python35-conda64"
PYTHON_VERSION: "3.5"
PYTHON_ARCH: "64"
diff --git a/ci/environment-py34.yml b/ci/environment-py34.yml
deleted file mode 100644
index 66f76b0..0000000
--- a/ci/environment-py34.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: test_env
-channels:
- - conda-forge
-dependencies:
- - python=3.4
- - scipy=0.18
- - mkl=2017.0.3
- - netCDF4
- - xarray
- - dask
- - distributed
- - pytest
- - future
- - matplotlib
- - jupyter
- - flake8
- - pip:
- - coveralls
- - pytest-cov
- - pytest-catchlog
diff --git a/ci/install_python.ps1 b/ci/install_python.ps1
index f210245..66bca52 100644
--- a/ci/install_python.ps1
+++ b/ci/install_python.ps1
@@ -8,7 +8,7 @@ $BASE_URL = "https://www.python.org/ftp/python/"
function DownloadMiniconda ($python_version, $platform_suffix) {
$webclient = New-Object System.Net.WebClient
- $py3versions = "3.4", "3.5", "3.6"
+ $py3versions = "3.5", "3.6"
if ($py3versions -contains $python_version) {
$filename = "Miniconda3-latest-Windows-" + $platform_suffix + ".exe"
} else {
diff --git a/docs/install.rst b/docs/install.rst
index 87bce46..2f70175 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -7,7 +7,7 @@ Supported platforms
-------------------
- Operating system: Windows, MacOS/Mac OS X, and Linux
-- Python: 2.7, 3.4, 3.5, and 3.6
+- Python: 2.7, 3.5, and 3.6
.. note::
@@ -30,11 +30,6 @@ The recommended installation method is via `conda
conda install -c conda-forge aospy
-.. note:: Python 3.4 users: `conda-forge
- <https://conda-forge.github.io/>`_ no longer supports builds
- of packages on Python 3.4. Please use one of the
- alternative installation methods described below.
-
Alternative method #1: pip
--------------------------
diff --git a/docs/whats-new.rst b/docs/whats-new.rst
index e2c4882..6ef5b18 100644
--- a/docs/whats-new.rst
+++ b/docs/whats-new.rst
@@ -11,6 +11,9 @@ v0.3 (unreleased)
Breaking Changes
~~~~~~~~~~~~~~~~
+- Drop support for Python 3.4, since our core upstream dependency
+ xarray is also dropping it as of their 0.11 release (:pull:`255`).
+ By `Spencer Hill <https://github.com/spencerahill>`_.
- Deprecate ``Constant`` class and ``constants.py`` module.
Physical constants used internally by aospy are now stored
in ``_constants.py`` (fixes :issue:`50` via :pull:`223`).
@@ -32,6 +35,11 @@ Documentation
Enhancements
~~~~~~~~~~~~
+- Raise an exception with an informative message if
+ ``submit_mult_calcs`` (and thus the main script) generates zero
+ calculations, which can happen if one of the parameters is
+ accidentally set to an empty list (closes :issue:`253` via
+ :pull:`254`). By `Spencer Hill <https://github.com/spencerahill>`_.
- Suppress warnings from xarray when loading data whose dates extend
outside the range supported by the numpy.datetime64 datatype. aospy
has its own logic to deal with these cases (closes :issue:`221` via
diff --git a/setup.py b/setup.py
index 529013b..b237c6e 100644
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,6 @@ setuptools.setup(
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Atmospheric Science'
| Warn (or perhaps raise?) when the main script is constructed to submit zero calculations
For instance if one forgets to specify `input_vertical_datatypes` and instead uses an empty list, the main script will run and silently compute nothing. This is confusing for users who might not be familiar with the internals in `automate.py`, which create `Calc` objects based on the product of the input options (so if one option is an empty list, zero objects will be created). | spencerahill/aospy | diff --git a/aospy/test/test_automate.py b/aospy/test/test_automate.py
index a7ce120..52c4c96 100644
--- a/aospy/test/test_automate.py
+++ b/aospy/test/test_automate.py
@@ -268,6 +268,13 @@ def test_submit_mult_calcs(calcsuite_init_specs_single_calc, exec_options):
calcsuite_init_specs_single_calc['output_time_regional_reductions'])
+def test_submit_mult_calcs_no_calcs(calcsuite_init_specs):
+ specs = calcsuite_init_specs.copy()
+ specs['input_vertical_datatypes'] = []
+ with pytest.raises(AospyException):
+ submit_mult_calcs(specs)
+
+
@pytest.mark.parametrize(
('exec_options'),
[dict(parallelize=True, write_to_tar=False),
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 7
} | 0.2 | {
"env_vars": null,
"env_yml_path": [
"ci/environment-py36.yml"
],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/spencerahill/aospy.git@389961acd046396b3b237b5b2749d70b345b1e6b#egg=aospy
argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1633990451307/work
async_generator @ file:///home/conda/feedstock_root/build_artifacts/async_generator_1722652753231/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1671632566681/work
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work
bokeh @ file:///home/conda/feedstock_root/build_artifacts/bokeh_1625756939897/work
certifi==2021.5.30
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1631636256886/work
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1632539733990/work
charset-normalizer==2.0.12
click==7.1.2
cloudpickle @ file:///home/conda/feedstock_root/build_artifacts/cloudpickle_1674202310934/work
contextvars==2.4
coverage==6.2
coveralls==3.3.1
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work
cytoolz==0.11.0
dask @ file:///home/conda/feedstock_root/build_artifacts/dask-core_1614995065708/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work
distributed @ file:///home/conda/feedstock_root/build_artifacts/distributed_1615002625500/work
docopt==0.6.2
entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1659645013175/work
fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1674184942191/work
future @ file:///home/conda/feedstock_root/build_artifacts/future_1610147328086/work
HeapDict==1.0.1
idna==3.10
immutables @ file:///home/conda/feedstock_root/build_artifacts/immutables_1628601257972/work
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1630267465156/work
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1603384189793/work
ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1620912934572/work/dist/ipykernel-5.5.5-py3-none-any.whl
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1609697613279/work
ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work
ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1679421482533/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1605054537831/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1636510082894/work
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1634752161479/work
jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1696255489086/work
jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1642858610849/work
jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work
jupyter-core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1631852698933/work
jupyterlab-pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1601375948261/work
jupyterlab-widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1655961217661/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1610099771815/work
locket @ file:///home/conda/feedstock_root/build_artifacts/locket_1650660393415/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1621455668064/work
matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1611858699142/work
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1643049622439/work
mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1673904152039/work
more-itertools @ file:///home/conda/feedstock_root/build_artifacts/more-itertools_1690211628840/work
msgpack @ file:///home/conda/feedstock_root/build_artifacts/msgpack-python_1610121702224/work
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1637327213451/work
nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert_1605401832871/work
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1617383142101/work
nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1633096406418/work
notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1616419146127/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1626681920064/work
olefile @ file:///home/conda/feedstock_root/build_artifacts/olefile_1602866521163/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1637239678211/work
pandas==1.1.5
pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1595548966091/work
partd @ file:///home/conda/feedstock_root/build_artifacts/partd_1617910651905/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1630696616009/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1631522669284/work
prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work
prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1670414775770/work
psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1610127101219/work
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
py @ file:///home/conda/feedstock_root/build_artifacts/py_1636301881863/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1659638152915/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
pyflakes @ file:///home/conda/feedstock_root/build_artifacts/pyflakes_1659210156976/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1672682006896/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work
PyQt5==5.12.3
PyQt5_sip==4.19.18
PyQtChart==5.12
PyQtWebEngine==5.12.1
pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1610146795286/work
pytest==6.2.5
pytest-catchlog==1.2.2
pytest-cov==4.0.0
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1693930252784/work
PyYAML==5.4.1
pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1631793305981/work
qtconsole @ file:///home/conda/feedstock_root/build_artifacts/qtconsole-base_1640876679830/work
QtPy @ file:///home/conda/feedstock_root/build_artifacts/qtpy_1643828301492/work
requests==2.27.1
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1629411471490/work
Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1682601222253/work
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
sortedcontainers @ file:///home/conda/feedstock_root/build_artifacts/sortedcontainers_1621217038088/work
tblib @ file:///home/conda/feedstock_root/build_artifacts/tblib_1616261298899/work
terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1631128154882/work
testpath @ file:///home/conda/feedstock_root/build_artifacts/testpath_1645693042223/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work
tomli==1.2.3
toolz @ file:///home/conda/feedstock_root/build_artifacts/toolz_1657485559105/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1610094701020/work
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1631041982274/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1644850595256/work
urllib3==1.26.20
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work
webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work
widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1655939017940/work
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1621474818012/work
zict==2.0.0
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1633302054558/work
| name: aospy
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- alsa-lib=1.2.7.2=h166bdaf_0
- argon2-cffi=21.1.0=py36h8f6f2f9_0
- async_generator=1.10=pyhd8ed1ab_1
- attrs=22.2.0=pyh71513ae_0
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=pyhd8ed1ab_4
- backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0
- bleach=6.1.0=pyhd8ed1ab_0
- bokeh=2.3.3=py36h5fab9bb_0
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- certifi=2021.5.30=py36h5fab9bb_0
- cffi=1.14.6=py36hd8eec40_1
- cftime=1.5.1=py36he33b4a0_0
- click=7.1.2=pyh9f0ad1d_0
- cloudpickle=2.2.1=pyhd8ed1ab_0
- contextvars=2.4=py_0
- curl=7.87.0=h6312ad2_0
- cycler=0.11.0=pyhd8ed1ab_0
- cytoolz=0.11.0=py36h8f6f2f9_3
- dask=2021.3.0=pyhd8ed1ab_0
- dask-core=2021.3.0=pyhd8ed1ab_0
- dbus=1.13.6=h5008d03_3
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- distributed=2021.3.0=py36h5fab9bb_0
- entrypoints=0.4=pyhd8ed1ab_0
- expat=2.6.4=h5888daf_0
- flake8=5.0.4=pyhd8ed1ab_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h267a509_2
- fsspec=2023.1.0=pyhd8ed1ab_0
- future=0.18.2=py36h5fab9bb_3
- gettext=0.23.1=h5888daf_0
- gettext-tools=0.23.1=h5888daf_0
- glib=2.80.2=hf974151_0
- glib-tools=2.80.2=hb6ce0ca_0
- gst-plugins-base=1.20.3=h57caac4_2
- gstreamer=1.20.3=hd4edc92_2
- hdf4=4.2.15=h9772cbc_5
- hdf5=1.12.1=nompi_h2386368_104
- heapdict=1.0.1=py_0
- icu=69.1=h9c3ff4c_0
- immutables=0.16=py36h8f6f2f9_0
- importlib-metadata=4.8.1=py36h5fab9bb_0
- importlib_metadata=4.8.1=hd8ed1ab_1
- iniconfig=1.1.1=pyh9f0ad1d_0
- ipykernel=5.5.5=py36hcb3619a_0
- ipython=7.16.1=py36he448a4c_2
- ipython_genutils=0.2.0=pyhd8ed1ab_1
- ipywidgets=7.7.4=pyhd8ed1ab_0
- jedi=0.17.2=py36h5fab9bb_1
- jinja2=3.0.3=pyhd8ed1ab_0
- jpeg=9e=h0b41bf4_3
- jsonschema=4.1.2=pyhd8ed1ab_0
- jupyter=1.0.0=pyhd8ed1ab_10
- jupyter_client=7.1.2=pyhd8ed1ab_0
- jupyter_console=6.5.1=pyhd8ed1ab_0
- jupyter_core=4.8.1=py36h5fab9bb_0
- jupyterlab_pygments=0.1.2=pyh9f0ad1d_0
- jupyterlab_widgets=1.1.1=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.3.1=py36h605e78d_1
- krb5=1.20.1=hf9c8cef_0
- lcms2=2.12=hddcbb42_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=3.0=h9c3ff4c_0
- libasprintf=0.23.1=h8e693c7_0
- libasprintf-devel=0.23.1=h8e693c7_0
- libblas=3.9.0=20_linux64_openblas
- libcblas=3.9.0=20_linux64_openblas
- libclang=13.0.1=default_hb5137d0_10
- libcurl=7.87.0=h6312ad2_0
- libdeflate=1.10=h7f98852_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libevent=2.1.10=h9b69904_4
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgettextpo=0.23.1=h5888daf_0
- libgettextpo-devel=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.80.2=hf974151_0
- libgomp=14.2.0=h767d61c_2
- libiconv=1.18=h4ce23a2_1
- liblapack=3.9.0=20_linux64_openblas
- libllvm13=13.0.1=hf817b99_2
- liblzma=5.6.4=hb9d3cd8_0
- liblzma-devel=5.6.4=hb9d3cd8_0
- libnetcdf=4.8.1=nompi_h329d8a1_102
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.1=hd590300_0
- libogg=1.3.5=h4ab18f5_0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libopus=1.3.1=h7f98852_1
- libpng=1.6.43=h2797004_0
- libpq=14.5=h2baec63_5
- libsodium=1.0.18=h36c2ea0_1
- libsqlite=3.46.0=hde9e2c9_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtiff=4.3.0=h0fcbabc_4
- libuuid=2.38.1=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=haae042b_4
- libzip=1.9.2=hc869a4a_1
- libzlib=1.2.13=h4ab18f5_6
- locket=1.0.0=pyhd8ed1ab_0
- markupsafe=2.0.1=py36h8f6f2f9_0
- matplotlib=3.3.4=py36h5fab9bb_0
- matplotlib-base=3.3.4=py36hd391965_0
- mccabe=0.7.0=pyhd8ed1ab_0
- mistune=0.8.4=pyh1a96a4e_1006
- more-itertools=10.0.0=pyhd8ed1ab_0
- msgpack-python=1.0.2=py36h605e78d_1
- mysql-common=8.0.32=h14678bc_0
- mysql-libs=8.0.32=h54cf53e_0
- nbclient=0.5.9=pyhd8ed1ab_0
- nbconvert=6.0.7=py36h5fab9bb_3
- nbformat=5.1.3=pyhd8ed1ab_0
- ncurses=6.5=h2d0b736_3
- nest-asyncio=1.6.0=pyhd8ed1ab_0
- netcdf4=1.5.7=nompi_py36h775750b_103
- notebook=6.3.0=py36h5fab9bb_0
- nspr=4.36=h5888daf_0
- nss=3.100=hca3bf56_0
- numpy=1.19.5=py36hfc0c790_2
- olefile=0.46=pyh9f0ad1d_1
- openjpeg=2.5.0=h7d73246_0
- openssl=1.1.1w=hd590300_0
- packaging=21.3=pyhd8ed1ab_0
- pandas=1.1.5=py36h284efc9_0
- pandoc=2.19.2=h32600fe_2
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.7.1=pyh9f0ad1d_0
- partd=1.2.0=pyhd8ed1ab_0
- pcre2=10.43=hcad00b1_0
- pexpect=4.8.0=pyh1a96a4e_2
- pickleshare=0.7.5=py_1003
- pillow=8.3.2=py36h676a545_0
- pip=21.3.1=pyhd8ed1ab_0
- pluggy=1.0.0=py36h5fab9bb_1
- prometheus_client=0.17.1=pyhd8ed1ab_0
- prompt-toolkit=3.0.36=pyha770c72_0
- prompt_toolkit=3.0.36=hd8ed1ab_0
- psutil=5.8.0=py36h8f6f2f9_1
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd3deb0d_0
- py=1.11.0=pyh6c4a22f_0
- pycodestyle=2.9.1=pyhd8ed1ab_0
- pycparser=2.21=pyhd8ed1ab_0
- pyflakes=2.5.0=pyhd8ed1ab_0
- pygments=2.14.0=pyhd8ed1ab_0
- pyparsing=3.1.4=pyhd8ed1ab_0
- pyqt=5.12.3=py36h5fab9bb_7
- pyqt-impl=5.12.3=py36h7ec31b9_7
- pyqt5-sip=4.19.18=py36hc4f0c31_7
- pyqtchart=5.12=py36h7ec31b9_7
- pyqtwebengine=5.12.1=py36h7ec31b9_7
- pyrsistent=0.17.3=py36h8f6f2f9_2
- pytest=6.2.5=py36h5fab9bb_0
- python=3.6.15=hb7a2778_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- pyzmq=22.3.0=py36h7068817_0
- qt=5.12.9=h1304e3e_6
- qtconsole-base=5.2.2=pyhd8ed1ab_1
- qtpy=2.0.1=pyhd8ed1ab_0
- readline=8.2=h8c095d6_2
- scipy=1.5.3=py36h81d768a_1
- send2trash=1.8.2=pyh41d4057_0
- setuptools=58.0.4=py36h5fab9bb_2
- six=1.16.0=pyh6c4a22f_0
- sortedcontainers=2.4.0=pyhd8ed1ab_0
- sqlite=3.46.0=h6d4b2fc_0
- tblib=1.7.0=pyhd8ed1ab_0
- terminado=0.12.1=py36h5fab9bb_0
- testpath=0.6.0=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_0
- toolz=0.12.0=pyhd8ed1ab_0
- tornado=6.1=py36h8f6f2f9_1
- traitlets=4.3.3=pyhd8ed1ab_2
- typing-extensions=4.1.1=hd8ed1ab_0
- typing_extensions=4.1.1=pyha770c72_0
- wcwidth=0.2.10=pyhd8ed1ab_0
- webencodings=0.5.1=pyhd8ed1ab_2
- wheel=0.37.1=pyhd8ed1ab_0
- widgetsnbextension=3.6.1=pyha770c72_0
- xarray=0.18.2=pyhd8ed1ab_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xz=5.6.4=hbcc6ac9_0
- xz-gpl-tools=5.6.4=hbcc6ac9_0
- xz-tools=5.6.4=hb9d3cd8_0
- yaml=0.2.5=h7f98852_2
- zeromq=4.3.5=h59595ed_1
- zict=2.0.0=py_0
- zipp=3.6.0=pyhd8ed1ab_0
- zlib=1.2.13=h4ab18f5_6
- zstd=1.5.6=ha6fb4c9_0
- pip:
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- idna==3.10
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- requests==2.27.1
- tomli==1.2.3
- urllib3==1.26.20
prefix: /opt/conda/envs/aospy
| [
"aospy/test/test_automate.py::test_submit_mult_calcs_no_calcs"
]
| [
"aospy/test/test_automate.py::test_submit_mult_calcs_external_client[exec_options0]",
"aospy/test/test_automate.py::test_submit_mult_calcs_external_client[exec_options1]",
"aospy/test/test_automate.py::test_submit_mult_calcs[exec_options0]",
"aospy/test/test_automate.py::test_submit_mult_calcs[exec_options1]",
"aospy/test/test_automate.py::test_submit_mult_calcs[exec_options2]",
"aospy/test/test_automate.py::test_submit_mult_calcs[exec_options3]",
"aospy/test/test_automate.py::test_submit_mult_calcs[None]",
"aospy/test/test_automate.py::test_submit_two_calcs_external_client[exec_options0]",
"aospy/test/test_automate.py::test_submit_two_calcs_external_client[exec_options1]",
"aospy/test/test_automate.py::test_submit_two_calcs[exec_options0]",
"aospy/test/test_automate.py::test_submit_two_calcs[exec_options1]",
"aospy/test/test_automate.py::test_submit_two_calcs[exec_options2]",
"aospy/test/test_automate.py::test_submit_two_calcs[exec_options3]",
"aospy/test/test_automate.py::test_submit_two_calcs[None]",
"aospy/test/test_automate.py::test_n_workers_for_local_cluster"
]
| [
"aospy/test/test_automate.py::test_get_attr_by_tag[obj0-all-models-expected0]",
"aospy/test/test_automate.py::test_get_attr_by_tag[obj1-default-models-expected1]",
"aospy/test/test_automate.py::test_get_attr_by_tag[obj2-all-runs-expected2]",
"aospy/test/test_automate.py::test_get_attr_by_tag[obj3-default-runs-expected3]",
"aospy/test/test_automate.py::test_get_attr_by_tag_invalid",
"aospy/test/test_automate.py::test_permuted_dict_of_specs",
"aospy/test/test_automate.py::test_merge_dicts",
"aospy/test/test_automate.py::test_input_func_py2_py3",
"aospy/test/test_automate.py::test_user_verify",
"aospy/test/test_automate.py::test_get_all_objs_of_type[Var-expected0]",
"aospy/test/test_automate.py::test_get_all_objs_of_type[Proj-expected1]",
"aospy/test/test_automate.py::TestCalcSuite::test_init",
"aospy/test/test_automate.py::TestCalcSuite::test_permute_core_specs",
"aospy/test/test_automate.py::TestCalcSuite::test_get_regions",
"aospy/test/test_automate.py::TestCalcSuite::test_get_variables",
"aospy/test/test_automate.py::TestCalcSuite::test_get_aux_specs",
"aospy/test/test_automate.py::TestCalcSuite::test_permute_aux_specs",
"aospy/test/test_automate.py::test_prune_invalid_time_reductions[var0]",
"aospy/test/test_automate.py::test_prune_invalid_time_reductions[var1]"
]
| []
| Apache License 2.0 | 2,320 | [
"ci/environment-py34.yml",
"ci/install_python.ps1",
"docs/whats-new.rst",
"setup.py",
".stickler.yml",
".travis.yml",
"docs/install.rst",
"aospy/automate.py",
"appveyor.yml"
]
| [
"ci/environment-py34.yml",
"ci/install_python.ps1",
"docs/whats-new.rst",
"setup.py",
".stickler.yml",
".travis.yml",
"docs/install.rst",
"aospy/automate.py",
"appveyor.yml"
]
|
marshmallow-code__marshmallow-756 | 08fba280e37f19ae24a82a93ec6868b6704e2f64 | 2018-03-21 22:45:33 | 8e217c8d6fefb7049ab3389f31a8d35824fa2d96 | lafrech: TODO: update docs.
sloria: Behavior looks good. This should be good to merge after docs are updated. | diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index db101bd1..1ee80e53 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -481,6 +481,25 @@ In the context of a web API, the ``dump_only`` and ``load_only`` parameters are
created_at = fields.DateTime(dump_only=True)
+Specify default Serialization/Deserialization values
+----------------------------------------------------
+
+Default values can be provided to a :class:`Field <fields.Field>` for both serialization and deserialization.
+
+`missing` is used for deserialization if the field is not found in the input data. Likewise, `default` is used for serialization if the input value is missing.
+
+.. code-block:: python
+
+ class UserSchema(Schema):
+ id = fields.UUID(missing=uuid.uuid1)
+ birthdate = fields.DateTime(default=dt.datetime(2017, 9, 29))
+
+ UserSchema().load({})
+ # {'id': UUID('337d946c-32cd-11e8-b475-0022192ed31b')}
+ UserSchema().dump({})
+ # {'birthdate': '2017-09-29T00:00:00+00:00'}
+
+
Next Steps
----------
diff --git a/docs/upgrading.rst b/docs/upgrading.rst
index 9683f2a8..1c439c80 100644
--- a/docs/upgrading.rst
+++ b/docs/upgrading.rst
@@ -393,6 +393,23 @@ The ``json_module`` class Meta option is deprecated in favor of ``render_module`
class Meta:
render_module = ujson
+
+``missing`` and ``default`` ``Field`` parameters are passed in deserialized form
+********************************************************************************
+
+.. code-block:: python
+
+ # 2.x
+ class UserSchema(Schema):
+ id = fields.UUID(missing=lambda: str(uuid.uuid1()))
+ birthdate = fields.DateTime(default=lambda: dt.datetime(2017, 9, 29).isoformat())
+
+ # 3.x
+ class UserSchema(Schema):
+ id = fields.UUID(missing=uuid.uuid1)
+ birthdate = fields.DateTime(default=dt.datetime(2017, 9, 29))
+
+
Pass ``default`` as a keyword argument
**************************************
diff --git a/marshmallow/fields.py b/marshmallow/fields.py
index 832ae1a8..946865e1 100755
--- a/marshmallow/fields.py
+++ b/marshmallow/fields.py
@@ -105,7 +105,7 @@ class Field(FieldABC):
inputs are excluded from serialized output.
"""
# Some fields, such as Method fields and Function fields, are not expected
- # to exists as attributes on the objects to serialize. Set this to False
+ # to exist as attributes on the objects to serialize. Set this to False
# for those fields
_CHECK_ATTRIBUTE = True
_creation_index = 0 # Used for sorting
@@ -237,12 +237,11 @@ class Field(FieldABC):
"""
if self._CHECK_ATTRIBUTE:
value = self.get_value(obj, attr, accessor=accessor)
+ if value is missing_ and hasattr(self, 'default'):
+ default = self.default
+ value = default() if callable(default) else default
if value is missing_:
- if hasattr(self, 'default'):
- if callable(self.default):
- return self.default()
- else:
- return self.default
+ return value
else:
value = None
return self._serialize(value, attr, obj)
@@ -256,6 +255,9 @@ class Field(FieldABC):
# Validate required fields, deserialize, then validate
# deserialized value
self._validate_missing(value)
+ if value is missing_:
+ _miss = self.missing
+ return _miss() if callable(_miss) else _miss
if getattr(self, 'allow_none', False) is True and value is None:
return None
output = self._deserialize(value, attr, data)
diff --git a/marshmallow/marshalling.py b/marshmallow/marshalling.py
index 1f3b2a32..162c722e 100644
--- a/marshmallow/marshalling.py
+++ b/marshmallow/marshalling.py
@@ -276,11 +276,6 @@ class Unmarshaller(ErrorStore):
(partial_is_collection and attr_name in partial)
):
continue
- _miss = field_obj.missing
- raw_value = _miss() if callable(_miss) else _miss
- if raw_value is missing and not field_obj.required:
- continue
-
getter = lambda val: field_obj.deserialize(val, field_name, data)
value = self.call_and_store(
getter_func=getter,
| missing and default values should be specified in deserialized form
Marshmallow expects `missing` and `default` values to be specified in a pre-serialized form, which is inconvenient and unintuitive.
Here is a schema which I would expect to work:
``` python
import datetime
import uuid
class TestSchema(Schema):
id = fields.UUID(
missing=uuid.uuid1)
ts = fields.DateTime(
format='rfc',
missing=datetime.datetime.now)
```
However, it does not:
``` python
schema = TestSchema()
pprint.pprint(tuple(schema.load({})))
({}, {'id': [u'Not a valid UUID.'], 'ts': [u'Not a valid datetime.']})
```
Instead, I have to convert my `default` and `missing` values to primitive types, which undermines the purpose of a library like marshmallow that already knows how to do this. For data types like datetimes, which have many possible serialized formats, I have to keep the formatting of my `default` and `missing` values in sync with my field properties, which is fragile and convoluted:
``` python
from email.Utils import formatdate
class TestSchema(Schema):
id = fields.UUID(
missing=lambda: str(uuid.uuid1()))
ts = fields.DateTime(
format='rfc',
missing=formatdate)
```
In the case of `default` values, marshmallow does not attempt to validate them, so no error is raised, but the resulting document is not deserializable.
| marshmallow-code/marshmallow | diff --git a/tests/test_deserialization.py b/tests/test_deserialization.py
index 881c6098..b3a6bfd4 100644
--- a/tests/test_deserialization.py
+++ b/tests/test_deserialization.py
@@ -1105,26 +1105,30 @@ class TestSchemaDeserialization:
assert 'nicknames' not in result
def test_deserialize_with_missing_param_value(self):
+ bdate = dt.datetime(2017, 9, 29)
+
class AliasingUserSerializer(Schema):
name = fields.String()
- years = fields.Integer(missing=10)
+ birthdate = fields.DateTime(missing=bdate)
data = {
'name': 'Mick',
}
result = AliasingUserSerializer().load(data)
assert result['name'] == 'Mick'
- assert result['years'] == 10
+ assert result['birthdate'] == bdate
def test_deserialize_with_missing_param_callable(self):
+ bdate = dt.datetime(2017, 9, 29)
+
class AliasingUserSerializer(Schema):
name = fields.String()
- years = fields.Integer(missing=lambda: 13 + 7)
+ birthdate = fields.DateTime(missing=lambda: bdate)
data = {
'name': 'Mick',
}
result = AliasingUserSerializer().load(data)
assert result['name'] == 'Mick'
- assert result['years'] == 20
+ assert result['birthdate'] == bdate
def test_deserialize_with_missing_param_none(self):
class AliasingUserSerializer(Schema):
diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index 24826eb5..e8e6c360 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -29,10 +29,6 @@ class TestFieldSerialization:
def user(self):
return User("Foo", email="[email protected]", age=42)
- def test_default(self, user):
- field = fields.Field(default='nan')
- assert field.serialize('age', {}) == 'nan'
-
@pytest.mark.parametrize(('value', 'expected'),
[
(42, float(42)),
@@ -54,10 +50,6 @@ class TestFieldSerialization:
field = fields.Number(as_string=True, allow_none=True)
assert field.serialize('age', user) is None
- def test_callable_default(self, user):
- field = fields.Field(default=lambda: 'nan')
- assert field.serialize('age', {}) == 'nan'
-
def test_function_field_passed_func(self, user):
field = fields.Function(lambda obj: obj.name.upper())
assert "FOO" == field.serialize("key", user)
@@ -815,6 +807,29 @@ class TestFieldSerialization:
else:
assert res is None
+class TestSchemaSerialization:
+
+ def test_serialize_with_missing_param_value(self):
+ class AliasingUserSerializer(Schema):
+ name = fields.String()
+ birthdate = fields.DateTime(default=dt.datetime(2017, 9, 29))
+ data = {
+ 'name': 'Mick',
+ }
+ result = AliasingUserSerializer().dump(data)
+ assert result['name'] == 'Mick'
+ assert result['birthdate'] == '2017-09-29T00:00:00+00:00'
+
+ def test_serialize_with_missing_param_callable(self):
+ class AliasingUserSerializer(Schema):
+ name = fields.String()
+ birthdate = fields.DateTime(default=lambda: dt.datetime(2017, 9, 29))
+ data = {
+ 'name': 'Mick',
+ }
+ result = AliasingUserSerializer().dump(data)
+ assert result['name'] == 'Mick'
+ assert result['birthdate'] == '2017-09-29T00:00:00+00:00'
def test_serializing_named_tuple():
Point = namedtuple('Point', ['x', 'y'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 4
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[reco]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==3.5.0
iniconfig==2.1.0
invoke==0.22.0
-e git+https://github.com/marshmallow-code/marshmallow.git@08fba280e37f19ae24a82a93ec6868b6704e2f64#egg=marshmallow
mccabe==0.6.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycodestyle==2.3.1
pyflakes==1.6.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.6.1
pytz==2017.3
simplejson==3.13.2
six==1.17.0
toml==0.10.2
tomli==2.2.1
tox==3.12.1
typing_extensions==4.13.0
virtualenv==20.29.3
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==3.5.0
- iniconfig==2.1.0
- invoke==0.22.0
- mccabe==0.6.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycodestyle==2.3.1
- pyflakes==1.6.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.6.1
- pytz==2017.3
- simplejson==3.13.2
- six==1.17.0
- toml==0.10.2
- tomli==2.2.1
- tox==3.12.1
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_value",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_callable",
"tests/test_serialization.py::TestSchemaSerialization::test_serialize_with_missing_param_value",
"tests/test_serialization.py::TestSchemaSerialization::test_serialize_with_missing_param_callable"
]
| []
| [
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Dict]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Dict]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_allow_none_is_true_if_missing_is_true",
"tests/test_deserialization.py::TestDeserializingNone::test_list_field_deserialize_none_to_empty_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_float_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[bad]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[in_val2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_integer_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_strict_integer_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places_and_rounding",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization_string",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values_not_permitted",
"tests/test_deserialization.py::TestFieldDeserialization::test_string_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[notvalid]",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_empty_truthy",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_falsy_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[not-a-datetime]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[in_value3]",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_year_is_invalid",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_date_is_invalid",
"tests/test_deserialization.py::TestFieldDeserialization::test_custom_date_format_datetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc]",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc822]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso8601]",
"tests/test_deserialization.py::TestFieldDeserialization::test_localdatetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_time_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[in_data2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_precision",
"tests/test_deserialization.py::TestFieldDeserialization::test_timedelta_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[9999999999]",
"tests/test_deserialization.py::TestFieldDeserialization::test_date_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_dict_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_value_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_key_value_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_relative_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_url_field_schemes_argument",
"tests/test_deserialization.py::TestFieldDeserialization::test_email_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_context",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_only_is_load_only",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_and_serialize_is_not_load_only",
"tests/test_deserialization.py::TestFieldDeserialization::test_uuid_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[malformed]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_function_must_be_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method_must_be_a_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialize_only",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_list_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_invalid_item",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_multiple_invalid_items",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[notalist]",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_is_always_included_in_deserialized_data",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_function",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_class_that_returns_bool",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_that_raises_error_with_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_validator_must_return_false_to_raise_error",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_validator_with_nonascii_input",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validators",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_custom_error_message",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_non_utf8_value",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_values",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_exclude",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_list_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_none_not_allowed",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_non_not_allowed",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_required_missing",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_required_missing",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring_with_list_data",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_symmetry",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_data_key_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_data_key_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_dump_only_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_none",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_raises_with_errors",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_raises_with_errors_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_many_raises_errors",
"tests/test_deserialization.py::TestSchemaDeserialization::test_validation_errors_are_stored",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_an_email_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_url_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_required_value_only_passed_to_validators_if_provided",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[True]",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[False]",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_validation",
"tests/test_deserialization.py::TestValidation::test_integer_with_validator",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_string_validator",
"tests/test_deserialization.py::TestValidation::test_function_validator",
"tests/test_deserialization.py::TestValidation::test_function_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_method_validator",
"tests/test_deserialization.py::TestValidation::test_nested_data_is_stored_when_validation_fails",
"tests/test_deserialization.py::TestValidation::test_false_value_validation",
"tests/test_deserialization.py::test_required_field_failure[String]",
"tests/test_deserialization.py::test_required_field_failure[Integer]",
"tests/test_deserialization.py::test_required_field_failure[Boolean]",
"tests/test_deserialization.py::test_required_field_failure[Float]",
"tests/test_deserialization.py::test_required_field_failure[Number]",
"tests/test_deserialization.py::test_required_field_failure[DateTime]",
"tests/test_deserialization.py::test_required_field_failure[LocalDateTime]",
"tests/test_deserialization.py::test_required_field_failure[Time]",
"tests/test_deserialization.py::test_required_field_failure[Date]",
"tests/test_deserialization.py::test_required_field_failure[TimeDelta]",
"tests/test_deserialization.py::test_required_field_failure[Dict]",
"tests/test_deserialization.py::test_required_field_failure[Url]",
"tests/test_deserialization.py::test_required_field_failure[Email]",
"tests/test_deserialization.py::test_required_field_failure[UUID]",
"tests/test_deserialization.py::test_required_field_failure[Decimal]",
"tests/test_deserialization.py::test_required_message_can_be_changed[My",
"tests/test_deserialization.py::test_required_message_can_be_changed[message1]",
"tests/test_deserialization.py::test_required_message_can_be_changed[message2]",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[True]",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[False]",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[42]",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[None]",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[data4]",
"tests/test_serialization.py::TestFieldSerialization::test_number[42-42.0]",
"tests/test_serialization.py::TestFieldSerialization::test_number[0-0.0]",
"tests/test_serialization.py::TestFieldSerialization::test_number[None-None]",
"tests/test_serialization.py::TestFieldSerialization::test_number_as_string",
"tests/test_serialization.py::TestFieldSerialization::test_number_as_string_passed_none",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_func",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_only_is_dump_only",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_deserialize_and_serialize_is_not_dump_only",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_does_not_swallow_attribute_error",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_load_only",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_with_context",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_uncallable_object",
"tests/test_serialization.py::TestFieldSerialization::test_integer_field",
"tests/test_serialization.py::TestFieldSerialization::test_integer_as_string_field",
"tests/test_serialization.py::TestFieldSerialization::test_integer_field_default",
"tests/test_serialization.py::TestFieldSerialization::test_integer_field_default_set_to_none",
"tests/test_serialization.py::TestFieldSerialization::test_uuid_field",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_string",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values_not_permitted",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_fixed_point_representation",
"tests/test_serialization.py::TestFieldSerialization::test_boolean_field_serialization",
"tests/test_serialization.py::TestFieldSerialization::test_function_with_uncallable_param",
"tests/test_serialization.py::TestFieldSerialization::test_email_field_serialize_none",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_none",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_invalid_dict_but_okay",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_ordereddict",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_value_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_key_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_key_value_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_validates",
"tests/test_serialization.py::TestFieldSerialization::test_url_field_serialize_none",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_with_method_missing",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_serialize_only_is_dump_only",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_deserialize_only_is_load_only",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_with_uncallable_attribute",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_does_not_swallow_attribute_error",
"tests/test_serialization.py::TestFieldSerialization::test_method_with_no_serialize_is_missing",
"tests/test_serialization.py::TestFieldSerialization::test_serialize_with_data_key_param",
"tests/test_serialization.py::TestFieldSerialization::test_serialize_with_attribute_and_data_key_uses_data_key",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_serializes_to_iso_by_default",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[invalid]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[value1]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[24]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc822]",
"tests/test_serialization.py::TestFieldSerialization::test_localdatetime_rfc_field",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso8601]",
"tests/test_serialization.py::TestFieldSerialization::test_localdatetime_iso",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_format",
"tests/test_serialization.py::TestFieldSerialization::test_string_field",
"tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field",
"tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field_on_schema",
"tests/test_serialization.py::TestFieldSerialization::test_string_field_default_to_empty_string",
"tests/test_serialization.py::TestFieldSerialization::test_time_field",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[badvalue]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[in_data2]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[42]",
"tests/test_serialization.py::TestFieldSerialization::test_date_field",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[badvalue]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[in_data2]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[42]",
"tests/test_serialization.py::TestFieldSerialization::test_timedelta_field",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_list_field",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_with_error",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_list_serialize_single_value",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_serialize_none_returns_none",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute_single_value",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generator_single_value",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_multiple_values",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_error",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_empty_generator_returns_none_for_every_non_returning_yield_statement",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_set",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_custom_class_with_iterator_protocol",
"tests/test_serialization.py::TestFieldSerialization::test_bad_list_field",
"tests/test_serialization.py::TestFieldSerialization::test_serialize_does_not_apply_validators",
"tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialization",
"tests/test_serialization.py::TestFieldSerialization::test_constant_is_always_included_in_serialized_data",
"tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialize_when_omitted",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[String]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Integer]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Boolean]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Float]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Number]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[DateTime]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[LocalDateTime]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Time]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Date]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[TimeDelta]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Dict]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Url]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Email]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[FormattedString]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[UUID]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Decimal]",
"tests/test_serialization.py::test_serializing_named_tuple",
"tests/test_serialization.py::test_serializing_named_tuple_with_meta",
"tests/test_serialization.py::test_serializing_slice"
]
| []
| MIT License | 2,321 | [
"docs/quickstart.rst",
"marshmallow/marshalling.py",
"docs/upgrading.rst",
"marshmallow/fields.py"
]
| [
"docs/quickstart.rst",
"marshmallow/marshalling.py",
"docs/upgrading.rst",
"marshmallow/fields.py"
]
|
voxpupuli__puppetboard-457 | 1bf3ee679a2e363b4813d76dda632056894fd2d3 | 2018-03-21 23:27:58 | 9e6aea56c7f222d1af4c5c42bf1d21c766b5dd22 | diff --git a/README.rst b/README.rst
index c0e7921..ac4dee0 100644
--- a/README.rst
+++ b/README.rst
@@ -190,7 +190,8 @@ connect. Therefor you'll also have to supply the following settings:
* ``PUPPETDB_CERT = /path/to/public/keyfile.crt``
For information about how to generate the correct keys please refer to the
-`pypuppetdb documentation`_.
+`pypuppetdb documentation`_. Alternatively is possible to explicitly specify
+the protocol to be used setting the ``PUPPETDB_PROTO`` variable.
Other settings that might be interesting in no particular order:
diff --git a/puppetboard/core.py b/puppetboard/core.py
index 203c373..77f8453 100644
--- a/puppetboard/core.py
+++ b/puppetboard/core.py
@@ -47,7 +47,8 @@ def get_puppetdb():
ssl_verify=app.config['PUPPETDB_SSL_VERIFY'],
ssl_key=app.config['PUPPETDB_KEY'],
ssl_cert=app.config['PUPPETDB_CERT'],
- timeout=app.config['PUPPETDB_TIMEOUT'],)
+ timeout=app.config['PUPPETDB_TIMEOUT'],
+ protocol=app.config['PUPPETDB_PROTO'],)
PUPPETDB = puppetdb
return PUPPETDB
diff --git a/puppetboard/default_settings.py b/puppetboard/default_settings.py
index d33126c..e8f3a40 100644
--- a/puppetboard/default_settings.py
+++ b/puppetboard/default_settings.py
@@ -2,6 +2,7 @@ import os
PUPPETDB_HOST = 'localhost'
PUPPETDB_PORT = 8080
+PUPPETDB_PROTO = None
PUPPETDB_SSL_VERIFY = True
PUPPETDB_KEY = None
PUPPETDB_CERT = None
diff --git a/puppetboard/docker_settings.py b/puppetboard/docker_settings.py
index b797906..f137c6a 100644
--- a/puppetboard/docker_settings.py
+++ b/puppetboard/docker_settings.py
@@ -2,7 +2,7 @@ import os
PUPPETDB_HOST = os.getenv('PUPPETDB_HOST', 'puppetdb')
PUPPETDB_PORT = int(os.getenv('PUPPETDB_PORT', '8080'))
-# Since this is an env it will alwas be a string, we need
+# Since this is an env it will always be a string, we need
# to conver that string to a bool
SSL_VERIFY = os.getenv('PUPPETDB_SSL_VERIFY', 'True')
if SSL_VERIFY.upper() == 'TRUE':
@@ -14,6 +14,7 @@ else:
PUPPETDB_KEY = os.getenv('PUPPETDB_KEY', None)
PUPPETDB_CERT = os.getenv('PUPPETDB_CERT', None)
+PUPPETDB_PROTO = os.getenv('PUPPETDB_PROTO', None)
PUPPETDB_TIMEOUT = int(os.getenv('PUPPETDB_TIMEOUT', '20'))
DEFAULT_ENVIRONMENT = os.getenv('DEFAULT_ENVIRONMENT', 'production')
SECRET_KEY = os.getenv('SECRET_KEY', os.urandom(24))
| Allow to force the protocol to connect to PuppetDB
When the `PUPPETDB_CERT` and `PUPPETDB_KEY` are not set, the Pypuppetdb library will automatically choose 'http' as the protocol to be used for the connection to PuppetDB.
It seems to me that as of now is not possible to configure PuppetBoard to connect to PuppetDB via HTTPS without a key/cert pair.
I think it's a quick and easy feature to add, so I'm sending a PR for it, if you'll deem acceptable. | voxpupuli/puppetboard | diff --git a/test/test_docker_settings.py b/test/test_docker_settings.py
index fe18a48..02ce89a 100644
--- a/test/test_docker_settings.py
+++ b/test/test_docker_settings.py
@@ -40,6 +40,12 @@ def test_set_host_port(cleanUpEnv):
assert docker_settings.PUPPETDB_PORT == 9081
+def test_set_proto(cleanUpEnv):
+ os.environ['PUPPETDB_PROTO'] = 'https'
+ reload(docker_settings)
+ assert docker_settings.PUPPETDB_PROTO == 'https'
+
+
def test_cert_true_test(cleanUpEnv):
os.environ['PUPPETDB_SSL_VERIFY'] = 'True'
reload(docker_settings)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 4
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-pep8",
"bandit"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
bandit==1.7.1
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
CommonMark==0.7.2
coverage==6.2
dataclasses==0.8
execnet==1.9.0
Flask==2.0.3
Flask-WTF==1.0.1
future==1.0.0
gitdb==4.0.9
GitPython==3.1.18
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
MarkupSafe==2.0.1
packaging==21.3
pbr==6.1.1
pep8==1.7.1
pluggy==1.0.0
-e git+https://github.com/voxpupuli/puppetboard.git@1bf3ee679a2e363b4813d76dda632056894fd2d3#egg=puppetboard
py==1.11.0
pyparsing==3.1.4
pypuppetdb==2.5.2
pytest==7.0.1
pytest-cache==1.0
pytest-cov==4.0.0
pytest-pep8==1.0.6
PyYAML==6.0.1
requests==2.27.1
smmap==5.0.0
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
WTForms==3.0.0
zipp==3.6.0
| name: puppetboard
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- bandit==1.7.1
- charset-normalizer==2.0.12
- click==8.0.4
- commonmark==0.7.2
- coverage==6.2
- dataclasses==0.8
- execnet==1.9.0
- flask==2.0.3
- flask-wtf==1.0.1
- future==1.0.0
- gitdb==4.0.9
- gitpython==3.1.18
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- markupsafe==2.0.1
- packaging==21.3
- pbr==6.1.1
- pep8==1.7.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pypuppetdb==2.5.2
- pytest==7.0.1
- pytest-cache==1.0
- pytest-cov==4.0.0
- pytest-pep8==1.0.6
- pyyaml==6.0.1
- requests==2.27.1
- smmap==5.0.0
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- wtforms==3.0.0
- zipp==3.6.0
prefix: /opt/conda/envs/puppetboard
| [
"test/test_docker_settings.py::test_set_proto"
]
| []
| [
"test/test_docker_settings.py::test_default_host_port",
"test/test_docker_settings.py::test_set_host_port",
"test/test_docker_settings.py::test_cert_true_test",
"test/test_docker_settings.py::test_cert_false_test",
"test/test_docker_settings.py::test_cert_path",
"test/test_docker_settings.py::test_inventory_facts_default",
"test/test_docker_settings.py::test_invtory_facts_custom",
"test/test_docker_settings.py::test_graph_facts_defautl",
"test/test_docker_settings.py::test_graph_facts_custom",
"test/test_docker_settings.py::test_default_table_selctor",
"test/test_docker_settings.py::test_env_table_selector",
"test/test_docker_settings.py::test_env_column_options"
]
| []
| Apache License 2.0 | 2,322 | [
"README.rst",
"puppetboard/core.py",
"puppetboard/docker_settings.py",
"puppetboard/default_settings.py"
]
| [
"README.rst",
"puppetboard/core.py",
"puppetboard/docker_settings.py",
"puppetboard/default_settings.py"
]
|
|
yevhen-m__flake8-fancy-header-4 | 54f4b80e65995b1163365bc1c1a6aa0fed159109 | 2018-03-22 12:57:53 | 54f4b80e65995b1163365bc1c1a6aa0fed159109 | diff --git a/flake8_fancy_header/__init__.py b/flake8_fancy_header/__init__.py
index 4dc17be..d871073 100644
--- a/flake8_fancy_header/__init__.py
+++ b/flake8_fancy_header/__init__.py
@@ -1,9 +1,15 @@
+"""
+===================
+flake8_fancy_header
+===================
+"""
+
import sys
major, minor = sys.version_info[0], sys.version_info[1]
PY_37_OR_GREATER = (major, minor) >= (3, 7)
if PY_37_OR_GREATER:
- from .checker import FancyHeaderChecker
+ from .checker import FancyHeaderChecker # flake8: noqa
else:
- from .checker import FancyHeaderCheckerBefore37 as FancyHeaderChecker
+ from .checker import FancyHeaderCheckerBefore37 as FancyHeaderChecker # flake8: noqa
diff --git a/flake8_fancy_header/checker.py b/flake8_fancy_header/checker.py
index 2261a33..eaa9baa 100644
--- a/flake8_fancy_header/checker.py
+++ b/flake8_fancy_header/checker.py
@@ -1,10 +1,22 @@
+"""
+===========================
+flake8_fancy_header.checker
+===========================
+"""
+
__version__ = '0.1.0'
import ast
from os import getcwd
-from os.path import join, normpath, splitext
+from os.path import exists, dirname, join, normpath, splitext
+
+PROJECT_ROOT_MARKERS = [
+ '.git',
+ 'setup.cfg',
+ 'setup.py',
+]
class BaseChecker(object):
@@ -18,12 +30,25 @@ class BaseChecker(object):
self.tree = tree
self.filename = filename
- def get_header_value(self):
- # Assume that python runs from project's root, so we can get its
- # working directory and resolve from it.
- cwd = getcwd() + '/'
- filename = normpath(join(cwd, self.filename)).split(cwd, 1)[1]
+ def get_project_root_dir(self, current_dir=None):
+ if current_dir == '/':
+ # Reached fs root, fallback to cwd
+ return getcwd()
+
+ if current_dir is None:
+ current_dir = getcwd()
+ for marker in PROJECT_ROOT_MARKERS:
+ if exists(join(current_dir, marker)):
+ return current_dir
+
+ return self.get_project_root_dir(current_dir=dirname(current_dir))
+
+ def get_header_value(self):
+ filename = (
+ normpath(join(getcwd(), self.filename))
+ .split(self.get_project_root_dir() + '/', 1)[1]
+ )
import_path = splitext(filename)[0].replace('/', '.')
if import_path.endswith('__init__'):
import_path = import_path.rsplit('.', 1)[0]
diff --git a/setup.py b/setup.py
index 1c7bc6d..777605d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,3 +1,9 @@
+"""
+=====
+setup
+=====
+"""
+
import setuptools
install_requires = [
| Checking filename in header
I think assumption you've made here is incorrect:
https://github.com/yevhen-m/flake8-fancy-header/blob/master/flake8_fancy_header/checker.py#L24
Path python started from have nothing to deal with name of the file it checks. For example, my checker runs from different directory. Can you just check that given path exist on given machine and stop.
For example you have `fokker.hotels.sell` just check that relpath exist for give file.
Other solution you can be search for first `setup.cfg` down in the path. | yevhen-m/flake8-fancy-header | diff --git a/tests/__init__.py b/tests/__init__.py
index e69de29..31cbc3e 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -0,0 +1,5 @@
+"""
+=====
+tests
+=====
+"""
diff --git a/tests/test_checker.py b/tests/test_checker.py
index d16f912..b229032 100644
--- a/tests/test_checker.py
+++ b/tests/test_checker.py
@@ -1,7 +1,13 @@
+"""
+==================
+tests.test_checker
+==================
+"""
+
import ast
-import os
import unittest
+from os.path import dirname
from unittest.mock import patch, Mock
from flake8_fancy_header import FancyHeaderChecker
@@ -11,6 +17,7 @@ from flake8_fancy_header import FancyHeaderChecker
'flake8_fancy_header.checker.getcwd',
Mock(return_value='/home/User/Project'),
)
+@patch('flake8_fancy_header.checker.exists', Mock(return_value=False))
class CheckerTestCase(unittest.TestCase):
def test_empty_module(self):
@@ -138,3 +145,25 @@ package.spam
filename='/home/User/Project/package/spam.py',
)
self.assertEqual(len(list(checker.run())), 0)
+
+
+class CheckerTestCase1(unittest.TestCase):
+
+ @patch('flake8_fancy_header.checker.exists')
+ @patch('flake8_fancy_header.checker.getcwd')
+ def test_checker_not_from_project_root(self, getcwd, exists):
+ getcwd.return_value = '/home/User/Project/package/subpackage'
+ # Pretend that project root marker was found in /home/User/Project
+ exists.side_effect = lambda path: dirname(path) == '/home/User/Project'
+ module = ast.parse('''\
+"""
+=========================
+package.subpackage.module
+=========================
+"""
+ ''')
+ checker = FancyHeaderChecker(
+ tree=module,
+ filename='module.py',
+ )
+ self.assertEqual(len(list(checker.run())), 0)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
-e git+https://github.com/yevhen-m/flake8-fancy-header.git@54f4b80e65995b1163365bc1c1a6aa0fed159109#egg=flake8_fancy_header
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: flake8-fancy-header
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/flake8-fancy-header
| [
"tests/test_checker.py::CheckerTestCase::test_checker_with_absolute_filename",
"tests/test_checker.py::CheckerTestCase::test_checker_with_absolute_filename_and_submodule",
"tests/test_checker.py::CheckerTestCase::test_empty_module",
"tests/test_checker.py::CheckerTestCase::test_module_with_docstring",
"tests/test_checker.py::CheckerTestCase::test_module_with_invalid_header",
"tests/test_checker.py::CheckerTestCase::test_module_with_relative_filename",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header_2",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header_and_leading_comment",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header_for_init_module",
"tests/test_checker.py::CheckerTestCase::test_module_without_docstring",
"tests/test_checker.py::CheckerTestCase::test_valid_header_in_submodule",
"tests/test_checker.py::CheckerTestCase1::test_checker_not_from_project_root"
]
| []
| []
| []
| MIT License | 2,323 | [
"flake8_fancy_header/__init__.py",
"flake8_fancy_header/checker.py",
"setup.py"
]
| [
"flake8_fancy_header/__init__.py",
"flake8_fancy_header/checker.py",
"setup.py"
]
|
|
google__yapf-542 | c873f37236c6fb962b6a181bae6a2f2cab23ba6a | 2018-03-22 13:11:52 | c873f37236c6fb962b6a181bae6a2f2cab23ba6a | diff --git a/CHANGELOG b/CHANGELOG
index 92806ad..e3cc518 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -2,6 +2,11 @@
# All notable changes to this project will be documented in this file.
# This project adheres to [Semantic Versioning](http://semver.org/).
+## Unreleased
+### Added
+- The `BLANK_LINE_BEFORE_MODULE_DOCSTRING` knob adds a blank line before a
+ module's docstring.
+
## [0.21.0] 2018-03-18
### Added
- Introduce a new option of formatting multiline literals. Add
diff --git a/README.rst b/README.rst
index 3b43d29..ccc74d9 100644
--- a/README.rst
+++ b/README.rst
@@ -322,6 +322,9 @@ Knobs
def method():
pass
+``BLANK_LINE_BEFORE_MODULE_DOCSTRING``
+ Insert a blank line before a module docstring.
+
``BLANK_LINE_BEFORE_CLASS_DOCSTRING``
Insert a blank line before a class-level docstring.
diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py
index a4965ab..0485d7e 100644
--- a/yapf/yapflib/reformatter.py
+++ b/yapf/yapflib/reformatter.py
@@ -473,6 +473,10 @@ def _CalculateNumberOfNewlines(first_token, indent_depth, prev_uwline,
style.Get('BLANK_LINE_BEFORE_CLASS_DOCSTRING')):
# Enforce a blank line before a class's docstring.
return ONE_BLANK_LINE
+ elif (prev_uwline.first.value.startswith('#') and
+ style.Get('BLANK_LINE_BEFORE_MODULE_DOCSTRING')):
+ # Enforce a blank line before a module's docstring.
+ return ONE_BLANK_LINE
# The docstring shouldn't have a newline before it.
return NO_BLANK_LINES
diff --git a/yapf/yapflib/style.py b/yapf/yapflib/style.py
index 5f2fdac..7fd2177 100644
--- a/yapf/yapflib/style.py
+++ b/yapf/yapflib/style.py
@@ -71,6 +71,8 @@ _STYLE_HELP = dict(
..."""),
BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent("""\
Insert a blank line before a class-level docstring."""),
+ BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent("""\
+ Insert a blank line before a module docstring."""),
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent("""\
Number of blank lines surrounding top-level function and class
definitions."""),
@@ -261,6 +263,7 @@ def CreatePEP8Style():
ALLOW_SPLIT_BEFORE_DICT_VALUE=True,
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=False,
BLANK_LINE_BEFORE_CLASS_DOCSTRING=False,
+ BLANK_LINE_BEFORE_MODULE_DOCSTRING=False,
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=2,
COALESCE_BRACKETS=False,
COLUMN_LIMIT=79,
@@ -406,6 +409,7 @@ _STYLE_OPTION_VALUE_CONVERTER = dict(
ALLOW_SPLIT_BEFORE_DICT_VALUE=_BoolConverter,
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=_BoolConverter,
BLANK_LINE_BEFORE_CLASS_DOCSTRING=_BoolConverter,
+ BLANK_LINE_BEFORE_MODULE_DOCSTRING=_BoolConverter,
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=int,
COALESCE_BRACKETS=_BoolConverter,
COLUMN_LIMIT=int,
| Space between modeline/shebang and module docstring
Imagine this file:
``` python
#!/usr/bin/env python
# -*- coding: utf-8 name> -*-
"""Some module docstring."""
def foobar():
pass
```
Running yapf on it produces this diff:
``` diff
--- yapf.py (original)
+++ yapf.py (reformatted)
@@ -1,6 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 name> -*-
-
"""Some module docstring."""
```
I would actually have expected yapf **not** to remove the blank line between the shebang/encoding and the module docstring (i.e. do nothing).
| google/yapf | diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py
index a1ca779..01ed3d9 100644
--- a/yapftests/reformatter_basic_test.py
+++ b/yapftests/reformatter_basic_test.py
@@ -2218,6 +2218,59 @@ s = 'foo \\
finally:
style.SetGlobalStyle(style.CreateChromiumStyle())
+ def testBlankLineBeforeModuleDocstring(self):
+ unformatted_code = textwrap.dedent('''\
+ #!/usr/bin/env python
+ # -*- coding: utf-8 name> -*-
+
+ """Some module docstring."""
+
+
+ def foobar():
+ pass
+ ''')
+ expected_code = textwrap.dedent('''\
+ #!/usr/bin/env python
+ # -*- coding: utf-8 name> -*-
+ """Some module docstring."""
+
+
+ def foobar():
+ pass
+ ''')
+ uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
+ self.assertEqual(expected_code, reformatter.Reformat(uwlines))
+
+ try:
+ style.SetGlobalStyle(
+ style.CreateStyleFromConfig(
+ '{based_on_style: pep8, '
+ 'blank_line_before_module_docstring: True}'))
+ unformatted_code = textwrap.dedent('''\
+ #!/usr/bin/env python
+ # -*- coding: utf-8 name> -*-
+ """Some module docstring."""
+
+
+ def foobar():
+ pass
+ ''')
+ expected_formatted_code = textwrap.dedent('''\
+ #!/usr/bin/env python
+ # -*- coding: utf-8 name> -*-
+
+ """Some module docstring."""
+
+
+ def foobar():
+ pass
+ ''')
+ uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
+ self.assertCodeEqual(expected_formatted_code,
+ reformatter.Reformat(uwlines))
+ finally:
+ style.SetGlobalStyle(style.CreateChromiumStyle())
+
def testTupleCohesion(self):
unformatted_code = textwrap.dedent("""\
def f():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 4
} | 0.21 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/google/yapf.git@c873f37236c6fb962b6a181bae6a2f2cab23ba6a#egg=yapf
| name: yapf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/yapf
| [
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLineBeforeModuleDocstring"
]
| [
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testAsyncAsNonKeyword"
]
| [
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testArgsAndKwargsFormatting",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testBinaryOperators",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLineBeforeClassDocstring",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLinesAtEndOfFile",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLinesBeforeDecorators",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLinesBeforeFunctionsNotInColumnZero",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testClosingBracketIndent",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testClosingBracketsInlinedInCall",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCoalesceBracketsOnDict",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentBeforeFuncDef",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentBetweenDecorators",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentColumnLimitOverflow",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testComments",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentsInDataLiteral",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentsWithContinuationMarkers",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentsWithTrailingSpaces",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testComprehensionForAndIf",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testContiguousList",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationIndent",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationMarkerAfterStringWithContinuation",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationMarkers",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationSpaceRetention",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictSetGenerator",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryElementsOnOneLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryMakerFormatting",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryOnOwnLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryValuesOnOwnLines",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDocstringAndMultilineComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDocstrings",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDontAddBlankLineAfterMultilineString",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testDontSplitKeywordValueArguments",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testEllipses",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testEmptyContainers",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testEndingComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testEndingWhitespaceAfterSimpleStatement",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testExcessCharacters",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testExcessLineCountWithDefaultKeywords",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testExpressionPenalties",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testFormattingListComprehensions",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallArguments",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallContinuationLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallInDict",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallInNestedDict",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testI18n",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testI18nCommentsInDataLiteral",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testI18nNonFormatting",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testIfConditionalParens",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testIfExpressionWithFunctionCall",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testImportAsList",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testLineDepthOfSingleLineStatement",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testLineWrapInForExpression",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehension",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferNoBreakForTrivialExpression",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferOneLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferOneLineOverArithmeticSplit",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferThreeLinesForLineWrap",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testListWithFunctionCalls",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMatchingParenSplittingMatching",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineCommentReformatted",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineDictionaryKeys",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineDocstringAndMultilineComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineLambdas",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineShebang",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineString",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultipleContinuationMarkers",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultipleUgliness",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNamedAssignNotAtEndOfLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNestedDictionary",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNestedListsInDictionary",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoBreakOutsideOfBracket",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoKeywordArgumentBreakage",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoPenaltySplitting",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoQueueSeletionInMiddleOfLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpaceBetweenUnaryOpAndOpeningParen",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpacesAroundKeywordDefaultValues",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpacesBetweenOpeningBracketAndStartingOperator",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpacesBetweenSubscriptsAndCalls",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingAroundTermOperators",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingBeforeEndingSubscriptBracket",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingOnSingleArgument",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingWhenBinPacking",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingWithinSubscriptList",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNotInParams",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNotSplittingAfterSubscript",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testOpeningAndClosingBrackets",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testOverColumnLimit",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testRelativeImportStatements",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testRelaxArraySubscriptAffinity",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimple",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleFunctions",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleFunctionsWithTrailingComments",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleMultilineCode",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleMultilineWithComments",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSingleComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSingleLineFunctions",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSingleLineList",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSpaceAfterNotOperator",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitAfterComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitListWithComment",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitListWithInterspersedComments",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitListWithTerminatingComma",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitStringsIfSurroundedByParens",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingArgumentsTerminatedByComma",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingArraysSensibly",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstArgumentOnCompoundStatement",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstArgumentOnFunctionCall",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstArgumentOnFunctionDefinition",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstElementListArgument",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingOneArgumentList",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testStableDictionaryFormatting",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testStableInlinedDictionaryFormatting",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSubscriptExpression",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testTrailerOnSingleLine",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testTrailingCommaAndBracket",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testTupleCohesion",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testTupleCommaBeforeLastParen",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnaryNotOperator",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnaryOpInDictionaryValue",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnbreakableNot",
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnformattedAfterMultilineString"
]
| []
| Apache License 2.0 | 2,324 | [
"README.rst",
"yapf/yapflib/style.py",
"CHANGELOG",
"yapf/yapflib/reformatter.py"
]
| [
"README.rst",
"yapf/yapflib/style.py",
"CHANGELOG",
"yapf/yapflib/reformatter.py"
]
|
|
zopefoundation__DocumentTemplate-18 | 6af41a0957407a40e02210926c63810d7bbe53ff | 2018-03-22 18:38:05 | 6af41a0957407a40e02210926c63810d7bbe53ff | tseaver: @icemac I'm not sure what test would be needed: the externally-visible behavior of the method has not changed. Or is your point that `sort_sequence` doesn't have appropriate coverage?
And likewise, a changelog entry shouldn't be needed since no externally-visible change has occurred. | diff --git a/CHANGES.rst b/CHANGES.rst
index 18c8924..209e47d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -9,6 +9,8 @@ Changelog
- No longer use icons which got deleted in Zope 4.
+- Fix sorting in <dtml-in> for duplicate entries in Python 3.
+
3.0b2 (2017-11-03)
------------------
diff --git a/src/DocumentTemplate/DT_In.py b/src/DocumentTemplate/DT_In.py
index f9b8150..4c6872d 100644
--- a/src/DocumentTemplate/DT_In.py
+++ b/src/DocumentTemplate/DT_In.py
@@ -330,6 +330,7 @@
'''
+from operator import itemgetter
import sys
import re
@@ -837,7 +838,10 @@ class InClass(object):
by = SortBy(multsort, sf_list)
s.sort(by)
else:
- s.sort()
+ # In python 3 a key is required when tuples in the list have
+ # the same sort key to prevent attempting to compare the second
+ # item which is dict.
+ s.sort(key=itemgetter(0))
sequence = []
for k, client in s:
diff --git a/src/TreeDisplay/TreeTag.py b/src/TreeDisplay/TreeTag.py
index 69e28b8..414200c 100644
--- a/src/TreeDisplay/TreeTag.py
+++ b/src/TreeDisplay/TreeTag.py
@@ -395,10 +395,10 @@ def tpRenderTABLE(self, id, root_url, url, state, substate, diff, data,
if exp:
ptreeData['tree-item-expanded'] = 1
output('<a name="%s" href="%s?%stree-c=%s#%s">-</a>' %
- (id, root_url, param, s, id, script))
+ (id, root_url, param, s, id))
else:
output('<a name="%s" href="%s?%stree-e=%s#%s">+</a>' %
- (id, root_url, param, s, id, script))
+ (id, root_url, param, s, id))
output('</td>\n')
else:
| <dtml-in list_of_dicts mapping sort=name> breaks for duplicate names
Scenario:
```python
list_of_dicts = [{'name': 'a'}, {'name': 'b'}, {'name': 'a'}]
```
In DTML call:
```
<dtml-in list_of_dicts mapping sort=name>
```
It breaks because internally `dtml-in` changes the list to
```python
[('a', {'name': 'a'}), ('b', {'name': 'b'}), ('a', {'name': 'a'})]
```
There is no problem as long as the list does not contain duplicates of the sort key. If there are duplicates, Python has to compare the dicts when sorting. This works fine on Python 2 but it is no loner defined on Python 3. | zopefoundation/DocumentTemplate | diff --git a/src/DocumentTemplate/tests/test_DT_In.py b/src/DocumentTemplate/tests/test_DT_In.py
new file mode 100644
index 0000000..ba517d2
--- /dev/null
+++ b/src/DocumentTemplate/tests/test_DT_In.py
@@ -0,0 +1,34 @@
+import unittest
+
+
+class DummySection(object):
+ blocks = ['dummy']
+
+
+class TestIn(unittest.TestCase):
+ """Testing ..DT_in.InClass."""
+
+ def _getTargetClass(self):
+ from DocumentTemplate.DT_In import InClass
+ return InClass
+
+ def _makeOne(self, *args):
+ blocks = [('in', ' '.join(args), DummySection())]
+ return self._getTargetClass()(blocks)
+
+ def test_sort_sequence(self):
+ """It does not break on duplicate sort keys at a list of dicts."""
+ stmt = self._makeOne('seq', 'mapping', 'sort=key')
+ seq = [
+ {'key': 'c', 'data': '3'},
+ {'key': 'a', 'data': '1'},
+ {'key': 'b', 'data': '2'},
+ {'key': 'a', 'data': '2'},
+ ]
+ result = stmt.sort_sequence(seq, 'key')
+ self.assertEqual([
+ {'key': 'a', 'data': '1'},
+ {'key': 'a', 'data': '2'},
+ {'key': 'b', 'data': '2'},
+ {'key': 'c', 'data': '3'},
+ ], result)
diff --git a/src/DocumentTemplate/tests/test_DT_Var.py b/src/DocumentTemplate/tests/test_DT_Var.py
index 3270824..c81051f 100644
--- a/src/DocumentTemplate/tests/test_DT_Var.py
+++ b/src/DocumentTemplate/tests/test_DT_Var.py
@@ -73,11 +73,11 @@ class TestUrlQuoting(unittest.TestCase):
utf8_value = unicode_value.encode('UTF-8')
quoted_utf8_value = b'G%C3%BCnther%20M%C3%BCller'
- self.assertEquals(url_quote(unicode_value), quoted_unicode_value)
- self.assertEquals(url_quote(utf8_value), quoted_utf8_value)
+ self.assertEqual(url_quote(unicode_value), quoted_unicode_value)
+ self.assertEqual(url_quote(utf8_value), quoted_utf8_value)
- self.assertEquals(url_unquote(quoted_unicode_value), unicode_value)
- self.assertEquals(url_unquote(quoted_utf8_value), utf8_value)
+ self.assertEqual(url_unquote(quoted_unicode_value), unicode_value)
+ self.assertEqual(url_unquote(quoted_utf8_value), utf8_value)
def test_url_quoting_plus(self):
from DocumentTemplate.DT_Var import url_quote_plus
@@ -87,10 +87,10 @@ class TestUrlQuoting(unittest.TestCase):
utf8_value = unicode_value.encode('UTF-8')
quoted_utf8_value = b'G%C3%BCnther+M%C3%BCller'
- self.assertEquals(url_quote_plus(unicode_value), quoted_unicode_value)
- self.assertEquals(url_quote_plus(utf8_value), quoted_utf8_value)
+ self.assertEqual(url_quote_plus(unicode_value), quoted_unicode_value)
+ self.assertEqual(url_quote_plus(utf8_value), quoted_utf8_value)
- self.assertEquals(
+ self.assertEqual(
url_unquote_plus(quoted_unicode_value), unicode_value)
- self.assertEquals(
+ self.assertEqual(
url_unquote_plus(quoted_utf8_value), utf8_value)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 3.02 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | AccessControl==5.7
Acquisition==4.13
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
AuthEncoding==4.3
BTrees==4.11.3
certifi==2021.5.30
cffi==1.15.1
DateTime==4.9
-e git+https://github.com/zopefoundation/DocumentTemplate.git@6af41a0957407a40e02210926c63810d7bbe53ff#egg=DocumentTemplate
ExtensionClass==4.9
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
multipart==1.1.0
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Persistence==3.6
persistent==4.9.3
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-gettext==4.1
pytz==2025.2
RestrictedPython==6.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
transaction==3.1.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zExceptions==4.3
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
zope.browser==2.4
zope.component==5.1.0
zope.configuration==4.4.1
zope.contenttype==4.6
zope.deferredimport==4.4
zope.deprecation==4.4.0
zope.event==4.6
zope.exceptions==4.6
zope.hookable==5.4
zope.i18n==4.9.0
zope.i18nmessageid==5.1.1
zope.interface==5.5.2
zope.location==4.3
zope.proxy==4.6.1
zope.publisher==6.1.0
zope.schema==6.2.1
zope.security==5.8
zope.sequencesort==4.2
zope.structuredtext==4.4
zope.testing==5.0.1
| name: DocumentTemplate
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- accesscontrol==5.7
- acquisition==4.13
- authencoding==4.3
- btrees==4.11.3
- cffi==1.15.1
- datetime==4.9
- extensionclass==4.9
- multipart==1.1.0
- persistence==3.6
- persistent==4.9.3
- pycparser==2.21
- python-gettext==4.1
- pytz==2025.2
- restrictedpython==6.2
- six==1.17.0
- transaction==3.1.0
- zexceptions==4.3
- zope-browser==2.4
- zope-component==5.1.0
- zope-configuration==4.4.1
- zope-contenttype==4.6
- zope-deferredimport==4.4
- zope-deprecation==4.4.0
- zope-event==4.6
- zope-exceptions==4.6
- zope-hookable==5.4
- zope-i18n==4.9.0
- zope-i18nmessageid==5.1.1
- zope-interface==5.5.2
- zope-location==4.3
- zope-proxy==4.6.1
- zope-publisher==6.1.0
- zope-schema==6.2.1
- zope-security==5.8
- zope-sequencesort==4.2
- zope-structuredtext==4.4
- zope-testing==5.0.1
prefix: /opt/conda/envs/DocumentTemplate
| [
"src/DocumentTemplate/tests/test_DT_In.py::TestIn::test_sort_sequence"
]
| []
| [
"src/DocumentTemplate/tests/test_DT_Var.py::TestNewlineToBr::test_newline_to_br",
"src/DocumentTemplate/tests/test_DT_Var.py::TestNewlineToBr::test_newline_to_br_tainted",
"src/DocumentTemplate/tests/test_DT_Var.py::TestUrlQuoting::test_url_quoting",
"src/DocumentTemplate/tests/test_DT_Var.py::TestUrlQuoting::test_url_quoting_plus"
]
| []
| Zope Public License 2.1 | 2,325 | [
"src/TreeDisplay/TreeTag.py",
"src/DocumentTemplate/DT_In.py",
"CHANGES.rst"
]
| [
"src/TreeDisplay/TreeTag.py",
"src/DocumentTemplate/DT_In.py",
"CHANGES.rst"
]
|
CORE-GATECH-GROUP__serpent-tools-100 | 433d55710b85805c9dee609ad77f81bc3c202b72 | 2018-03-23 15:07:37 | 1d475a4dd8982532d097286468b2d616345c8ab8 | diff --git a/docs/examples/Branching.rst b/docs/examples/Branching.rst
index 32e10ca..7544198 100644
--- a/docs/examples/Branching.rst
+++ b/docs/examples/Branching.rst
@@ -1,3 +1,9 @@
+.. |branchReader| replace:: :py:class:`~serpentTools.parsers.branching.BranchingReader`
+
+.. |branchContainer| replace:: :py:class:`~serpentTools.objects.containers.BranchContainer`
+
+.. |homogUniv| replace:: :py:class:`~serpentTools.objects.containers.HomogUniv`
+
.. _branching-ex:
Branching Reader
@@ -34,37 +40,36 @@ The simplest way to read these files is using the
>>> import serpentTools
>>> branchFile = 'demo.coe'
- INFO : serpentTools: Using version 0.2.1
>>> r0 = serpentTools.read(branchFile)
INFO : serpentTools: Inferred reader for demo.coe: BranchingReader
INFO : serpentTools: Preparing to read demo.coe
INFO : serpentTools: Done reading branching file
-The branches are stored in custom
-:py:class:`~serpentTools.objects.containers.BranchContainer` objects in the
-``branches`` dictionary
+The branches are stored in custom |branchContainer| objects in the
+:py:attr:`~serpentTools.parsers.branching.BranchingReader.branches`
+dictionary
.. code::
>>> r0.branches
- {('B1000', 'FT1200'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c762438>,
+ {('B1000', 'FT1200'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8d8c9b00>,
('B1000', 'FT600'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c787908>,
- ('B1000', 'nom'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c737ef0>,
- ('B750', 'FT1200'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c752cf8>,
- ('B750', 'FT600'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c77c208>,
- ('B750', 'nom'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c72c860>,
- ('nom', 'FT1200'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c7455f8>,
- ('nom', 'FT600'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c76dac8>,
- ('nom', 'nom'):
- <serpentTools.objects.containers.BranchContainer at 0x2220c7231d0>}
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfecfd0>,
+ ('B1000', 'nom'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8d052b00>,
+ ('B750', 'FT1200'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8d8cc400>,
+ ('B750', 'FT600'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfe58d0>,
+ ('B750', 'nom'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8d041470>,
+ ('nom', 'FT1200'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfda208>,
+ ('nom', 'FT600'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfdf1d0>,
+ ('nom', 'nom'):
+ <serpentTools.objects.containers.BranchContainer at 0x7f2c8d03eda0>}
Here, the keys are tuples of strings indicating what
perturbations/branch states were applied for each ``SERPENT`` solution.
@@ -83,7 +88,9 @@ Examining a particular case
var V1_name V1_value
-cards. These are stored in the ``stateData`` attribute
+cards. These are stored in the
+:py:attr:`~serpentTools.objects.containers.BranchContainer.stateData`
+attribute
.. code::
@@ -106,9 +113,10 @@ Group Constant Data
Group constants are converted from ``SERPENT_STYLE`` to
``mixedCase`` to fit the overall style of the project.
-The :py:class:`~serpentTools.objects.containers.BranchContainer` stores group
-constant data in :py:class:`~serpentTools.objects.containers.HomogUniv`
-objects in the ``universes`` dictionary
+The |branchContainer| stores group
+constant data in |homogUniv| objects in the
+:py:attr:`~serpentTools.parsers.branching.BranchingReader.branches`
+dictionary
.. code::
@@ -129,42 +137,43 @@ objects in the ``universes`` dictionary
(40, 1.0, 2): <serpentTools.objects.containers.HomogUniv at 0x2220c78bdd8>,
(40, 10.0, 3): <serpentTools.objects.containers.HomogUniv at 0x2220c791a58>}
-The keys here are vectors indicating the universe ID, burnup [MWd/kgU],
-and burnup index corresponding to the point in the burnup schedule.
+The keys here are vectors indicating the universe ID, burnup, and burnup
+index corresponding to the point in the burnup schedule. ``SERPENT``
+prints negative values of burnup to indicate units of days, which is
+reflected in the
+:py:attr:`~serpentTools.objects.containers.BranchContainer.hasDays`
+attribute. ``hasDays-> True`` indicates
+that the values of burnup, second item in the above tuple, are in terms
+of days, not MWd/kgU.
These universes can be obtained by indexing this dictionary, or by using
the :py:meth:`~serpentTools.objects.containers.BranchContainer.getUniv` method
.. code::
- >>> univ0 = b0.universes[0, 1, 2]
+ >>> univ0 = b0.universes[0, 1, 1]
>>> print(univ0)
+ <HomogUniv 0: burnup: 1.000 MWd/kgu, step: 1>
>>> print(univ0.name)
+ 0
>>> print(univ0.bu)
+ 1.0
>>> print(univ0.step)
+ 1
>>> print(univ0.day)
- <HomogUniv from demo.coe>
- 0
- 1.0
- 2
- 0
+ None
+ >>> print(b0.hasDays)
+ False
>>> univ1 = b0.getUniv(0, burnup=1)
- >>> univ2 = b0.getUniv(0, index=2)
+ >>> univ2 = b0.getUniv(0, index=1)
>>> assert univ0 is univ1 is univ2
-Since the coefficient files do not store the day value of burnup, all
-:py:class:`~serpentTools.objects.containers.HomogUniv` objects created by the
-:py:class:`~serpentTools.objects.containers.BranchContainer` default to day
-zero.
-
Group constant data is stored in five dictionaries:
-1. ``infExp``: Expected values for infinite medium group constants
-2. ``infUnc``: Relative uncertainties for infinite medium group
- constants
-3. ``b1Exp``: Expected values for leakge-corrected group constants
-4. ``b1Unc``: Relative uncertainties for leakge-corrected group
- constants
-5. ``metaData``: items that do not fit the in the above categories
+1. :py:attr:`~serpentTools.objects.containers.HomogUniv.infExp`: Expected values for infinite medium group constants
+2. :py:attr:`~serpentTools.objects.containers.HomogUniv.infUnc`: Relative uncertainties for infinite medium group constants
+3. :py:attr:`~serpentTools.objects.containers.HomogUniv.b1Exp`: Expected values for leakge-corrected group constants
+4. :py:attr:`~serpentTools.objects.containers.HomogUniv.b1Unc`: Relative uncertainties for leakge-corrected group constants
+5. :py:attr:`~serpentTools.objects.containers.HomogUniv.metaData`: items that do not fit the in the above categories
.. code::
@@ -206,7 +215,7 @@ Iteration
The branching reader has a
:py:meth:`~serpentTools.parsers.branching.BranchingReader.iterBranches`
method that works to yield branch names and their associated
-:py:class:`~serpentTools.objects.containers.BranchContainer` objects. This can
+|branchContainer| objects. This can
be used to efficiently iterate over all the branches presented in the file.
.. code::
@@ -231,9 +240,8 @@ User Control
The ``SERPENT``
`set coefpara <http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual#set_coefpara>`_
card already restricts the data present in the coeffient file to user
-control, and the :py:class:`~serpentTools.parsers.branching.BranchingReader`
-includes similar control. Below are the various settings that the
-:py:class:`~serpentTools.parsers.branching.BranchingReader` uses to read and
+control, and the |branchReader| includes similar control.
+Below are the various settings that the |branchReader| uses to read and
process coefficient files.
.. code::
@@ -290,7 +298,7 @@ that the default action is to store all state data variables as strings.
As demonstrated in the :ref:`group-const-variables` example, use of
``xs.variableGroups`` and ``xs.variableExtras`` controls what data is
-stored on the :py:class:`~serpentTools.objects.containers.HomogUniv`
+stored on the |homogUniv|
objects. By default, all variables present in the coefficient file are stored.
.. code::
@@ -327,8 +335,7 @@ variables explicitly requested are present
Conclusion
----------
-The :py:class:`~serpentTools.parsers.branching.BranchingReader` is capable of
-reading coefficient files created
+The |branchReader| is capable of reading coefficient files created
by the ``SERPENT`` automated branching process. The data is stored
according to the branch parameters, universe information, and burnup.
This reader also supports user control of the processing by selecting
diff --git a/examples/Branching.ipynb b/examples/Branching.ipynb
index d2f1836..ca01663 100644
--- a/examples/Branching.ipynb
+++ b/examples/Branching.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Copyright (c) 2017 Andrew Johnson, Dan Kotlyar, GTRC\n",
+ "Copyright (c) 2017-2018 Andrew Johnson, Dan Kotlyar, Stefano Terlizzi, Gavin Ridley, GTRC\n",
"\n",
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
]
@@ -39,17 +39,9 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": 18,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "INFO : serpentTools: Using version 1.0b0+37.ga1e314a.dirty\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"import serpentTools\n",
"branchFile = 'demo.coe'"
@@ -66,7 +58,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 19,
"metadata": {},
"outputs": [
{
@@ -92,33 +84,33 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 20,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{('B1000',\n",
- " 'FT1200'): <serpentTools.objects.containers.BranchContainer at 0x2220c762438>,\n",
+ " 'FT1200'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8d8c9b00>,\n",
" ('B1000',\n",
- " 'FT600'): <serpentTools.objects.containers.BranchContainer at 0x2220c787908>,\n",
+ " 'FT600'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfecfd0>,\n",
" ('B1000',\n",
- " 'nom'): <serpentTools.objects.containers.BranchContainer at 0x2220c737ef0>,\n",
+ " 'nom'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8d052b00>,\n",
" ('B750',\n",
- " 'FT1200'): <serpentTools.objects.containers.BranchContainer at 0x2220c752cf8>,\n",
+ " 'FT1200'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8d8cc400>,\n",
" ('B750',\n",
- " 'FT600'): <serpentTools.objects.containers.BranchContainer at 0x2220c77c208>,\n",
+ " 'FT600'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfe58d0>,\n",
" ('B750',\n",
- " 'nom'): <serpentTools.objects.containers.BranchContainer at 0x2220c72c860>,\n",
+ " 'nom'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8d041470>,\n",
" ('nom',\n",
- " 'FT1200'): <serpentTools.objects.containers.BranchContainer at 0x2220c7455f8>,\n",
+ " 'FT1200'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfda208>,\n",
" ('nom',\n",
- " 'FT600'): <serpentTools.objects.containers.BranchContainer at 0x2220c76dac8>,\n",
+ " 'FT600'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8cfdf1d0>,\n",
" ('nom',\n",
- " 'nom'): <serpentTools.objects.containers.BranchContainer at 0x2220c7231d0>}"
+ " 'nom'): <serpentTools.objects.containers.BranchContainer at 0x7f2c8d03eda0>}"
]
},
- "execution_count": 5,
+ "execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@@ -136,7 +128,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 21,
"metadata": {},
"outputs": [
{
@@ -157,15 +149,17 @@
"metadata": {},
"source": [
"`SERPENT` allows the user to define variables for each branch through:\n",
- "```\n",
+ "\n",
+ "`\n",
"var V1_name V1_value\n",
- "```\n",
+ "`\n",
+ "\n",
"cards. These are stored in the `stateData` attribute"
]
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -178,7 +172,7 @@
" 'VERSION': '2.1.29'}"
]
},
- "execution_count": 7,
+ "execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
@@ -206,74 +200,76 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "The `BranchContainer` stores group constant data in `HomogUniv` objects in the `universes` dictionary"
+ "The `BranchContainer` stores group constant data in `HomogUniv` objects in the `universes` dictionary. "
]
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 23,
"metadata": {},
"outputs": [
{
- "data": {
- "text/plain": [
- "{(0, 0.0, 1): <serpentTools.objects.containers.HomogUniv at 0x2220c781ac8>,\n",
- " (0, 1.0, 2): <serpentTools.objects.containers.HomogUniv at 0x2220c78b5f8>,\n",
- " (0, 10.0, 3): <serpentTools.objects.containers.HomogUniv at 0x2220c791240>,\n",
- " (10, 0.0, 1): <serpentTools.objects.containers.HomogUniv at 0x2220c787a58>,\n",
- " (10, 1.0, 2): <serpentTools.objects.containers.HomogUniv at 0x2220c78b6a0>,\n",
- " (10, 10.0, 3): <serpentTools.objects.containers.HomogUniv at 0x2220c791320>,\n",
- " (20, 0.0, 1): <serpentTools.objects.containers.HomogUniv at 0x2220c787cc0>,\n",
- " (20, 1.0, 2): <serpentTools.objects.containers.HomogUniv at 0x2220c78b908>,\n",
- " (20, 10.0, 3): <serpentTools.objects.containers.HomogUniv at 0x2220c791588>,\n",
- " (30, 0.0, 1): <serpentTools.objects.containers.HomogUniv at 0x2220c78b048>,\n",
- " (30, 1.0, 2): <serpentTools.objects.containers.HomogUniv at 0x2220c78bb70>,\n",
- " (30, 10.0, 3): <serpentTools.objects.containers.HomogUniv at 0x2220c7917f0>,\n",
- " (40, 0.0, 1): <serpentTools.objects.containers.HomogUniv at 0x2220c78b1d0>,\n",
- " (40, 1.0, 2): <serpentTools.objects.containers.HomogUniv at 0x2220c78bdd8>,\n",
- " (40, 10.0, 3): <serpentTools.objects.containers.HomogUniv at 0x2220c791a58>}"
- ]
- },
- "execution_count": 8,
- "metadata": {},
- "output_type": "execute_result"
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "(0, 1.0, 1)\n",
+ "(10, 1.0, 1)\n",
+ "(20, 1.0, 1)\n",
+ "(30, 1.0, 1)\n",
+ "(20, 0.0, 0)\n",
+ "(40, 0.0, 0)\n",
+ "(20, 10.0, 2)\n",
+ "(10, 10.0, 2)\n",
+ "(0, 0.0, 0)\n",
+ "(10, 0.0, 0)\n",
+ "(0, 10.0, 2)\n",
+ "(30, 0.0, 0)\n",
+ "(40, 10.0, 2)\n",
+ "(40, 1.0, 1)\n",
+ "(30, 10.0, 2)\n"
+ ]
}
],
"source": [
- "b0.universes"
+ "for key in b0.universes:\n",
+ " print(key)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "The keys here are vectors indicating the universe ID, burnup [MWd/kgU], and burnup index corresponding to the point in the burnup schedule. These universes can be obtained by indexing this dictionary, or by using the `getUniv` method"
+ "The keys here are vectors indicating the universe ID, burnup, and burnup index corresponding to the point in the burnup schedule. `SERPENT` prints negative values of burnup to indicate units of days, which is reflected in the `hasDays` attribute. `hasDays-> True` indicates that the values of burnup, second item in the above tuple, are in terms of days, not MWd/kgU.\n",
+ "\n",
+ "These universes can be obtained by indexing this dictionary, or by using the `getUniv` method."
]
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 24,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "<HomogUniv from demo.coe>\n",
+ "<HomogUniv 0: burnup: 1.000 MWd/kgu, step: 1>\n",
"0\n",
"1.0\n",
- "2\n",
- "0\n"
+ "1\n",
+ "None\n",
+ "False\n"
]
}
],
"source": [
- "univ0 = b0.universes[0, 1, 2]\n",
+ "univ0 = b0.universes[0, 1, 1]\n",
"print(univ0)\n",
"print(univ0.name)\n",
"print(univ0.bu)\n",
"print(univ0.step)\n",
- "print(univ0.day)"
+ "print(univ0.day)\n",
+ "print(b0.hasDays)"
]
},
{
@@ -285,34 +281,12 @@
},
{
"cell_type": "code",
- "execution_count": 10,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "univ1 = b0.getUniv(0, burnup=1)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "univ2 = b0.getUniv(0, index=2)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {
- "collapsed": true
- },
+ "execution_count": 25,
+ "metadata": {},
"outputs": [],
"source": [
+ "univ1 = b0.getUniv(0, burnup=1)\n",
+ "univ2 = b0.getUniv(0, index=1)\n",
"assert univ0 is univ1 is univ2"
]
},
@@ -326,12 +300,14 @@
"1. `infUnc`: Relative uncertainties for infinite medium group constants\n",
"1. `b1Exp`: Expected values for leakge-corrected group constants\n",
"1. `b1Unc`: Relative uncertainties for leakge-corrected group constants\n",
- "1. `metaData`: items that do not fit the in the above categories"
+ "1. `metaData`: items that do not fit the in the above categories, such as energy group structure, k-infinity, etc.\n",
+ "\n",
+ "For this problem, the coefficient file does not have uncertainties, nor these metadata arguments. For this reason, the `infUnc`, `b1Unc`, and `metaData` dictionaries are emtpy."
]
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 26,
"metadata": {},
"outputs": [
{
@@ -345,7 +321,7 @@
" 'infTot': array([ 0.310842, 0.618286])}"
]
},
- "execution_count": 13,
+ "execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
@@ -356,7 +332,7 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 27,
"metadata": {},
"outputs": [
{
@@ -365,7 +341,7 @@
"{}"
]
},
- "execution_count": 14,
+ "execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
@@ -376,7 +352,7 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 28,
"metadata": {},
"outputs": [
{
@@ -390,7 +366,7 @@
" 'b1Tot': array([ 0.314521, 0.618361])}"
]
},
- "execution_count": 15,
+ "execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
@@ -401,7 +377,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 29,
"metadata": {},
"outputs": [
{
@@ -410,7 +386,7 @@
"{}"
]
},
- "execution_count": 16,
+ "execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
@@ -428,7 +404,7 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 30,
"metadata": {},
"outputs": [
{
@@ -437,7 +413,7 @@
"array([ 0.00271604, 0.059773 ])"
]
},
- "execution_count": 17,
+ "execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
@@ -448,7 +424,7 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 31,
"metadata": {},
"outputs": [
{
@@ -476,22 +452,22 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "('nom', 'nom') <BranchContainer for nom, nom from demo.coe>\n",
- "('B750', 'nom') <BranchContainer for B750, nom from demo.coe>\n",
- "('B1000', 'nom') <BranchContainer for B1000, nom from demo.coe>\n",
- "('nom', 'FT1200') <BranchContainer for nom, FT1200 from demo.coe>\n",
- "('B750', 'FT1200') <BranchContainer for B750, FT1200 from demo.coe>\n",
"('B1000', 'FT1200') <BranchContainer for B1000, FT1200 from demo.coe>\n",
+ "('B750', 'FT1200') <BranchContainer for B750, FT1200 from demo.coe>\n",
+ "('nom', 'FT1200') <BranchContainer for nom, FT1200 from demo.coe>\n",
"('nom', 'FT600') <BranchContainer for nom, FT600 from demo.coe>\n",
+ "('B750', 'nom') <BranchContainer for B750, nom from demo.coe>\n",
+ "('nom', 'nom') <BranchContainer for nom, nom from demo.coe>\n",
+ "('B1000', 'FT600') <BranchContainer for B1000, FT600 from demo.coe>\n",
"('B750', 'FT600') <BranchContainer for B750, FT600 from demo.coe>\n",
- "('B1000', 'FT600') <BranchContainer for B1000, FT600 from demo.coe>\n"
+ "('B1000', 'nom') <BranchContainer for B1000, nom from demo.coe>\n"
]
}
],
@@ -511,10 +487,8 @@
},
{
"cell_type": "code",
- "execution_count": 21,
- "metadata": {
- "collapsed": true
- },
+ "execution_count": 33,
+ "metadata": {},
"outputs": [],
"source": [
"import six\n",
@@ -523,33 +497,21 @@
},
{
"cell_type": "code",
- "execution_count": 22,
+ "execution_count": 34,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "branching.areUncsPresent\n",
- "\t default: False\n",
+ "xs.getInfXS\n",
+ "\t default: True\n",
+ "\t description: If true, store the infinite medium cross sections.\n",
"\t type: <class 'bool'>\n",
- "\t description: True if the values in the .coe file contain uncertainties\n",
- "branching.intVariables\n",
- "\t default: []\n",
- "\t description: Name of state data variables to convert to integers for each branch\n",
- "\t type: <class 'list'>\n",
"branching.floatVariables\n",
"\t default: []\n",
"\t description: Names of state data variables to convert to floats for each branch\n",
"\t type: <class 'list'>\n",
- "xs.getInfXS\n",
- "\t default: True\n",
- "\t description: If true, store the infinite medium cross sections.\n",
- "\t type: <class 'bool'>\n",
- "xs.getB1XS\n",
- "\t default: True\n",
- "\t description: If true, store the critical leakage cross sections.\n",
- "\t type: <class 'bool'>\n",
"xs.variableGroups\n",
"\t default: []\n",
"\t description: Name of variable groups from variables.yaml to be expanded into SERPENT variable to be stored\n",
@@ -557,7 +519,19 @@
"xs.variableExtras\n",
"\t default: []\n",
"\t description: Full SERPENT name of variables to be read\n",
- "\t type: <class 'list'>\n"
+ "\t type: <class 'list'>\n",
+ "branching.intVariables\n",
+ "\t default: []\n",
+ "\t description: Name of state data variables to convert to integers for each branch\n",
+ "\t type: <class 'list'>\n",
+ "branching.areUncsPresent\n",
+ "\t default: False\n",
+ "\t description: True if the values in the .coe file contain uncertainties\n",
+ "\t type: <class 'bool'>\n",
+ "xs.getB1XS\n",
+ "\t default: True\n",
+ "\t description: If true, store the critical leakage cross sections.\n",
+ "\t type: <class 'bool'>\n"
]
}
],
@@ -579,10 +553,8 @@
},
{
"cell_type": "code",
- "execution_count": 23,
- "metadata": {
- "collapsed": true
- },
+ "execution_count": 35,
+ "metadata": {},
"outputs": [],
"source": [
"assert isinstance(b0.stateData['BOR'], str)"
@@ -592,7 +564,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "As demonstrated in the `Settings` example, use of `xs.variableGroups` and `xs.variableExtras` controls what data is stored on the `HomogUniv` objects. By default, all variables present in the coefficient file are stored."
+ "As demonstrated in the [Settings example](https://github.com/CORE-GATECH-GROUP/serpent-tools/blob/master/examples/Settings.ipynb), use of `xs.variableGroups` and `xs.variableExtras` controls what data is stored on the `HomogUniv` objects. By default, all variables present in the coefficient file are stored."
]
},
{
@@ -654,9 +626,7 @@
{
"cell_type": "code",
"execution_count": 39,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"assert isinstance(b1.stateData['BOR'], float)\n",
@@ -736,7 +706,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.1"
+ "version": "3.5.2"
}
},
"nbformat": 4,
diff --git a/serpentTools/objects/containers.py b/serpentTools/objects/containers.py
index 745a613..8213dd2 100644
--- a/serpentTools/objects/containers.py
+++ b/serpentTools/objects/containers.py
@@ -4,6 +4,7 @@ Contents
--------
:py:class:`~serpentTools.objects.containers.HomogUniv`
:py:class:`~serpentTools.objects.containers.BranchContainer
+:py:class:`~serpentTools.objects.containers.DetectorBase`
:py:class:`~serpentTools.objects.containers.Detector`
"""
@@ -22,6 +23,17 @@ DET_COLS = ('value', 'energy', 'universe', 'cell', 'material', 'lattice',
"""Name of the columns of the data"""
+__all__ = ('DET_COLS', 'HomogUniv', 'BranchContainer', 'Detector',
+ 'DetectorBase')
+
+
+def isNonNeg(value):
+ """Return true if a value is None or non-negative"""
+ if value is None:
+ return True
+ return value >= 0
+
+
class HomogUniv(NamedObject):
"""
Class for storing homogenized universe specifications and retrieving them
@@ -41,12 +53,12 @@ class HomogUniv(NamedObject):
----------
name: str
name of the universe
- bu: float
- burnup value
- step: float
+ bu: float or int
+ non-negative burnup value
+ step: int
temporal step
- day: float
- depletion day
+ day: float or int
+ non-negative depletion day
infExp: dict
Expected values for infinite medium group constants
infUnc: dict
@@ -57,10 +69,26 @@ class HomogUniv(NamedObject):
Relative uncertainties for leakage-corrected group constants
metadata: dict
Other values that do not not conform to inf/b1 dictionaries
+
+ Raises
+ ------
+ SerpentToolsException:
+ If a negative value of burnup, step, or burnup days is passed
+
"""
def __init__(self, name, bu, step, day):
+ if not all(isNonNeg(xx) for xx in (bu, step, day)):
+ tail = ['{}: {}'.format(name, val)
+ for name, val in zip(('burnup', 'index', 'days'),
+ (bu, step, day))]
+ raise SerpentToolsException(
+ "Will not create universe with negative burnup\n{}"
+ .format(', '.join(tail)))
NamedObject.__init__(self, name)
+ if step is not None and step == 0:
+ bu = bu if bu is not None else 0.0
+ day = day if day is not None else 0.0
self.bu = bu
self.step = step
self.day = day
@@ -71,9 +99,22 @@ class HomogUniv(NamedObject):
self.infUnc = {}
self.metadata = {}
+ def __str__(self):
+ extras = []
+ if self.bu is not None:
+ extras.append('burnup: {:5.3f} MWd/kgu'.format(self.bu))
+ if self.step:
+ extras.append('step: {}'.format(self.step))
+ if self.day is not None:
+ extras.append('{:5.3f} days'.format(self.day))
+ if extras:
+ extras = ': ' + ', '.join(extras)
+ return '<{} {}{}>'.format(self.__class__.__name__, self.name,
+ extras or '')
+
def addData(self, variableName, variableValue, uncertainty=False):
"""
- Sets the value of the variable and, optionally, the associate s.d.
+ sets the value of the variable and, optionally, the associate s.d.
.. warning::
@@ -658,6 +699,8 @@ class BranchContainer(object):
Keys are tuples of
``(universeID, burnup, burnIndex)``
"""
+ __mismatchedBurnup = ("Was not expecting a {} value of burnup. "
+ "Expect burnup in units of {}")
def __init__(self, filePath, branchID, branchNames, stateData):
self.filePath = filePath
@@ -667,6 +710,7 @@ class BranchContainer(object):
self.branchNames = branchNames
self.__orderedUniverses = None
self.__keys = set()
+ self.__hasDays = None
def __str__(self):
return '<BranchContainer for {} from {}>'.format(
@@ -693,8 +737,11 @@ class BranchContainer(object):
Add a universe to this branch.
Data for the universes are produced at specific points in time.
- The additional arguments help track when the data for this
+ The additional arguments help track of when the data for this
universe were created.
+ A negative value of ``burnup`` indicates the units on burnup are
+ really ``days``. Therefore, the value of ``burnDays`` and ``burnup``
+ will be swapped.
.. warning::
@@ -705,7 +752,8 @@ class BranchContainer(object):
univID: int or str
Identifier for this universe
burnup: float or int
- Value of burnup [MWd/kgU]
+ Value of burnup [MWd/kgU]. A negative value here indicates
+ the value is really in units of days.
burnIndex: int
Point in the depletion schedule
burnDays: int or float
@@ -713,11 +761,24 @@ class BranchContainer(object):
Returns
-------
- newUniv: serpentTools.objects.containers.HomogUniv
+ serpentTools.objects.containers.HomogUniv
+ Empty new universe
+
"""
+ if self.__hasDays is None and burnup:
+ self.__hasDays = burnup < 0
+ if burnup < 0:
+ if not self.__hasDays:
+ raise SerpentToolsException(self.__mismatchedBurnup.format(
+ 'negative', 'MWd/kgU'))
+ burnup, burnDays = None if burnup else 0, - burnup
+ else:
+ if self.__hasDays and not burnDays:
+ raise SerpentToolsException(self.__mismatchedBurnup.format(
+ 'positive', 'days'))
+ burnDays = None if burnup else 0
newUniv = HomogUniv(univID, burnup, burnIndex, burnDays)
- key = tuple(
- [univID, burnup, burnIndex] + ([burnDays] if burnDays else []))
+ key = (univID, burnup or burnDays, burnIndex)
if key in self.__keys:
warning('Overwriting existing universe {} in {}'
.format(key, str(self)))
@@ -743,8 +804,8 @@ class BranchContainer(object):
Returns
-------
- univ: serpentTools.objects.containers.HomogUniv
- Requested Universe
+ :py:class:`~serpentTools.objects.containers.HomogUniv`
+ Requested universe
Raises
------
@@ -755,7 +816,7 @@ class BranchContainer(object):
"""
if burnup is None and index is None:
raise SerpentToolsException('Burnup or index are required inputs')
- searchIndex = 2 if index is not None else 1
+ searchIndex = 2 if index is not None else 1
searchValue = index if index is not None else burnup
for key in self.__keys:
if key[0] == univID and key[searchIndex] == searchValue:
@@ -766,3 +827,11 @@ class BranchContainer(object):
raise KeyError(
'Could not find a universe that matched requested universe {} and '
'{} {}'.format(univID, searchName, searchValue))
+
+ @property
+ def hasDays(self):
+ """Returns True if the burnups in the file are in units of days"""
+ if self.__hasDays is None:
+ raise AttributeError("Need to load at least one universe with "
+ "non-zero burnup first.""")
+ return self.__hasDays
diff --git a/serpentTools/parsers/branching.py b/serpentTools/parsers/branching.py
index 795036e..8b18e52 100644
--- a/serpentTools/parsers/branching.py
+++ b/serpentTools/parsers/branching.py
@@ -17,6 +17,10 @@ class BranchingReader(XSReader):
----------
filePath: str
path to the depletion file
+ branches: dict
+ Dictionary of branch names and their corresponding
+ :py:class:`~serpentTools.objects.containers.BranchContainer`
+ objects
"""
def __init__(self, filePath):
@@ -98,7 +102,7 @@ class BranchingReader(XSReader):
def _processBranchUniverses(self, branch, burnup, burnupIndex):
"""Add universe data to this branch at this burnup."""
unvID, numVariables = [int(xx) for xx in self._advance()]
- univ = branch.addUniverse(unvID, burnup, burnupIndex)
+ univ = branch.addUniverse(unvID, burnup, burnupIndex - 1)
for step in range(numVariables):
splitList = self._advance(
possibleEndOfFile=step == numVariables - 1)
| Universes from branching file can be stored with negative burnup
## Summary of issue
The burnup values in coefficient files can be negative, indicating units of days, not MWd/kgU
[Link to SERPENT coef card](http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual#coef_.28coefficient_matrix_definition.29). This leads to `HomogUniv` objects being created with negative values of burnup, which is _slightly_ nonsensical.
## Code for reproducing the issue
Read in a coefficient file that has negative values of burnup.
## Actual outcome including console output and error traceback if applicable
For some branchContainer `b` in the branching reader:
```
>>> b.universes.keys()
dict_keys([(3102, 0.0, 1), (3102, -200.0, 6), (3102, -328.5, 9), (3102, -246.375, 7), (3102, -280.0, 8), (3102, -164.25, 5), (3102, -82.125, 3), (3102, -120.0, 4), (3102, -40.0, 2)])
>>> univ = b.universes[(3102, -328.5, 9)]
>>> univ.bu
-328.5
>>> univ.day
0
```
## Expected outcome
Universes with non-negative burnup.
Maybe implement a better method for retrieving universes given values of days or burnup.
## Versions
* ``serpentTools.__version__`` 0.2.1+12.g68709f6 | CORE-GATECH-GROUP/serpent-tools | diff --git a/serpentTools/tests/test_branching.py b/serpentTools/tests/test_branching.py
index 8bfd381..be967f5 100644
--- a/serpentTools/tests/test_branching.py
+++ b/serpentTools/tests/test_branching.py
@@ -10,6 +10,7 @@ from numpy.testing import assert_allclose
from serpentTools.settings import rc
from serpentTools.tests import TEST_ROOT
from serpentTools.parsers import BranchingReader
+from serpentTools.objects.containers import BranchContainer
from serpentTools.messages import SerpentToolsException
@@ -22,7 +23,7 @@ class _BranchTesterHelper(unittest.TestCase):
cls.expectedBranches = {('nom', 'nom', 'nom')}
cls.expectedUniverses = {
# universe id, burnup, step
- (0, 0, 1),
+ (0, 0, 0),
}
with rc:
rc['serpentVersion'] = '2.1.29'
@@ -76,7 +77,7 @@ class BranchContainerTester(_BranchTesterHelper):
def setUpClass(cls):
_BranchTesterHelper.setUpClass()
cls.refBranchID = ('nom', 'nom', 'nom')
- cls.refUnivKey = (0, 0, 1)
+ cls.refUnivKey = (0, 0, 0)
cls.refBranch = cls.reader.branches[cls.refBranchID]
cls.refUniv = cls.refBranch.universes[cls.refUnivKey]
@@ -108,6 +109,18 @@ class BranchContainerTester(_BranchTesterHelper):
with self.assertRaises(SerpentToolsException):
self.refBranch.getUniv(key[0])
+ def test_cannotAddBurnupDays(self):
+ """
+ Verify that a universe cannot be added with burnup of opposite units.
+ """
+ containerWithBU = BranchContainer(None, None, None, None)
+ containerWithBU.addUniverse(101, 10, 1)
+ containerWithDays = BranchContainer(None, None, None, None)
+ containerWithDays.addUniverse(101, -10, 1)
+ with self.assertRaises(SerpentToolsException):
+ containerWithBU.addUniverse(101, -10, 1)
+ with self.assertRaises(SerpentToolsException):
+ containerWithDays.addUniverse(101, 10, 1)
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.11.1 matplotlib>=1.5.0 pyyaml>=3.08 scipy",
"pip_packages": [
"pytest",
"jupyter",
"nbconvert"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
entrypoints==0.4
idna==3.10
immutables==0.19
importlib-metadata==4.8.3
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
kiwisolver @ file:///tmp/build/80754af9/kiwisolver_1612282412546/work
MarkupSafe==2.0.1
matplotlib @ file:///tmp/build/80754af9/matplotlib-suite_1613407855456/work
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nest-asyncio==1.6.0
notebook==6.4.10
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
olefile @ file:///Users/ktietz/demo/mc3/conda-bld/olefile_1629805411829/work
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
Pillow @ file:///tmp/build/80754af9/pillow_1625670622947/work
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2025.2
PyYAML==5.4.1
pyzmq==25.1.2
requests==2.27.1
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
Send2Trash==1.8.3
-e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@433d55710b85805c9dee609ad77f81bc3c202b72#egg=serpentTools
six @ file:///tmp/build/80754af9/six_1644875935023/work
sniffio==1.2.0
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado @ file:///tmp/build/80754af9/tornado_1606942266872/work
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: serpent-tools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cycler=0.11.0=pyhd3eb1b0_0
- dbus=1.13.18=hb2f20db_0
- expat=2.6.4=h6a678d5_0
- fontconfig=2.14.1=h52c9d5c_1
- freetype=2.12.1=h4a9f257_0
- giflib=5.2.2=h5eee18b_0
- glib=2.69.1=h4ff587b_1
- gst-plugins-base=1.14.1=h6a678d5_1
- gstreamer=1.14.1=h5eee18b_1
- icu=58.2=he6710b0_3
- jpeg=9e=h5eee18b_3
- kiwisolver=1.3.1=py36h2531618_0
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libdeflate=1.22=h5eee18b_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libpng=1.6.39=h5eee18b_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libuuid=1.41.5=h5eee18b_0
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxml2=2.9.14=h74e7548_0
- lz4-c=1.9.4=h6a678d5_1
- matplotlib=3.3.4=py36h06a4308_0
- matplotlib-base=3.3.4=py36h62a2d02_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- olefile=0.46=pyhd3eb1b0_0
- openssl=1.1.1w=h7f8727e_0
- pcre=8.45=h295c915_0
- pillow=8.3.1=py36h5aabda8_0
- pip=21.2.2=py36h06a4308_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pyqt=5.9.2=py36h05f1152_2
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pyyaml=5.4.1=py36h27cfd23_1
- qt=5.9.7=h5867ecd_1
- readline=8.2=h5eee18b_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- sip=4.19.8=py36hf484d3e_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tornado=6.1=py36h27cfd23_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- entrypoints==0.4
- idna==3.10
- immutables==0.19
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pygments==2.14.0
- pyrsistent==0.18.0
- pytest==7.0.1
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- sniffio==1.2.0
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/serpent-tools
| [
"serpentTools/tests/test_branching.py::BranchTester::test_branchingUniverses",
"serpentTools/tests/test_branching.py::BranchContainerTester::test_cannotAddBurnupDays",
"serpentTools/tests/test_branching.py::BranchContainerTester::test_containerGetUniv",
"serpentTools/tests/test_branching.py::BranchContainerTester::test_loadedUniv"
]
| []
| [
"serpentTools/tests/test_branching.py::BranchTester::test_raiseError",
"serpentTools/tests/test_branching.py::BranchTester::test_variables"
]
| []
| MIT License | 2,326 | [
"serpentTools/parsers/branching.py",
"serpentTools/objects/containers.py",
"examples/Branching.ipynb",
"docs/examples/Branching.rst"
]
| [
"serpentTools/parsers/branching.py",
"serpentTools/objects/containers.py",
"examples/Branching.ipynb",
"docs/examples/Branching.rst"
]
|
|
desihub__desiutil-105 | 2ca867f079c18629bde67dd21c1d64642e75d115 | 2018-03-23 20:05:41 | e720a1e3b4e468af23e59ab618a3b2aaef383ae0 | diff --git a/doc/changes.rst b/doc/changes.rst
index ecd868d..400976d 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -7,8 +7,11 @@ Change Log
* Remove support for :command:`desiInstall` in environments other than
NERSC (PR `#101`_).
+* Try as best as possible that Python executable scripts are installed with
+ an explicit desiconda version (PR `#105`_).
.. _`#101`: https://github.com/desihub/desiutil/pull/101
+.. _`#105`: https://github.com/desihub/desiutil/pull/105
1.9.9 (2017-12-20)
------------------
diff --git a/py/desiutil/data/desiutil.module b/py/desiutil/data/desiutil.module
new file mode 100644
index 0000000..8d43cb0
--- /dev/null
+++ b/py/desiutil/data/desiutil.module
@@ -0,0 +1,77 @@
+#%Module1.0
+# The first line of this file tells Modules that this is a module file.
+# DO NOT ALTER IT!
+#
+# ABOUT THIS FILE
+#
+# This file is designed to be processed by Python. Specifically, this file
+# will be read into a string, and the .format() method will be applied to it.
+# This file is not a valid module file on its own.
+#
+# METADATA AND DOCUMENTATION SECTION
+#
+# This function is part of the Modules help system. You can modify
+# the second line if needed, but most products should
+# leave this alone.
+#
+proc ModulesHelp {{ }} {{
+ global product version
+ puts stderr "This module adds $product/$version to your environment."
+}}
+#
+# These variables are used below. The product variable should be set to
+# the name of the product and never changed. The version variable will
+# be set at install time, so it should be left alone. The conflict line
+# prevents multiple versions from being loaded simultaneously. Do not
+# change it.
+#
+set product {name}
+set version {version}
+conflict $product
+#
+# The line below is another part of the Modules help system. You can
+# modify the part in quotes if you really need to, but most products should
+# leave this alone.
+#
+module-whatis "Sets up $product/$version in your environment."
+#
+# DEPENDENCIES SECTION
+#
+# If your product requires other software to function, that should be declared
+# here. There are two types of dependencies: mandatory and optional.
+# A mandatory dependency is a module load command followed by a prereq
+# command. An optional dependency is not followed by a prereq statement.
+#
+# NO DEPENDENCIES
+#
+# ENVIRONMENT SECTION
+#
+# The PRODUCT_ROOT and PRODUCT_DIR variables are used to set other
+# environment variables but are not exported to the actual environment.
+# If you are not working at NERSC, but still want to use Modules, you
+# will need to set the DESI_PRODUCT_ROOT environment variable
+#
+if {{[info exists env(DESI_PRODUCT_ROOT)]}} {{
+ set code_root $env(DESI_PRODUCT_ROOT)/code
+}} else {{
+ set code_root {product_root}
+}}
+set PRODUCT_DIR $code_root/$product/$version
+#
+# This line creates an environment variable pointing to the install
+# directory of your product.
+#
+setenv [string toupper $product] $PRODUCT_DIR
+#
+# The lines below set various other environment variables. They assume the
+# template product layout. These will be set or commented as needed by the
+# desiInstall script.
+#
+{needs_bin}prepend-path PATH $PRODUCT_DIR/bin
+{needs_python}prepend-path PYTHONPATH $PRODUCT_DIR/lib/{pyversion}/site-packages
+{needs_trunk_py}prepend-path PYTHONPATH $PRODUCT_DIR{trunk_py_dir}
+{needs_ld_lib}prepend-path LD_LIBRARY_PATH $PRODUCT_DIR/lib
+{needs_idl}prepend-path IDL_PATH +$PRODUCT_DIR/pro
+#
+# Add any non-standard Module code below this point.
+#
diff --git a/py/desiutil/install.py b/py/desiutil/install.py
index 29b0000..3640bd5 100644
--- a/py/desiutil/install.py
+++ b/py/desiutil/install.py
@@ -10,16 +10,15 @@ This package contains code for installing DESI software products.
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# The line above will help with 2to3 support.
-from sys import argv, executable, path, version_info
-import requests
+import os
+import sys
import tarfile
import re
+import shutil
+import requests
from subprocess import Popen, PIPE
-from datetime import date
from types import MethodType
-from os import chdir, environ, getcwd, makedirs, remove, symlink
-from os.path import abspath, basename, exists, isdir, join
-from shutil import copytree, rmtree
+from pkg_resources import resource_filename
from .git import last_tag
from .log import get_logger, DEBUG, INFO
from .modules import (init_modules, configure_module,
@@ -104,7 +103,7 @@ def dependencies(modulefile):
ValueError
If `modulefile` can't be found.
"""
- if not exists(modulefile):
+ if not os.path.exists(modulefile):
raise ValueError("Modulefile {0} does not exist!".format(modulefile))
with open(modulefile) as m:
lines = m.readlines()
@@ -133,8 +132,6 @@ class DesiInstall(object):
this holds the object that reads it.
default_nersc_dir_template : :class:`str`
The default code and Modules install directory for every NERSC host.
- executable : :class:`str`
- The command used to invoke the script.
fullproduct : :class:`str`
The path to the product including its URL, *e.g.*,
"https://github.com/desihub/desiutil".
@@ -153,8 +150,6 @@ class DesiInstall(object):
product_url : :class:`str`
The URL that will be used to download the code. This differs from
`fullproduct` in that it includes the tag or branch name.
- test : :class:`bool`
- Captures the value of the `test` argument passed to the constructor.
"""
default_nersc_dir_template = '/global/common/software/desi/{nersc_host}/desiconda/{desiconda_version}'
@@ -190,12 +185,12 @@ class DesiInstall(object):
'NERSC_HOST': None}
for e in check_env:
try:
- check_env[e] = environ[e]
+ check_env[e] = os.environ[e]
except KeyError:
self.log.warning('The environment variable %s is not set!',
e)
parser = ArgumentParser(description="Install DESI software.",
- prog=basename(argv[0]))
+ prog=os.path.basename(sys.argv[0]))
parser.add_argument('-a', '--anaconda', action='store', dest='anaconda',
default=self.anaconda_version(), metavar='VERSION',
help="Set the version of the DESI+Anaconda software stack.")
@@ -303,7 +298,7 @@ class DesiInstall(object):
self.log.critical(message)
raise DesiInstallException(message)
if (self.options.moduleshome is None or
- not isdir(self.options.moduleshome)):
+ not os.path.isdir(self.options.moduleshome)):
message = "You do not appear to have Modules set up."
self.log.critical(message)
raise DesiInstallException(message)
@@ -327,7 +322,7 @@ class DesiInstall(object):
for name, value in self.config.items("Known Products"):
known_products[name] = value
if '/' in self.options.product:
- self.baseproduct = basename(self.options.product)
+ self.baseproduct = os.path.basename(self.options.product)
else:
self.baseproduct = self.options.product
try:
@@ -339,7 +334,7 @@ class DesiInstall(object):
self.baseproduct, self.fullproduct)
self.log.warning('Add location to desiutil.install.known_products ' +
'if that is incorrect.')
- self.baseversion = basename(self.options.product_version)
+ self.baseversion = os.path.basename(self.options.product_version)
self.github = False
if 'github.com' in self.fullproduct:
self.github = True
@@ -362,16 +357,16 @@ class DesiInstall(object):
if self.github:
self.product_url = self.fullproduct + '.git'
else:
- self.product_url = join(self.fullproduct,
- self.options.product_version)
+ self.product_url = os.path.join(self.fullproduct,
+ self.options.product_version)
else:
if self.github:
- self.product_url = join(self.fullproduct, 'archive',
- self.options.product_version +
- '.tar.gz')
+ self.product_url = os.path.join(self.fullproduct, 'archive',
+ self.options.product_version +
+ '.tar.gz')
else:
- self.product_url = join(self.fullproduct, 'tags',
- self.options.product_version)
+ self.product_url = os.path.join(self.fullproduct, 'tags',
+ self.options.product_version)
self.log.debug("Using %s as the URL of this product.",
self.product_url)
return self.product_url
@@ -429,20 +424,21 @@ class DesiInstall(object):
DesiInstallException
If any download errors are detected.
"""
- self.working_dir = join(abspath('.'), '{0}-{1}'.format(
- self.baseproduct, self.baseversion))
- if isdir(self.working_dir):
+ self.working_dir = os.path.join(os.path.abspath('.'),
+ '{0}-{1}'.format(self.baseproduct,
+ self.baseversion))
+ if os.path.isdir(self.working_dir):
self.log.info("Detected old working directory, %s. Deleting...",
self.working_dir)
- self.log.debug("rmtree('%s')", self.working_dir)
+ self.log.debug("shutil.rmtree('%s')", self.working_dir)
if not self.options.test:
- rmtree(self.working_dir)
+ shutil.rmtree(self.working_dir)
if self.github:
if self.is_trunk or self.is_branch:
if self.is_branch:
try:
- r = requests.get(join(self.fullproduct, 'tree',
- self.baseversion))
+ r = requests.get(os.path.join(self.fullproduct, 'tree',
+ self.baseversion))
r.raise_for_status()
except requests.exceptions.HTTPError:
message = ("Branch {0} does not appear to exist. " +
@@ -466,10 +462,10 @@ class DesiInstall(object):
self.log.critical(message)
raise DesiInstallException(message)
if self.is_branch:
- original_dir = getcwd()
- self.log.debug("chdir('%s')", self.working_dir)
+ original_dir = os.getcwd()
+ self.log.debug("os.chdir('%s')", self.working_dir)
if not self.options.test:
- chdir(self.working_dir)
+ os.chdir(self.working_dir)
command = ['git', 'checkout', '-q', '-b', self.baseversion,
'origin/'+self.baseversion]
self.log.debug(' '.join(command))
@@ -485,9 +481,9 @@ class DesiInstall(object):
" {0}".format(err))
self.log.critical(message)
raise DesiInstallException(message)
- self.log.debug("chdir('%s')", original_dir)
+ self.log.debug("os.chdir('%s')", original_dir)
if not self.options.test:
- chdir(original_dir)
+ os.chdir(original_dir)
else:
if self.options.test:
self.log.debug("Test Mode. Skipping download of %s.",
@@ -508,12 +504,14 @@ class DesiInstall(object):
tf.extractall()
tf.close()
tgz.close()
- self.working_dir = join(abspath('.'), '{0}-{1}'.format(
- self.baseproduct, self.baseversion))
+ self.working_dir = os.path.join(os.path.abspath('.'),
+ '{0}-{1}'.format(self.baseproduct,
+ self.baseversion))
if self.baseversion.startswith('v'):
- nov = join(abspath('.'), '{0}-{1}'.format(
- self.baseproduct, self.baseversion[1:]))
- if exists(nov):
+ nov = os.path.join(os.path.abspath('.'),
+ '{0}-{1}'.format(self.baseproduct,
+ self.baseversion[1:]))
+ if os.path.exists(nov):
self.working_dir = nov
except tarfile.TarError as e:
message = "tar error while expanding product code!"
@@ -556,14 +554,14 @@ class DesiInstall(object):
self.log.debug("Forcing build type: make")
build_type.add('make')
else:
- if exists(join(self.working_dir, 'setup.py')):
+ if os.path.exists(os.path.join(self.working_dir, 'setup.py')):
self.log.debug("Detected build type: py")
build_type.add('py')
- if exists(join(self.working_dir, 'Makefile')):
+ if os.path.exists(os.path.join(self.working_dir, 'Makefile')):
self.log.debug("Detected build type: make")
build_type.add('make')
else:
- if isdir(join(self.working_dir, 'src')):
+ if os.path.isdir(os.path.join(self.working_dir, 'src')):
self.log.debug("Detected build type: src")
build_type.add('src')
return build_type
@@ -578,11 +576,11 @@ class DesiInstall(object):
The DESI+Anaconda version.
"""
try:
- desiconda = environ['DESICONDA']
+ desiconda = os.environ['DESICONDA']
except KeyError:
return 'current'
try:
- return basename(desiconda[:desiconda.index('/code/desiconda')])
+ return os.path.basename(desiconda[:desiconda.index('/conda')])
except ValueError:
return 'current'
@@ -614,23 +612,23 @@ class DesiInstall(object):
The directory selected for installation.
"""
try:
- self.nersc = environ['NERSC_HOST']
+ self.nersc = os.environ['NERSC_HOST']
except KeyError:
self.nersc = None
- if self.options.root is None or not isdir(self.options.root):
+ if self.options.root is None or not os.path.isdir(self.options.root):
if self.nersc is not None:
self.options.root = self.default_nersc_dir()
else:
message = "Root install directory is missing or not set."
self.log.critical(message)
raise DesiInstallException(message)
- self.install_dir = join(self.options.root, 'code', self.baseproduct,
- self.baseversion)
- if isdir(self.install_dir) and not self.options.test:
+ self.install_dir = os.path.join(self.options.root, 'code',
+ self.baseproduct, self.baseversion)
+ if os.path.isdir(self.install_dir) and not self.options.test:
if self.options.force:
- self.log.debug("rmtree('%s')", self.install_dir)
+ self.log.debug("shutil.rmtree('%s')", self.install_dir)
if not self.options.test:
- rmtree(self.install_dir)
+ shutil.rmtree(self.install_dir)
else:
message = ("Install directory, {0}, already exists!".format(
self.install_dir))
@@ -668,17 +666,11 @@ class DesiInstall(object):
:class:`list`
The list of dependencies.
"""
- self.module_file = join(self.working_dir, 'etc',
- self.baseproduct + '.module')
- if not exists(self.module_file):
- try:
- self.module_file = join(environ['DESIUTIL'], 'etc',
- 'desiutil.module')
- except KeyError:
- message = ("DESIUTIL is not set. " +
- "Is desiutil installed and loaded?")
- self.log.critical(message)
- raise DesiInstallException(message)
+ self.module_file = os.path.join(self.working_dir, 'etc',
+ self.baseproduct + '.module')
+ if not os.path.exists(self.module_file):
+ self.module_file = resource_filename('desiutil',
+ 'data/desiutil.module')
if self.options.test:
self.log.debug('Test Mode. Skipping loading of dependencies.')
self.deps = list()
@@ -686,7 +678,7 @@ class DesiInstall(object):
self.deps = dependencies(self.module_file)
for d in self.deps:
base_d = d.split('/')[0]
- if base_d in environ['LOADEDMODULES']:
+ if base_d in os.environ['LOADEDMODULES']:
m_command = 'switch'
else:
m_command = 'load'
@@ -704,11 +696,11 @@ class DesiInstall(object):
return None
else:
if self.baseproduct == 'desimodules':
- nersc_module = join(self.default_nersc_dir_template.format(nersc_host=self.nersc, desiconda_version='startup'),
- 'modulefiles')
+ nersc_module = os.path.join(self.default_nersc_dir_template.format(nersc_host=self.nersc, desiconda_version='startup'),
+ 'modulefiles')
else:
- nersc_module = join(self.default_nersc_dir(),
- 'modulefiles')
+ nersc_module = os.path.join(self.default_nersc_dir(),
+ 'modulefiles')
if not hasattr(self, 'config'):
return nersc_module
if self.config is not None:
@@ -735,14 +727,14 @@ class DesiInstall(object):
if self.is_trunk or self.is_branch:
dev = True
else:
- if isdir(join(self.working_dir, 'py')):
+ if os.path.isdir(os.path.join(self.working_dir, 'py')):
dev = True
self.log.debug("configure_module(%s, %s, working_dir=%s, dev=%s)",
self.baseproduct, self.baseversion,
self.working_dir, dev)
self.module_keywords = configure_module(self.baseproduct,
self.baseversion,
- join(self.options.root, 'code'),
+ os.path.join(self.options.root, 'code'),
working_dir=self.working_dir,
dev=dev)
if self.options.moduledir is None:
@@ -750,18 +742,18 @@ class DesiInstall(object):
# We didn't set a module dir, so derive it from options.root
#
if self.nersc is None:
- self.options.moduledir = join(self.options.root, 'modulefiles')
+ self.options.moduledir = os.path.join(self.options.root, 'modulefiles')
else:
self.options.moduledir = self.nersc_module_dir
self.log.debug("nersc_module_dir set to %s.",
self.options.moduledir)
if not self.options.test:
- if not isdir(self.options.moduledir):
+ if not os.path.isdir(self.options.moduledir):
self.log.info("Creating Modules directory %s.",
self.options.moduledir)
- self.log.debug("makedirs('%s')", self.options.moduledir)
+ self.log.debug("os.makedirs('%s')", self.options.moduledir)
try:
- makedirs(self.options.moduledir)
+ os.makedirs(self.options.moduledir)
except OSError as ose:
self.log.critical(ose.strerror)
raise DesiInstallException(ose.strerror)
@@ -793,12 +785,12 @@ class DesiInstall(object):
:class:`str`
The current working directory. Because we're about to change it.
"""
- environ['WORKING_DIR'] = self.working_dir
- environ['INSTALL_DIR'] = self.install_dir
+ os.environ['WORKING_DIR'] = self.working_dir
+ os.environ['INSTALL_DIR'] = self.install_dir
if self.baseproduct == 'desiutil':
- environ['DESIUTIL'] = self.install_dir
+ os.environ['DESIUTIL'] = self.install_dir
else:
- if self.baseproduct in environ['LOADEDMODULES']:
+ if self.baseproduct in os.environ['LOADEDMODULES']:
m_command = 'switch'
else:
m_command = 'load'
@@ -809,9 +801,9 @@ class DesiInstall(object):
env_version = self.baseproduct.upper() + '_VERSION'
# The current install script expects a version in the form of
# branches/test-0.4 or tags/0.4.4 or trunk
- if env_version not in environ:
- environ[env_version] = 'tags/'+self.baseversion
- self.original_dir = getcwd()
+ if env_version not in os.environ:
+ os.environ[env_version] = 'tags/'+self.baseversion
+ self.original_dir = os.getcwd()
return self.original_dir
def get_extra(self):
@@ -819,19 +811,14 @@ class DesiInstall(object):
This is done here so that :envvar:`INSTALL_DIR` is defined.
"""
- extra_script = join(self.working_dir, 'etc',
- '{0}_data.sh'.format(self.baseproduct))
- if exists(extra_script):
+ extra_script = os.path.join(self.working_dir, 'etc',
+ '{0}_data.sh'.format(self.baseproduct))
+ if os.path.exists(extra_script):
self.log.debug("Detected extra script: %s.", extra_script)
- self.log.debug("makedirs('%s')", self.install_dir)
+ self.log.debug("os.makedirs('%s')", self.install_dir)
if self.options.test:
self.log.debug('Test Mode. Skipping install of extra data.')
else:
- try:
- makedirs(self.install_dir)
- except OSError as ose:
- self.log.critical(ose.strerror)
- raise DesiInstallException(ose.strerror)
proc = Popen([extra_script], universal_newlines=True,
stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
@@ -851,55 +838,55 @@ class DesiInstall(object):
# For certain installs, all that is needed is to copy the
# downloaded code to the install directory.
#
- self.log.debug("copytree('%s', '%s')",
+ self.log.debug("shutil.copytree('%s', '%s')",
self.working_dir, self.install_dir)
if self.options.test:
self.log.debug("Test mode. Skipping copy of %s to %s.",
self.working_dir, self.install_dir)
else:
- copytree(self.working_dir, self.install_dir)
+ shutil.copytree(self.working_dir, self.install_dir)
else:
#
# Run a 'real' install
#
- # chdir(self.working_dir)
+ # os.chdir(self.working_dir)
if 'py' in self.build_type:
#
# For Python installs, a site-packages directory needs to
# exist. We may need to manipulate sys.path to include this
# directory.
#
- lib_dir = join(self.install_dir, 'lib',
- self.module_keywords['pyversion'],
- 'site-packages')
+ lib_dir = os.path.join(self.install_dir, 'lib',
+ self.module_keywords['pyversion'],
+ 'site-packages')
if self.options.test:
self.log.debug("Test Mode. Skipping creation of %s.",
lib_dir)
else:
- self.log.debug("makedirs('%s')", lib_dir)
+ self.log.debug("os.makedirs('%s')", lib_dir)
try:
- makedirs(lib_dir)
+ os.makedirs(lib_dir)
except OSError as ose:
self.log.critical(ose.strerror)
raise DesiInstallException(ose.strerror)
- if lib_dir not in path:
+ if lib_dir not in sys.path:
try:
newpythonpath = (lib_dir + ':' +
- environ['PYTHONPATH'])
+ os.environ['PYTHONPATH'])
except KeyError:
newpythonpath = lib_dir
- environ['PYTHONPATH'] = newpythonpath
- path.insert(int(path[0] == ''), lib_dir)
+ os.environ['PYTHONPATH'] = newpythonpath
+ sys.path.insert(int(sys.path[0] == ''), lib_dir)
#
# Ready to python setup.py
#
- command = [executable, 'setup.py', 'install',
+ command = [sys.executable, 'setup.py', 'install',
'--prefix={0}'.format(self.install_dir)]
self.log.debug(' '.join(command))
if self.options.test:
self.log.debug("Test Mode. Skipping 'python setup.py install'.")
else:
- chdir(self.working_dir)
+ os.chdir(self.working_dir)
proc = Popen(command, universal_newlines=True,
stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
@@ -941,9 +928,9 @@ class DesiInstall(object):
self.log.debug("Test Mode. Skipping 'make install'.")
else:
if 'src' in self.build_type:
- chdir(self.install_dir)
+ os.chdir(self.install_dir)
else:
- chdir(self.working_dir)
+ os.chdir(self.working_dir)
proc = Popen(command, universal_newlines=True,
stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
@@ -964,6 +951,31 @@ class DesiInstall(object):
raise DesiInstallException(message)
return
+ def verify_bootstrap(self):
+ """Make sure that desiutil/desiInstall was installed with
+ an explicit Python executable path.
+
+ For anything besides an initial bootstrap install of desiutil,
+ this function does nothing.
+
+ Returns
+ -------
+ :class:`bool`
+ Returns ``True`` if everything is OK.
+ """
+ if self.options.bootstrap:
+ desiInstall = os.path.join(self.install_dir, 'bin', 'desiInstall')
+ with open(desiInstall, 'r') as d:
+ lines = d.readlines()
+ self.log.debug("%s", lines[0].strip())
+ if self.options.anaconda not in lines[0]:
+ message = ("desiInstall executable ({0}) does not contain " +
+ "an explicit desiconda version " +
+ "({1})!").format(desiInstall, self.options.anaconda)
+ self.log.critical(message)
+ raise DesiInstallException(message)
+ return True
+
def permissions(self):
"""Fix possible install permission errors.
@@ -994,13 +1006,13 @@ class DesiInstall(object):
:class:`bool`
Returns ``True``
"""
- self.log.debug("chdir('%s')", self.original_dir)
+ self.log.debug("os.chdir('%s')", self.original_dir)
if not self.options.test:
- chdir(self.original_dir)
+ os.chdir(self.original_dir)
if not self.options.keep:
- self.log.debug("rmtree('%s')", self.working_dir)
+ self.log.debug("shutil.rmtree('%s')", self.working_dir)
if not self.options.test:
- rmtree(self.working_dir)
+ shutil.rmtree(self.working_dir)
return True
def run(self): # pragma: no cover
@@ -1026,6 +1038,7 @@ class DesiInstall(object):
self.prepare_environment()
self.get_extra()
self.install()
+ self.verify_bootstrap()
self.permissions()
except DesiInstallException:
return 1
| installed code shouldn't reference desiconda/current
A path problem for the 18.2 install, which we need to fix for 18.3:
```
[cori10 ~] source /project/projectdirs/desi/software/desi_environment_18.2.sh
[cori10 ~] echo $DESICONDA
/global/common/software/desi/cori/desiconda/20180130-1.2.4-spec/conda
[cori10 ~] which desi_extract_spectra
/global/common/software/desi/cori/desiconda/current/code/desispec/0.18.0/bin/desi_extract_spectra
[cori10 ~] head $(which desi_extract_spectra )
#!/global/common/software/desi/cori/desiconda/current/conda/bin/python
# EASY-INSTALL-SCRIPT: 'desispec==0.18.0','desi_extract_spectra'
__requires__ = 'desispec==0.18.0'
__import__('pkg_resources').run_script('desispec==0.18.0', 'desi_extract_spectra')
```
Note that those are in terms of desiconda/current instead of desiconda/20180130-1.2.4-spec . Right now those are the same, but when we update which version of desiconda is "current" it will change the underlying desiconda version used when someone configures 18.2. For 18.3, we should make sure that all the paths use desiconda/20180130-1.2.4-spec and don't rely upon "current".
As a procedure, I hope this is as simple as configuring desiconda/20180130-1.2.4-spec instead of desiconda/current before proceeding with installations. It would be extra helpful if desiInstall could resolve symlinks into their final non-symlinked path; unfortunately `os.path.abspath` doesn't do that.
| desihub/desiutil | diff --git a/py/desiutil/test/test_install.py b/py/desiutil/test/test_install.py
index 54b5487..8f78efb 100644
--- a/py/desiutil/test/test_install.py
+++ b/py/desiutil/test/test_install.py
@@ -266,10 +266,10 @@ class TestInstall(unittest.TestCase):
with patch.dict('os.environ', {'DESICONDA': 'FOO'}):
v = self.desiInstall.anaconda_version()
self.assertEqual(v, 'current')
- environ['DESICONDA'] = '/global/common/software/desi/cori/desiconda/20170613-1.1.4-spectro/code/desiconda/20170613-1.1.4-spectro_conda'
+ environ['DESICONDA'] = '/global/common/software/desi/cori/desiconda/20170613-1.1.4-spectro/conda'
v = self.desiInstall.anaconda_version()
self.assertEqual(v, '20170613-1.1.4-spectro')
- environ['DESICONDA'] = '/global/common/software/desi/cori/desiconda/20170613-1.1.4-spectro/CODE/desiconda/20170613-1.1.4-spectro_conda'
+ environ['DESICONDA'] = '/global/common/software/desi/cori/desiconda/20170613-1.1.4-spectro/code/desiconda/20170613-1.1.4-spectro_conda'
v = self.desiInstall.anaconda_version()
self.assertEqual(v, 'current')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 1.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
contourpy==1.3.0
cycler==0.12.1
-e git+https://github.com/desihub/desiutil.git@2ca867f079c18629bde67dd21c1d64642e75d115#egg=desiutil
exceptiongroup==1.2.2
fonttools==4.56.0
healpy==1.17.3
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
numpy==1.26.4
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyerfa==2.0.1.5
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
zipp==3.21.0
| name: desiutil
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- contourpy==1.3.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- healpy==1.17.3
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numpy==1.26.4
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyerfa==2.0.1.5
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/desiutil
| [
"py/desiutil/test/test_install.py::TestInstall::test_anaconda_version"
]
| [
"py/desiutil/test/test_install.py::TestInstall::test_get_options",
"py/desiutil/test/test_install.py::TestInstall::test_verify_url"
]
| [
"py/desiutil/test/test_install.py::TestInstall::test_build_type",
"py/desiutil/test/test_install.py::TestInstall::test_cleanup",
"py/desiutil/test/test_install.py::TestInstall::test_default_nersc_dir",
"py/desiutil/test/test_install.py::TestInstall::test_dependencies",
"py/desiutil/test/test_install.py::TestInstall::test_get_product_version",
"py/desiutil/test/test_install.py::TestInstall::test_identify_branch",
"py/desiutil/test/test_install.py::TestInstall::test_nersc_module_dir",
"py/desiutil/test/test_install.py::TestInstall::test_sanity_check",
"py/desiutil/test/test_install.py::TestInstall::test_set_install_dir",
"py/desiutil/test/test_install.py::test_suite"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,328 | [
"doc/changes.rst",
"py/desiutil/install.py",
"py/desiutil/data/desiutil.module"
]
| [
"doc/changes.rst",
"py/desiutil/install.py",
"py/desiutil/data/desiutil.module"
]
|
|
google__yapf-543 | 39e0290b2b0f9f678122391d29da6733081b99b9 | 2018-03-24 13:31:27 | 841ad411adef77a38bf9e98f5ab843d65f3177d7 | googlebot:
Thanks for your pull request. It looks like this may be your first contribution to a Google open source project (if not, look below for help). Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).
:memo: **Please visit <https://cla.developers.google.com/> to sign.**
Once you've signed (or fixed any issues), please reply here (e.g. `I signed it!`) and we'll verify it.
----
#### What to do if you already signed the CLA
##### Individual signers
* It's possible we don't have your GitHub username or you're using a different email address on your commit. Check [your existing CLA data](https://cla.developers.google.com/clas) and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/).
##### Corporate signers
* Your company has a Point of Contact who decides which employees are authorized to participate. Ask your POC to be added to the group of authorized contributors. If you don't know who your Point of Contact is, direct the project maintainer to `go/cla#troubleshoot`.
* The email used to register you as an authorized contributor must be the email used for the Git commit. Check [your existing CLA data](https://cla.developers.google.com/clas) and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/).
* The email used to register you as an authorized contributor must also be [attached to your GitHub account](https://github.com/settings/emails).
<!-- need_sender_cla -->
coveralls:
[](https://coveralls.io/builds/16158749)
Coverage decreased (-0.01%) to 95.51% when pulling **e7c0a5f6e640b3eab0ef3e33e1c604eff394662c on bj00rn:master** into **8d90c204e8abecc59d8e9a6037782293f26fa7f8 on google:master**.
bj00rn: @gwelymernans will you look at this?
softinio: @bj00rn Is this PR active? Likely to be approved? Looks like you need to sign CLA.
gwelymernans: I'm sorry about the delay. It's been busy.
Could you please update `CHANGELOG` and `README.rst`. Also, you need to sign the CLA for me to accept this.
bj00rn: I signed it!
googlebot: CLAs look good, thanks!
<!-- ok -->
bj00rn: @gwelymernans updated & signed!
bcollazo: Anything new on this front? Looking forward to using the feature! Thanks.
gwelymernans: Please resolve the merge conflicts as well. | diff --git a/CHANGELOG b/CHANGELOG
index 698e6ea..504fcde 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -2,6 +2,10 @@
# All notable changes to this project will be documented in this file.
# This project adheres to [Semantic Versioning](http://semver.org/).
+## [0.26.0] UNRELEASED
+### Added
+- Support additional file exclude patterns in .yapfignore file.
+
## [0.25.0] UNRELEASED
### Added
- Added `INDENT_BLANK_LINES` knob to select whether the blank lines are empty
diff --git a/README.rst b/README.rst
index 366a80e..f5773e0 100644
--- a/README.rst
+++ b/README.rst
@@ -136,6 +136,14 @@ If ``--diff`` is supplied, YAPF returns zero when no changes were necessary, non
otherwise (including program error). You can use this in a CI workflow to test that code
has been YAPF-formatted.
+---------------------------------------------
+Excluding files from formatting (.yapfignore)
+---------------------------------------------
+
+In addition to exlude patterns provided on commandline, YAPF looks for additional
+patterns specified in a file named ``.yapfignore`` located in the working directory from
+which YAPF is invoked.
+
Formatting style
================
diff --git a/yapf/__init__.py b/yapf/__init__.py
index c6ffc4d..bda5b81 100644
--- a/yapf/__init__.py
+++ b/yapf/__init__.py
@@ -188,8 +188,13 @@ def main(argv):
file_resources.WriteReformattedCode('<stdout>', reformatted_source)
return 0
- files = file_resources.GetCommandLineFiles(args.files, args.recursive,
- args.exclude)
+ # Get additional exclude patterns from ignorefile
+ exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir(
+ os.getcwd())
+
+ files = file_resources.GetCommandLineFiles(
+ args.files, args.recursive,
+ (args.exclude or []) + exclude_patterns_from_ignore_file)
if not files:
raise errors.YapfError('Input filenames did not match any python files')
diff --git a/yapf/yapflib/file_resources.py b/yapf/yapflib/file_resources.py
index 6e7202d..18e6c6b 100644
--- a/yapf/yapflib/file_resources.py
+++ b/yapf/yapflib/file_resources.py
@@ -32,6 +32,36 @@ LF = '\n'
CRLF = '\r\n'
+def _GetExcludePatternsFromFile(filename):
+ ignore_patterns = []
+ # See if we have a .yapfignore file.
+ if os.path.isfile(filename) and os.access(filename, os.R_OK):
+ for line in open(filename, 'r').readlines():
+ if line.strip() and not line.startswith('#'):
+ ignore_patterns.append(line.strip())
+
+ if any(e.startswith('./') for e in ignore_patterns):
+ raise errors.YapfError('path in .yapfignore should not start with ./')
+
+ return ignore_patterns
+
+
+def GetExcludePatternsForDir(dirname):
+ """Return patterns of files to exclude from ignorefile in a given directory.
+
+ Looks for .yapfignore in the directory dirname.
+
+ Arguments:
+ dirname: (unicode) The name of the directory.
+
+ Returns:
+ A List of file patterns to exclude if ignore file is found
+ , otherwhise empty List.
+ """
+ ignore_file = os.path.join(dirname, '.yapfignore')
+ return _GetExcludePatternsFromFile(ignore_file)
+
+
def GetDefaultStyleForDir(dirname):
"""Return default style name for a given directory.
| Ignore files
It was great if one could add files to ignore to a knobs config. This will apply for recursive mode. Sometimes there are auto generated files where it makes no sense to improve the style. | google/yapf | diff --git a/yapftests/file_resources_test.py b/yapftests/file_resources_test.py
index 8391e5f..0f413f8 100644
--- a/yapftests/file_resources_test.py
+++ b/yapftests/file_resources_test.py
@@ -36,6 +36,30 @@ def _restore_working_dir():
os.chdir(curdir)
+class GetExcludePatternsForDir(unittest.TestCase):
+
+ def setUp(self):
+ self.test_tmpdir = tempfile.mkdtemp()
+
+ def tearDown(self):
+ shutil.rmtree(self.test_tmpdir)
+
+ def _make_test_dir(self, name):
+ fullpath = os.path.normpath(os.path.join(self.test_tmpdir, name))
+ os.makedirs(fullpath)
+ return fullpath
+
+ def test_get_exclude_file_patterns(self):
+ local_ignore_file = os.path.join(self.test_tmpdir, '.yapfignore')
+ ignore_patterns = ['temp/**/*.py', 'temp2/*.py']
+ with open(local_ignore_file, 'w') as f:
+ f.writelines('\n'.join(ignore_patterns))
+
+ self.assertEqual(
+ sorted(file_resources.GetExcludePatternsForDir(self.test_tmpdir)),
+ sorted(ignore_patterns))
+
+
class GetDefaultStyleForDirTest(unittest.TestCase):
def setUp(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 4
} | 0.24 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/google/yapf.git@39e0290b2b0f9f678122391d29da6733081b99b9#egg=yapf
| name: yapf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-cov==6.0.0
prefix: /opt/conda/envs/yapf
| [
"yapftests/file_resources_test.py::GetExcludePatternsForDir::test_get_exclude_file_patterns"
]
| []
| [
"yapftests/file_resources_test.py::GetDefaultStyleForDirTest::test_no_local_style",
"yapftests/file_resources_test.py::GetDefaultStyleForDirTest::test_with_local_style",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_find_files_not_dirs",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_find_with_excluded_current_dir",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_find_with_excluded_dirs",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_find_with_excluded_hidden_dirs",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_find_with_excluded_hidden_dirs_relative",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_nonrecursive_find_in_dir",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_recursive_find_in_dir",
"yapftests/file_resources_test.py::GetCommandLineFilesTest::test_recursive_find_in_dir_with_exclude",
"yapftests/file_resources_test.py::IsPythonFileTest::test_empty_without_py_extension",
"yapftests/file_resources_test.py::IsPythonFileTest::test_python_shebang",
"yapftests/file_resources_test.py::IsPythonFileTest::test_with_invalid_encoding",
"yapftests/file_resources_test.py::IsPythonFileTest::test_with_latin_encoding",
"yapftests/file_resources_test.py::IsPythonFileTest::test_with_py_extension",
"yapftests/file_resources_test.py::IsIgnoredTest::test_root_path",
"yapftests/file_resources_test.py::IsIgnoredTest::test_sub_path",
"yapftests/file_resources_test.py::IsIgnoredTest::test_trailing_slash",
"yapftests/file_resources_test.py::WriteReformattedCodeTest::test_write_encoded_to_stdout",
"yapftests/file_resources_test.py::WriteReformattedCodeTest::test_write_to_file",
"yapftests/file_resources_test.py::WriteReformattedCodeTest::test_write_to_stdout"
]
| []
| Apache License 2.0 | 2,329 | [
"README.rst",
"yapf/__init__.py",
"yapf/yapflib/file_resources.py",
"CHANGELOG"
]
| [
"README.rst",
"yapf/__init__.py",
"yapf/yapflib/file_resources.py",
"CHANGELOG"
]
|
epochblue__nanogen-11 | dfa6393e9c03290146be9d9e0676a40f84e517a5 | 2018-03-25 19:47:40 | dfa6393e9c03290146be9d9e0676a40f84e517a5 | diff --git a/.gitignore b/.gitignore
index ae20bd2..7133977 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,5 @@ dist
# nanogen files
example/_site
+example/_preview
nanogen.log
diff --git a/nanogen/cli.py b/nanogen/cli.py
index b78f549..1206f7f 100644
--- a/nanogen/cli.py
+++ b/nanogen/cli.py
@@ -7,9 +7,6 @@ from nanogen import version
from nanogen import models
-blog = models.Blog(os.getcwd())
-
-
@click.group()
@click.option('-v', '--verbose', count=True, help='Turn on verbose output.')
@click.version_option(version=version.version)
@@ -20,18 +17,21 @@ def cli(verbose):
@cli.command()
def init():
"""Initialize the current directory."""
+ blog = models.Blog(os.getcwd())
blog.init()
@cli.command()
def clean():
"""Clean any generated files."""
+ blog = models.Blog(os.getcwd())
blog.clean()
@cli.command()
def build():
"""Start a build of the site."""
+ blog = models.Blog(os.getcwd())
blog.build()
@@ -39,6 +39,7 @@ def build():
@click.argument('title')
def new(title):
"""Create a new post with the given title"""
+ blog = models.Blog(os.getcwd())
try:
blog.new_post(title)
except ValueError as ve:
@@ -48,6 +49,7 @@ def new(title):
@click.argument('title')
def draft(title):
"""Create a new draft post with the given title"""
+ blog = models.Blog(os.getcwd())
try:
blog.new_post(title, draft=True)
except ValueError as ve:
@@ -59,25 +61,27 @@ def draft(title):
@click.option('-p', '--port', default=8080, type=int, help='The port to serve on')
def preview(host, port):
"""Serve a preview of the site on HOST and PORT."""
- site_dir = os.path.join(os.getcwd(), '_site')
- if not os.path.isdir(site_dir):
- click.ClickException('Unable to locate _site directory. Did you forget to run `nanogen build`?')
+ blog = models.Blog(os.getcwd(), is_preview=True)
+ blog.clean()
+ blog.build()
try:
import SimpleHTTPServer, BaseHTTPServer
handler = SimpleHTTPServer.SimpleHTTPRequestHandler
handler.protocol_version = 'HTTP/1.0'
- httpd = BaseHTTPServer.HTTPServer((host, 8080), handler)
+ httpd = BaseHTTPServer.HTTPServer((host, port), handler)
except ImportError:
import http.server
handler = http.server.SimpleHTTPRequestHandler
handler.protocol_version = 'HTTP/1.0'
- httpd = http.server.HTTPServer((host, 8080), handler)
+ httpd = http.server.HTTPServer((host, port), handler)
try:
click.secho('Serving your site on http://{host}:{port}/...'.format(host=host, port=port))
click.secho('Press <Ctrl-C> to stop the server.\n')
- os.chdir(site_dir)
+
+ os.chdir(blog.PATHS['preview'])
+
httpd.serve_forever()
except KeyboardInterrupt:
httpd.server_close()
diff --git a/nanogen/models.py b/nanogen/models.py
index 368bd05..c9f55bf 100644
--- a/nanogen/models.py
+++ b/nanogen/models.py
@@ -73,17 +73,19 @@ class Post(object):
class Blog(object):
- def __init__(self, base_dir):
+ def __init__(self, base_dir, is_preview=False):
self.PATHS = {
'cwd': base_dir,
'site': os.path.join(base_dir, '_site'),
+ 'preview': os.path.join(base_dir, '_preview'),
'posts': os.path.join(base_dir, '_posts'),
'drafts': os.path.join(base_dir, '_drafts'),
'layout': os.path.join(base_dir, '_layout')
}
self.config = self.parse_config()
- self.posts = self.collect_posts()
+ self.output_dir = self.PATHS['preview'] if is_preview else self.PATHS['site']
+ self.posts = self.collect_posts(include_drafts=is_preview)
jinja_loader = jinja2.FileSystemLoader(self.PATHS['layout'])
self.jinja_env = jinja2.Environment(loader=jinja_loader)
@@ -100,10 +102,12 @@ class Blog(object):
config.read(os.path.join(self.PATHS['cwd'], 'blog.cfg'))
return config
- def collect_posts(self):
+ def collect_posts(self, include_drafts=False):
"""
Finds valid post files within the posts directory.
+ :param include_drafts: True if draft posts should be included
+ :type include_drafts: bool
:return: A list of found posts
:rtype: list
"""
@@ -113,9 +117,17 @@ class Blog(object):
ls = os.listdir(self.PATHS['posts'])
post_path = lambda path: os.path.join(self.PATHS['posts'], path)
- return [Post(self.PATHS['site'], post_path(p))
- for p in ls
- if utils.is_valid_post_file(p)]
+ posts = [Post(self.output_dir, post_path(p))
+ for p in ls
+ if utils.is_valid_post_file(p)]
+
+ if include_drafts:
+ ls = os.listdir(self.PATHS['drafts'])
+ drafts_path = lambda path: os.path.join(self.PATHS['drafts'], path)
+ posts.extend([Post(self.output_dir, drafts_path(p))
+ for p in ls
+ if utils.is_valid_post_file(p)])
+ return posts
def generate_posts(self):
"""
@@ -150,21 +162,26 @@ class Blog(object):
posts = self.posts
logger.log.debug('Rendering index.html')
- filepath = os.path.join(self.PATHS['site'], 'index.html')
+ output_file = os.path.join(self.output_dir, 'index.html')
template = self.jinja_env.get_template('index.html')
html = template.render(site=self.config['site'], posts=list(reversed(posts)))
logger.log.debug('Writing page to disk: index.html')
- with open(filepath, 'w') as pout:
+ with open(output_file, 'w') as pout:
pout.write(html)
def generate_feeds(self):
+ """
+ Generate RSS and JSON feed files, if templates for them exist.
+
+ :return: None
+ """
logger.log.debug('Writing feed pages...')
posts = self.posts
for feed in ('rss.xml', 'feed.json'):
logger.log.debug('Rendering index.html')
- filepath = os.path.join(self.PATHS['site'], feed)
+ output_file = os.path.join(self.output_dir, feed)
try:
template = self.jinja_env.get_template(feed)
@@ -175,22 +192,25 @@ class Blog(object):
html = template.render(site=self.config['site'], posts=list(reversed(posts)))
logger.log.debug('Writing page to disk: %s', feed)
- with open(filepath, 'w') as pout:
+ with open(output_file, 'w') as pout:
pout.write(html)
def copy_static_files(self):
"""
- Copy static files into the _sites directory.
+ Copy static files into the output directory.
:return: None
"""
layout_static = os.path.join(self.PATHS['layout'], 'static')
- site_static = os.path.join(self.PATHS['site'], 'static')
+ output_static = os.path.join(self.output_dir, 'static')
+
+ if not os.path.isdir(layout_static):
+ return
- if os.path.isdir(os.path.join(site_static)):
- shutil.rmtree(site_static)
+ if os.path.isdir(os.path.join(output_static)):
+ shutil.rmtree(output_static)
- shutil.copytree(layout_static, site_static)
+ shutil.copytree(layout_static, output_static)
def init(self):
"""
@@ -216,13 +236,13 @@ class Blog(object):
def build(self):
"""
- Generate the site. Will create the _site dir if one doesn't already exist.
+ Generate the site. Will create the output dir if necessary.
:return: None
"""
- if not os.path.isdir(self.PATHS['site']):
- logger.log.debug('Creating site directory...')
- subprocess.call(['mkdir', self.PATHS['site']])
+ if not os.path.isdir(self.output_dir):
+ logger.log.debug('Creating output directory...')
+ subprocess.call(['mkdir', self.output_dir])
self.generate_posts()
self.generate_index_page()
@@ -236,9 +256,8 @@ class Blog(object):
:return: None
"""
logger.log.info('Cleaning generated files...')
- site_dir = self.PATHS['site']
- if os.path.isdir(site_dir):
- subprocess.call(['rm', '-r', site_dir])
+ if os.path.isdir(self.output_dir):
+ subprocess.call(['rm', '-r', self.output_dir])
def new_post(self, title, draft=False):
"""
| Improve `preview` command
The `preview` command currently only serves up the `_site` directory, which is created by the `build` command. This is OK, but it doesn't allow for previewing draft posts. The `preview` command should instead:
1. Collect and generate all available posts
2. Collect and generate all available drafts
3. Spit them into a separate `_site_preview` folder
4. Serve that `_site_preview` folder instead of the `_site` folder
Notes:
* Perhaps the default behavior of the `preview` command remains the same, and this new "with drafts" behavior is available via a CLI flag? | epochblue/nanogen | diff --git a/tests/test_models.py b/tests/test_models.py
index a9e5080..8342ad0 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -224,6 +224,25 @@ def test_blog_build_and_clean(tmpdir):
with mock.patch('subprocess.call'):
blog.new_post('Test title 1', draft=False)
+ post_template = path.join('_layout').join('post.html')
+ post_template.write("""\
+ <!doctype html>
+ <html>
+ <body>Post template would go here.</body>
+ </html>
+ """)
+
+ index_template = path.join('_layout').join('index.html')
+ index_template.write("""\
+ <!doctype html>
+ <html>
+ <body>Index template would go here.</body>
+ </html>
+ """)
+
+ blog_config = path.join('_layout').join('blog.cfg')
+ blog_config.write(example_config)
+
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path))
blog.build()
@@ -239,3 +258,52 @@ def test_blog_build_and_clean(tmpdir):
blog.clean()
assert not os.path.isdir(str(site_path))
+
+
+def test_blog_build_and_clean_with_drafts(tmpdir):
+ path = tmpdir.mkdir('blog')
+ preview_path = path.mkdir('_preview')
+
+ # Set up a nanogen blog for posts
+ blog = models.Blog(str(path))
+ blog.init()
+
+ with mock.patch('subprocess.call'):
+ blog.new_post('Test post', draft=False)
+ blog.new_post('Draft post', draft=True)
+
+ post_template = path.join('_layout').join('post.html')
+ post_template.write("""\
+ <!doctype html>
+ <html>
+ <body>Post template would go here.</body>
+ </html>
+ """)
+
+ index_template = path.join('_layout').join('index.html')
+ index_template.write("""\
+ <!doctype html>
+ <html>
+ <body>Index template would go here.</body>
+ </html>
+ """)
+
+ blog_config = path.join('_layout').join('blog.cfg')
+ blog_config.write(example_config)
+
+ # Refresh the blog instance to better emulate real-world usage
+ blog = models.Blog(str(path), is_preview=True)
+ blog.build()
+
+ site_dir = [os.path.basename(str(file)) for file in preview_path.listdir()]
+ assert 'index.html' in site_dir
+
+ today = datetime.date.today()
+ expected_post_dir = preview_path.join('{}'.format(today.year)).join('{:02d}'.format(today.month))
+ generated_posts = [os.path.basename(str(file)) for file in expected_post_dir.listdir()]
+ assert len(generated_posts) == 2
+ assert 'test-post.html' in generated_posts
+ assert 'draft-post.html' in generated_posts
+
+ blog.clean()
+ assert not os.path.isdir(str(preview_path))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==6.7
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==2.10
MarkupSafe==2.0.1
mistune==0.8.3
mistune-contrib==0.1
-e git+https://github.com/epochblue/nanogen.git@dfa6393e9c03290146be9d9e0676a40f84e517a5#egg=nanogen
packaging==21.3
pluggy==1.0.0
py==1.11.0
Pygments==2.2.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: nanogen
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==6.7
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==2.10
- markupsafe==2.0.1
- mistune==0.8.3
- mistune-contrib==0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nanogen
| [
"tests/test_models.py::test_blog_build_and_clean_with_drafts"
]
| []
| [
"tests/test_models.py::test_post",
"tests/test_models.py::test_blog_create",
"tests/test_models.py::test_blog_init",
"tests/test_models.py::test_blog_new_post",
"tests/test_models.py::test_blog_new_draft",
"tests/test_models.py::test_blog_copy_static_files",
"tests/test_models.py::test_blog_generate_posts",
"tests/test_models.py::test_blog_generate_index_page",
"tests/test_models.py::test_blog_generate_feeds_no_feed_files",
"tests/test_models.py::test_blog_feeds",
"tests/test_models.py::test_blog_build_and_clean"
]
| []
| MIT License | 2,331 | [
"nanogen/models.py",
".gitignore",
"nanogen/cli.py"
]
| [
"nanogen/models.py",
".gitignore",
"nanogen/cli.py"
]
|
|
G-Node__python-odml-269 | c16f9891c4363dfcf907fd7daa076acba4cbe5eb | 2018-03-26 12:56:13 | eeff5922987b064681d1328f81af317d8171808f | diff --git a/odml/property.py b/odml/property.py
index 74e31f7..894296d 100644
--- a/odml/property.py
+++ b/odml/property.py
@@ -22,9 +22,9 @@ class BaseProperty(base.baseobject, Property):
dependency=None, dependency_value=None, dtype=None,
value_origin=None, id=None):
"""
- Create a new Property with a single value. The method will try to infer
- the value's dtype from the type of the value if not explicitly stated.
- Example for a property with
+ Create a new Property. If a value without an explicitly stated dtype
+ has been provided, the method will try to infer the value's dtype.
+ Example:
>>> p = Property("property1", "a string")
>>> p.dtype
>>> str
@@ -34,21 +34,25 @@ class BaseProperty(base.baseobject, Property):
>>> p = Property("prop", [2, 3, 4])
>>> p.dtype
>>> int
- :param name: The name of the property
- :param value: Some data value, this may be a list of homogeneous values
+ :param name: The name of the property.
+ :param value: Some data value, it can be a single value or
+ a list of homogeneous values.
:param unit: The unit of the stored data.
- :param uncertainty: the uncertainty (e.g. the standard deviation)
+ :param uncertainty: The uncertainty (e.g. the standard deviation)
associated with a measure value.
:param reference: A reference (e.g. an URL) to an external definition
of the value.
:param definition: The definition of the property.
:param dependency: Another property this property depends on.
:param dependency_value: Dependency on a certain value.
- :param dtype: the data type of the values stored in the property,
- if dtype is not given, the type is deduced from the values
+ :param dtype: The data type of the values stored in the property,
+ if dtype is not given, the type is deduced from the values.
+ Check odml.DType for supported data types.
:param value_origin: Reference where the value originated from e.g. a file name.
+ :param id: UUID string as specified in RFC 4122. If no id is provided,
+ an id will be generated and assigned. An id has to be unique
+ within an odML Document.
"""
- # TODO validate arguments
try:
if id is not None:
self._id = str(uuid.UUID(id))
@@ -84,7 +88,7 @@ class BaseProperty(base.baseobject, Property):
def new_id(self, id=None):
"""
- new_id sets the id of the current object to a RFC 4122 compliant UUID.
+ new_id sets the id of the current object to an RFC 4122 compliant UUID.
If an id was provided, it is assigned if it is RFC 4122 UUID format compliant.
If no id was provided, a new UUID is generated and assigned.
:param id: UUID string as specified in RFC 4122.
@@ -108,7 +112,7 @@ class BaseProperty(base.baseobject, Property):
@property
def dtype(self):
"""
- The data type of the value
+ The data type of the value. Check odml.DType for supported data types.
"""
return self._dtype
@@ -116,11 +120,9 @@ class BaseProperty(base.baseobject, Property):
def dtype(self, new_type):
"""
If the data type of a property value is changed, it is tried
- to convert the value to the new type.
- If this doesn't work, the change is refused.
-
- This behaviour can be overridden by directly accessing the *_dtype*
- attribute and adjusting the *data* attribute manually.
+ to convert existing values to the new type. If this doesn't work,
+ the change is refused. The dtype can always be changed, if
+ a Property does not contain values.
"""
# check if this is a valid type
if not dtypes.valid_type(new_type):
@@ -139,7 +141,7 @@ class BaseProperty(base.baseobject, Property):
@property
def parent(self):
"""
- The section containing this property
+ The section containing this property.
"""
return self._parent
@@ -170,29 +172,30 @@ class BaseProperty(base.baseobject, Property):
@property
def value(self):
"""
- Returns the value(s) stored in this property. Method always returns a list that
- is a copy (!) of the stored value. Changing this list will NOT change the property.
- For manipulation of the stored values use the append, extend, and direct access methods
- (using brackets).
+ Returns the value(s) stored in this property. Method always returns a list
+ that is a copy (!) of the stored value. Changing this list will NOT change
+ the property.
+ For manipulation of the stored values use the append, extend, and direct
+ access methods (using brackets).
For example:
- >> p = odml.Property("prop", value=[1, 2, 3])
- >> print(p.value)
+ >>> p = odml.Property("prop", value=[1, 2, 3])
+ >>> print(p.value)
[1, 2, 3]
- >> p.value.append(4)
- >> print(p.value)
+ >>> p.value.append(4)
+ >>> print(p.value)
[1, 2, 3]
Individual values can be accessed and manipulated like this:
>>> print(p[0])
[1]
- >> p[0] = 4
- >> print(p[0])
+ >>> p[0] = 4
+ >>> print(p[0])
[4]
The values can be iterated e.g. with a loop:
- >> for v in p.value:
- print(v)
+ >>> for v in p.value:
+ >>> print(v)
4
2
3
@@ -201,18 +204,18 @@ class BaseProperty(base.baseobject, Property):
def value_str(self, index=0):
"""
- Used to access typed data of the value as a string.
- Use data to access the raw type, i.e.:
+ Used to access typed data of the value at a specific
+ index position as a string.
"""
return dtypes.set(self._value[index], self._dtype)
def _validate_values(self, values):
"""
- Method ensures that the passed value(s) can be cast to the
- same dtype, i.e. that associated with this property or the
- inferred dtype of the first entry of the values list.
+ Method ensures that the passed value(s) can be cast to the
+ same dtype, i.e. that are associated with this property or the
+ inferred dtype of the first entry of the values list.
- :param values an iterable that contains the values
+ :param values: an iterable that contains the values.
"""
for v in values:
try:
@@ -227,7 +230,7 @@ class BaseProperty(base.baseobject, Property):
If new_value is a string, it will convert it to a list of
strings if the new_value contains embracing brackets.
- returns list of new_value
+ :return: list of new_value
"""
if isinstance(new_value, str):
if new_value[0] == "[" and new_value[-1] == "]":
@@ -241,21 +244,22 @@ class BaseProperty(base.baseobject, Property):
elif not isinstance(new_value, list):
new_value = [new_value]
else:
- raise ValueError("odml.Property._convert_value_input: unsupported data type for values: %s" % type(new_value))
+ raise ValueError("odml.Property._convert_value_input: "
+ "unsupported data type for values: %s" % type(new_value))
return new_value
@value.setter
def value(self, new_value):
"""
-
Set the value of the property discarding any previous information.
Method will try to convert the passed value to the dtype of
- the property and raise an ValueError, if not possible
+ the property and raise an ValueError if not possible.
- :param new_value a single value or list of values.
+ :param new_value: a single value or list of values.
"""
# Make sure boolean value 'False' gets through as well...
- if new_value is None or (isinstance(new_value, (list, tuple, str)) and len(new_value) == 0):
+ if new_value is None or \
+ (isinstance(new_value, (list, tuple, str)) and len(new_value) == 0):
self._value = []
return
@@ -285,6 +289,8 @@ class BaseProperty(base.baseobject, Property):
@uncertainty.setter
def uncertainty(self, new_value):
+ if new_value == "":
+ new_value = None
self._uncertainty = new_value
@property
@@ -339,9 +345,9 @@ class BaseProperty(base.baseobject, Property):
def remove(self, value):
"""
- Remove a value from this property and unset its parent.
- Raises a TypeError if this would cause the property not to hold any
- value at all. This can be circumvented by using the *_values* property.
+ Remove a value from this property. Only the first encountered
+ occurrence of the passed in value is removed from the properties
+ list of values.
"""
if value in self._value:
self._value.remove(value)
@@ -358,6 +364,7 @@ class BaseProperty(base.baseobject, Property):
def clone(self):
"""
Clone this object to copy it independently to another document.
+ The id of the cloned object will be set to a different uuid.
"""
obj = super(BaseProperty, self).clone()
obj._parent = None
@@ -367,23 +374,23 @@ class BaseProperty(base.baseobject, Property):
return obj
def merge(self, other, strict=True):
- """Merges the property 'other' into self, if possible. Information
- will be synchronized. Method will raise an ValueError when the
+ """
+ Merges the property 'other' into self, if possible. Information
+ will be synchronized. Method will raise a ValueError when the
information in this property and the passed property are in
conflict.
- :param other a Property
- :param strict Bool value to indicate whether types should be
- implicitly converted even when information may be lost. Default is True, i.e. no conversion, and error will be raised if types do not match.
-
+ :param other: an odML Property.
+ :param strict: Bool value to indicate whether types should be implicitly converted
+ even when information may be lost. Default is True, i.e. no conversion,
+ and a ValueError will be raised if types do not match.
"""
- assert(isinstance(other, (BaseProperty)))
+ assert(isinstance(other, BaseProperty))
if strict and self.dtype != other.dtype:
raise ValueError("odml.Property.merge: src and dest dtypes do not match!")
if self.unit is not None and other.unit is not None and self.unit != other.unit:
- raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!"
- % (other.unit, self.unit))
+ raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!" % (other.unit, self.unit))
if self.definition is not None and other.definition is not None:
self_def = ''.join(map(str.strip, self.definition.split())).lower()
@@ -422,14 +429,14 @@ class BaseProperty(base.baseobject, Property):
def unmerge(self, other):
"""
- Stub that doesn't do anything for this class
+ Stub that doesn't do anything for this class.
"""
pass
def get_merged_equivalent(self):
"""
- Return the merged object (i.e. if the section is linked to another one,
- return the corresponding property of the linked section) or None
+ Return the merged object (i.e. if the parent section is linked to another one,
+ return the corresponding property of the linked section) or None.
"""
if self.parent is None or self.parent._merged is None:
return None
@@ -466,17 +473,18 @@ class BaseProperty(base.baseobject, Property):
def extend(self, obj, strict=True):
"""
- Extend the list of values stored in this property by the passed values. Method will
- raise an ValueError, if values cannot be converted to the current dtype. One can also pass
- another Property to append all values stored in that one. In this case units must match!
+ Extend the list of values stored in this property by the passed values. Method
+ will raise a ValueError, if values cannot be converted to the current dtype.
+ One can also pass another Property to append all values stored in that one.
+ In this case units must match!
- :param obj single value, list of values or Property
- :param strict a Bool that controls whether dtypes must match. Default is True.
+ :param obj: single value, list of values or a Property.
+ :param strict: a Bool that controls whether dtypes must match. Default is True.
"""
if isinstance(obj, BaseProperty):
- if (obj.unit != self.unit):
- raise ValueError("odml.Property.append: src and dest units (%s, %s) do not match!"
- % (obj.unit, self.unit))
+ if obj.unit != self.unit:
+ raise ValueError("odml.Property.extend: src and dest units (%s, %s) "
+ "do not match!" % (obj.unit, self.unit))
self.extend(obj.value)
return
@@ -486,29 +494,41 @@ class BaseProperty(base.baseobject, Property):
new_value = self._convert_value_input(obj)
if len(new_value) > 0 and strict and dtypes.infer_dtype(new_value[0]) != self.dtype:
- raise ValueError("odml.Property.extend: passed value data type does not match dtype!");
+ raise ValueError("odml.Property.extend: "
+ "passed value data type does not match dtype!")
if not self._validate_values(new_value):
- raise ValueError("odml.Property.append: passed value(s) cannot be converted to "
- "data type \'%s\'!" % self._dtype)
+ raise ValueError("odml.Property.extend: passed value(s) cannot be converted "
+ "to data type \'%s\'!" % self._dtype)
self._value.extend([dtypes.get(v, self.dtype) for v in new_value])
def append(self, obj, strict=True):
"""
- Append a single value to the list of stored values. Method will raise an ValueError if
- the passed value cannot be converted to the current dtype.
+ Append a single value to the list of stored values. Method will raise
+ a ValueError if the passed value cannot be converted to the current dtype.
- :param obj the additional value.
- :param strict a Bool that controls whether dtypes must match. Default is True.
+ :param obj: the additional value.
+ :param strict: a Bool that controls whether dtypes must match. Default is True.
"""
+ # Ignore empty values before nasty stuff happens, but make sure
+ # 0 and False get through.
+ if obj in [None, "", [], {}]:
+ return
+
+ if not self.value:
+ self.value = obj
+ return
+
new_value = self._convert_value_input(obj)
if len(new_value) > 1:
raise ValueError("odml.property.append: Use extend to add a list of values!")
+
if len(new_value) > 0 and strict and dtypes.infer_dtype(new_value[0]) != self.dtype:
- raise ValueError("odml.Property.extend: passed value data type does not match dtype!");
+ raise ValueError("odml.Property.append: "
+ "passed value data type does not match dtype!")
if not self._validate_values(new_value):
- raise ValueError("odml.Property.append: passed value(s) cannot be converted to "
- "data type \'%s\'!" % self._dtype)
- self._value.append(dtypes.get(new_value[0], self.dtype))
+ raise ValueError("odml.Property.append: passed value(s) cannot be converted "
+ "to data type \'%s\'!" % self._dtype)
+ self._value.append(dtypes.get(new_value[0], self.dtype))
| Property.append returns dtype error on unset dtype
When using `Property.append` of a Property where neither value nor dtype are set, a dtype mismatch related ValueError is raised. | G-Node/python-odml | diff --git a/test/test_property.py b/test/test_property.py
index d0dc673..f0aa976 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -10,26 +10,67 @@ class TestProperty(unittest.TestCase):
def setUp(self):
pass
+ def test_simple_attributes(self):
+ p_name = "propertyName"
+ p_origin = "from over there"
+ p_unit = "pears"
+ p_uncertainty = "+-12"
+ p_ref = "4 8 15 16 23"
+ p_def = "an odml test property"
+ p_dep = "yes"
+ p_dep_val = "42"
+
+ prop = Property(name=p_name, value_origin=p_origin, unit=p_unit,
+ uncertainty=p_uncertainty, reference=p_ref, definition=p_def,
+ dependency=p_dep, dependency_value=p_dep_val)
+
+ self.assertEqual(prop.name, p_name)
+ self.assertEqual(prop.value_origin, p_origin)
+ self.assertEqual(prop.unit, p_unit)
+ self.assertEqual(prop.uncertainty, p_uncertainty)
+ self.assertEqual(prop.reference, p_ref)
+ self.assertEqual(prop.definition, p_def)
+ self.assertEqual(prop.dependency, p_dep)
+ self.assertEqual(prop.dependency_value, p_dep_val)
+
+ # Test setting attributes
+ prop.name = "%s_edit" % p_name
+ self.assertEqual(prop.name, "%s_edit" % p_name)
+ prop.value_origin = "%s_edit" % p_origin
+ self.assertEqual(prop.value_origin, "%s_edit" % p_origin)
+ prop.unit = "%s_edit" % p_unit
+ self.assertEqual(prop.unit, "%s_edit" % p_unit)
+ prop.uncertainty = "%s_edit" % p_uncertainty
+ self.assertEqual(prop.uncertainty, "%s_edit" % p_uncertainty)
+ prop.reference = "%s_edit" % p_ref
+ self.assertEqual(prop.reference, "%s_edit" % p_ref)
+ prop.definition = "%s_edit" % p_def
+ self.assertEqual(prop.definition, "%s_edit" % p_def)
+ prop.dependency = "%s_edit" % p_dep
+ self.assertEqual(prop.dependency, "%s_edit" % p_dep)
+ prop.dependency_value = "%s_edit" % p_dep_val
+ self.assertEqual(prop.dependency_value, "%s_edit" % p_dep_val)
+
+ # Test setting attributes to None when '' is passed.
+ prop.value_origin = ""
+ self.assertIsNone(prop.value_origin)
+ prop.unit = ""
+ self.assertIsNone(prop.unit)
+ prop.uncertainty = ""
+ self.assertIsNone(prop.uncertainty)
+ prop.reference = ""
+ self.assertIsNone(prop.reference)
+ prop.definition = ""
+ self.assertIsNone(prop.definition)
+ prop.dependency = ""
+ self.assertIsNone(prop.dependency)
+ prop.dependency_value = ""
+ self.assertIsNone(prop.dependency_value)
+
def test_value(self):
p = Property("property", 100)
self.assertEqual(p.value[0], 100)
- self.assertEqual(type(p.value), list)
-
- p.append(10)
- self.assertEqual(len(p), 2)
- self.assertRaises(ValueError, p.append, [1, 2, 3])
-
- p.extend([20, 30, '40'])
- self.assertEqual(len(p), 5)
- with self.assertRaises(ValueError):
- p.append('invalid')
- with self.assertRaises(ValueError):
- p.extend(('5', 6, 7))
-
- p2 = Property("property 2", 3)
- self.assertRaises(ValueError, p.append, p2)
- p.extend(p2)
- self.assertEqual(len(p), 6)
+ self.assertIsInstance(p.value, list)
p.value = None
self.assertEqual(len(p), 0)
@@ -46,42 +87,178 @@ class TestProperty(unittest.TestCase):
p.value = ()
self.assertEqual(len(p), 0)
- p3 = Property("test", value=2, unit="Hz")
- p4 = Property("test", value=5.5, unit="s")
+ p.value.append(5)
+ self.assertEqual(len(p.value), 0)
+
+ p2 = Property("test", {"name": "Marie", "name": "Johanna"})
+ self.assertEqual(len(p2), 1)
+
+ # Test tuple dtype value.
+ t = Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
+ tuple_value = t.value[0] # As the formed tuple is a list of list
+ self.assertEqual(tuple_value[0], '39.12')
+ self.assertEqual(tuple_value[1], '67.19')
+ # Test invalid tuple length
with self.assertRaises(ValueError):
- p3.append(p4)
+ _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='2-tuple')
- p.value.append(5)
- self.assertEqual(len(p.value), 0)
- self.assertRaises(ValueError, p.append, 5.5)
+ def test_value_append(self):
+ # Test append w/o Property value or dtype
+ prop = Property(name="append")
+ prop.append(1)
+ self.assertEqual(prop.dtype, DType.int)
+ self.assertEqual(prop.value, [1])
+
+ # Test append with Property dtype.
+ prop = Property(name="append", dtype="int")
+ prop.append(3)
+ self.assertEqual(prop.value, [3])
+
+ # Test append with Property value
+ prop = Property(name="append", value=[1, 2])
+ prop.append(3)
+ self.assertEqual(prop.value, [1, 2, 3])
+
+ # Test append with Property list value
+ prop = Property(name="append", value=[1, 2])
+ prop.append([3])
+ self.assertEqual(prop.value, [1, 2, 3])
+
+ # Test append of empty values, make sure 0 and False are properly handled
+ prop = Property(name="append")
+ prop.append(None)
+ prop.append("")
+ prop.append([])
+ prop.append({})
+ self.assertEqual(prop.value, [])
+
+ prop.append(0)
+ self.assertEqual(prop.value, [0])
+
+ prop.value = None
+ prop.dtype = None
+ prop.append(False)
+ self.assertEqual(prop.value, [False])
+
+ prop = Property(name="append", value=[1, 2])
+ prop.append(None)
+ prop.append("")
+ prop.append([])
+ prop.append({})
+ self.assertEqual(prop.value, [1, 2])
- p.append(5.5, strict=False)
- self.assertEqual(len(p), 1)
+ prop.append(0)
+ self.assertEqual(prop.value, [1, 2, 0])
- self.assertRaises(ValueError, p.extend, [3.14, 6.28])
- p.extend([3.14, 6.28], strict=False)
- self.assertEqual(len(p), 3)
+ # Test fail append with multiple values
+ prop = Property(name="append", value=[1, 2, 3])
+ with self.assertRaises(ValueError):
+ prop.append([4, 5])
+ self.assertEqual(prop.value, [1, 2, 3])
+
+ # Test fail append with mismatching dtype
+ prop = Property(name="append", value=[1, 2], dtype="int")
+ with self.assertRaises(ValueError):
+ prop.append([3.14])
+ with self.assertRaises(ValueError):
+ prop.append([True])
+ with self.assertRaises(ValueError):
+ prop.append(["5.927"])
+ self.assertEqual(prop.value, [1, 2])
+
+ # Test strict flag
+ prop.append(3.14, strict=False)
+ prop.append(True, strict=False)
+ prop.append("5.927", strict=False)
+ self.assertEqual(prop.value, [1, 2, 3, 1, 5])
+
+ # Make sure non-convertible values still raise an error
+ with self.assertRaises(ValueError):
+ prop.append("invalid")
+ self.assertEqual(prop.value, [1, 2, 3, 1, 5])
p5 = Property("test", value="a string")
p5.append("Freude")
self.assertEqual(len(p5), 2)
self.assertRaises(ValueError, p5.append, "[a, b, c]")
- p5.extend("[a, b, c]")
- self.assertEqual(len(p5), 5)
- p6 = Property("test", {"name": "Marie", "name": "Johanna"})
- self.assertEqual(len(p6), 1)
+ def test_value_extend(self):
+ prop = Property(name="extend")
- # Test tuple dtype value.
- t = Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
- tuple_value = t.value[0] # As the formed tuple is a list of list
- self.assertEqual(tuple_value[0], '39.12')
- self.assertEqual(tuple_value[1], '67.19')
+ # Test extend w/o Property value or dtype.
+ val = [1, 2, 3]
+ prop.extend(val)
+ self.assertEqual(prop.dtype, DType.int)
+ self.assertEqual(prop.value, val)
- # Test invalid tuple length
+ # Extend with single value.
+ prop.extend(4)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ # Extend with list value.
+ prop.extend([5, 6])
+ self.assertEqual(prop.value, [1, 2, 3, 4, 5, 6])
+
+ # Test extend w/o Property value
+ prop = Property(name="extend", dtype="float")
+ prop.extend([1.0, 2.0, 3.0])
+ self.assertEqual(prop.value, [1.0, 2.0, 3.0])
+
+ # Test extend with Property value
+ prop = Property(name="extend", value=10)
+ prop.extend([20, 30, '40'])
+ self.assertEqual(prop.value, [10, 20, 30, 40])
+
+ # Test extend fail with mismatching dtype
with self.assertRaises(ValueError):
- _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='2-tuple')
+ prop.extend(['5', 6, 7])
+ with self.assertRaises(ValueError):
+ prop.extend([5, 6, 'a'])
+
+ # Test extend via Property
+ prop = Property(name="extend", value=["a", "b"])
+ ext_prop = Property(name="value extend", value="c")
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, ["a", "b", "c"])
+
+ ext_prop.value = ["d", "e"]
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, ["a", "b", "c", "d", "e"])
+
+ ext_prop = Property(name="value extend", value=[1, 2 ,3])
+ with self.assertRaises(ValueError):
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, ["a", "b", "c", "d", "e"])
+
+ # Test extend via Property unit check
+ prop = Property(name="extend", value=[1, 2], unit="mV")
+ ext_prop = Property(name="extend", value=[3, 4], unit="mV")
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ ext_prop.unit = "kV"
+ with self.assertRaises(ValueError):
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ ext_prop.unit = ""
+ with self.assertRaises(ValueError):
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ # Test strict flag
+ prop = Property(name="extend", value=[1, 2], dtype="int")
+ with self.assertRaises(ValueError):
+ prop.extend([3.14, True, "5.927"])
+ self.assertEqual(prop.value, [1, 2])
+
+ prop.extend([3.14, True, "5.927"], strict=False)
+ self.assertEqual(prop.value, [1, 2, 3, 1, 5])
+
+ # Make sure non-convertible values still raise an error
+ with self.assertRaises(ValueError):
+ prop.extend([6, "some text"])
def test_get_set_value(self):
values = [1, 2, 3, 4, 5]
@@ -150,9 +327,6 @@ class TestProperty(unittest.TestCase):
assert(p.dtype == 'string')
assert(p.value == ['7', '20', '1 Dog', 'Seven'])
- def test_name(self):
- pass
-
def test_parent(self):
p = Property("property_section", parent=Section("S"))
self.assertIsInstance(p.parent, BaseSection)
@@ -206,6 +380,12 @@ class TestProperty(unittest.TestCase):
with self.assertRaises(AttributeError):
prop.dtype = "x-tuple"
+ # Test not setting None when a property contains values.
+ prop.value = [1, 2, 3]
+ self.assertIsNotNone(prop.dtype)
+ prop.dtype = None
+ self.assertIsNotNone(prop.dtype)
+
def test_get_path(self):
doc = Document()
sec = Section(name="parent", parent=doc)
@@ -218,14 +398,6 @@ class TestProperty(unittest.TestCase):
prop.parent = sec
self.assertEqual("/%s:%s" % (sec.name, prop.name), prop.get_path())
- def test_value_origin(self):
- p = Property("P")
- self.assertEqual(p.value_origin, None)
- p = Property("P", value_origin="V")
- self.assertEqual(p.value_origin, "V")
- p.value_origin = ""
- self.assertEqual(p.value_origin, None)
-
def test_id(self):
p = Property(name="P")
self.assertIsNotNone(p.id)
diff --git a/test/test_property_integration.py b/test/test_property_integration.py
index 479883f..cf30d59 100644
--- a/test/test_property_integration.py
+++ b/test/test_property_integration.py
@@ -106,6 +106,7 @@ class TestPropertyIntegration(unittest.TestCase):
self.assertEqual(jprop.unit, p_unit)
self.assertEqual(jprop.uncertainty, p_uncertainty)
self.assertEqual(jprop.reference, p_ref)
+ self.assertEqual(jprop.definition, p_def)
self.assertEqual(jprop.dependency, p_dep)
self.assertEqual(jprop.dependency_value, p_dep_val)
@@ -116,6 +117,7 @@ class TestPropertyIntegration(unittest.TestCase):
self.assertEqual(xprop.unit, p_unit)
self.assertEqual(xprop.uncertainty, p_uncertainty)
self.assertEqual(xprop.reference, p_ref)
+ self.assertEqual(xprop.definition, p_def)
self.assertEqual(xprop.dependency, p_dep)
self.assertEqual(xprop.dependency_value, p_dep_val)
@@ -126,5 +128,6 @@ class TestPropertyIntegration(unittest.TestCase):
self.assertEqual(yprop.unit, p_unit)
self.assertEqual(yprop.uncertainty, p_uncertainty)
self.assertEqual(yprop.reference, p_ref)
+ self.assertEqual(yprop.definition, p_def)
self.assertEqual(yprop.dependency, p_dep)
self.assertEqual(yprop.dependency_value, p_dep_val)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "lxml enum34 pyyaml rdflib",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libxml2-dev libxslt1-dev lib32z1-dev"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
brotlipy==0.7.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
html5lib @ file:///Users/ktietz/demo/mc3/conda-bld/html5lib_1629144453894/work
idna @ file:///tmp/build/80754af9/idna_1637925883363/work
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate @ file:///Users/ktietz/demo/mc3/conda-bld/isodate_1630584690429/work
keepalive @ file:///home/conda/feedstock_root/build_artifacts/keepalive_1635948558527/work
lxml @ file:///tmp/build/80754af9/lxml_1616442911898/work
-e git+https://github.com/G-Node/python-odml.git@c16f9891c4363dfcf907fd7daa076acba4cbe5eb#egg=odML
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305763431/work
pytest==7.0.1
PyYAML==5.4.1
rdflib @ file:///home/conda/feedstock_root/build_artifacts/rdflib_1610581402529/work
requests @ file:///opt/conda/conda-bld/requests_1641824580448/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
SPARQLWrapper @ file:///home/conda/feedstock_root/build_artifacts/sparqlwrapper_1629916978493/work
tomli==1.2.3
typing_extensions==4.1.1
urllib3 @ file:///opt/conda/conda-bld/urllib3_1643638302206/work
webencodings==0.5.1
zipp==3.6.0
| name: python-odml
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- brotlipy=0.7.0=py36h27cfd23_1003
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- cryptography=35.0.0=py36hd23ed53_0
- enum34=1.1.10=py36h06a4308_0
- html5lib=1.1=pyhd3eb1b0_0
- icu=58.2=he6710b0_3
- idna=3.3=pyhd3eb1b0_0
- isodate=0.6.0=pyhd3eb1b0_1
- keepalive=0.5=pyhd8ed1ab_6
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libxml2=2.9.14=h74e7548_0
- libxslt=1.1.35=h4e12654_0
- lxml=4.6.3=py36h9120a33_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pysocks=1.7.1=py36h06a4308_0
- python=3.6.13=h12debd9_1
- python_abi=3.6=2_cp36m
- pyyaml=5.4.1=py36h27cfd23_1
- rdflib=5.0.0=py36h5fab9bb_3
- readline=8.2=h5eee18b_0
- requests=2.27.1=pyhd3eb1b0_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sparqlwrapper=1.8.5=py36h5fab9bb_1006
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- urllib3=1.26.8=pyhd3eb1b0_0
- webencodings=0.5.1=py36_1
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-odml
| [
"test/test_property.py::TestProperty::test_simple_attributes",
"test/test_property.py::TestProperty::test_value_append"
]
| []
| [
"test/test_property.py::TestProperty::test_bool_conversion",
"test/test_property.py::TestProperty::test_clone",
"test/test_property.py::TestProperty::test_dtype",
"test/test_property.py::TestProperty::test_get_merged_equivalent",
"test/test_property.py::TestProperty::test_get_path",
"test/test_property.py::TestProperty::test_get_set_value",
"test/test_property.py::TestProperty::test_id",
"test/test_property.py::TestProperty::test_merge",
"test/test_property.py::TestProperty::test_new_id",
"test/test_property.py::TestProperty::test_parent",
"test/test_property.py::TestProperty::test_str_to_int_convert",
"test/test_property.py::TestProperty::test_value",
"test/test_property.py::TestProperty::test_value_extend",
"test/test_property_integration.py::TestPropertyIntegration::test_id",
"test/test_property_integration.py::TestPropertyIntegration::test_simple_attributes"
]
| []
| BSD 4-Clause "Original" or "Old" License | 2,332 | [
"odml/property.py"
]
| [
"odml/property.py"
]
|
|
nipy__nipype-2514 | e446466290b9ccba5d5aa589971c97e744d9267b | 2018-03-27 00:19:02 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py
index 1d26bef58..595605e38 100644
--- a/nipype/interfaces/afni/preprocess.py
+++ b/nipype/interfaces/afni/preprocess.py
@@ -2510,7 +2510,7 @@ class TProjectInputSpec(AFNICommandInputSpec):
rather than the value stored in the dataset header.""",
argstr='-TR %g')
mask = File(
- exist=True,
+ exists=True,
desc="""Only operate on voxels nonzero in the mset dataset.
++ Voxels outside the mask will be filled with zeros.
++ If no masking option is given, then all voxels
diff --git a/nipype/interfaces/dtitk/__init__.py b/nipype/interfaces/dtitk/__init__.py
index e3f3cb7aa..a41c09e58 100644
--- a/nipype/interfaces/dtitk/__init__.py
+++ b/nipype/interfaces/dtitk/__init__.py
@@ -6,10 +6,8 @@ Top-level namespace for dti-tk.
"""
# from .base import ()
-from .registration import (RigidTask, AffineTask, DiffeoTask,
- ComposeXfmTask, diffeoSymTensor3DVolTask,
- affSymTensor3DVolTask, affScalarVolTask,
- diffeoScalarVolTask)
-from .utils import (TVAdjustOriginTask, TVAdjustVoxSpTask,
- SVAdjustVoxSpTask, TVResampleTask, SVResampleTask,
- TVtoolTask, BinThreshTask)
+from .registration import (Rigid, Affine, Diffeo,
+ ComposeXfm, DiffeoSymTensor3DVol, AffSymTensor3DVol,
+ AffScalarVol, DiffeoScalarVol)
+from .utils import (TVAdjustVoxSp, SVAdjustVoxSp, TVResample, SVResample,
+ TVtool, BinThresh)
diff --git a/nipype/interfaces/dtitk/base.py b/nipype/interfaces/dtitk/base.py
index b14ad5ed2..069016e7f 100644
--- a/nipype/interfaces/dtitk/base.py
+++ b/nipype/interfaces/dtitk/base.py
@@ -13,6 +13,12 @@ Currently these tools are supported:
* Rigid Tensor Registration
* Affine Tensor Registration
* Diffeomorphic Tensor Registration
+* Combine affiine and diffeomorphic transforms
+* Application of transform to tensor and scalar volumes
+* Threshold and Binarize
+* Adjusting the voxel space of tensor and scalar volumes
+* Resampling tensor and scalar volumes
+* Calculation of tensor metrics from tensor volume
Examples
--------
@@ -28,10 +34,26 @@ from ... import logging
from ...utils.filemanip import fname_presuffix
from ..base import CommandLine
from nipype.interfaces.fsl.base import Info
+import warnings
LOGGER = logging.getLogger('interface')
+class DTITKRenameMixin(object):
+ def __init__(self, *args, **kwargs):
+ classes = [cls.__name__ for cls in self.__class__.mro()]
+ dep_name = classes[0]
+ rename_idx = classes.index('DTITKRenameMixin')
+ new_name = classes[rename_idx + 1]
+ warnings.warn('The {} interface has been renamed to {}\n'
+ 'Please see the documentation for DTI-TK '
+ 'interfaces, as some inputs have been '
+ 'added or renamed for clarity.'
+ ''.format(dep_name, new_name),
+ DeprecationWarning)
+ super(DTITKRenameMixin, self).__init__(*args, **kwargs)
+
+
class CommandLineDtitk(CommandLine):
def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True,
diff --git a/nipype/interfaces/dtitk/registration.py b/nipype/interfaces/dtitk/registration.py
index 3dd1c068c..6aa40d420 100644
--- a/nipype/interfaces/dtitk/registration.py
+++ b/nipype/interfaces/dtitk/registration.py
@@ -1,394 +1,489 @@
-from ..base import TraitedSpec, CommandLineInputSpec, traits, isdefined
-from ...utils.filemanip import fname_presuffix
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""DTITK registration interfaces
+
+DTI-TK developed by Gary Hui Zhang, [email protected]
+For additional help, visit http://dti-tk.sf.net
+
+The high-dimensional tensor-based DTI registration algorithm
+
+Zhang, H., Avants, B.B, Yushkevich, P.A., Woo, J.H., Wang, S., McCluskey, L.H.,
+ Elman, L.B., Melhem, E.R., Gee, J.C., High-dimensional spatial normalization
+ of diffusion tensor images improves the detection of white matter differences
+ in amyotrophic lateral sclerosis, IEEE Transactions on Medical Imaging,
+ 26(11):1585-1597, November 2007. PMID: 18041273.
+
+The original piecewise-affine tensor-based DTI registration algorithm at the
+core of DTI-TK
+
+Zhang, H., Yushkevich, P.A., Alexander, D.C., Gee, J.C., Deformable
+ registration of diffusion tensor MR images with explicit orientation
+ optimization, Medical Image Analysis, 10(5):764-785, October 2006. PMID:
+ 16899392.
+
+"""
+
+from ..base import TraitedSpec, CommandLineInputSpec, traits, File, isdefined
+from ...utils.filemanip import fname_presuffix, split_filename
+from .base import CommandLineDtitk, DTITKRenameMixin
import os
-from .base import CommandLineDtitk
+
+__docformat__ = 'restructuredtext'
class RigidInputSpec(CommandLineInputSpec):
- fixed_file = traits.Str(desc="fixed diffusion tensor image",
- exists=True, mandatory=True,
- position=0, argstr="%s")
- moving_file = traits.Str(desc="diffusion tensor image path", exists=True,
- mandatory=True, position=1, argstr="%s")
- similarity_metric = traits.Enum('EDS', 'GDS', 'DDS', 'NMI', exists=True,
+ fixed_file = File(desc="fixed tensor volume", exists=True,
+ mandatory=True, position=0, argstr="%s", copyfile=False)
+ moving_file = File(desc="moving tensor volume", exists=True,
+ mandatory=True, position=1, argstr="%s", copyfile=False)
+ similarity_metric = traits.Enum('EDS', 'GDS', 'DDS', 'NMI',
mandatory=True, position=2, argstr="%s",
- desc="similarity metric")
- samplingX = traits.Float(mandatory=True, position=3, argstr="%s",
- desc="dist between samp points (mm)",
- default_value=4)
- samplingY = traits.Float(mandatory=True, position=4, argstr="%s",
- desc="dist between samp points (mm)",
- default_value=4)
- samplingZ = traits.Float(mandatory=True, position=5, argstr="%s",
- desc="dist between samp points (mm)",
- default_value=4)
- ftol = traits.Float(mandatory=True, position=6, argstr="%s",
- desc="cost function tolerance", default_value=0.01)
- useInTrans = traits.Float(mandatory=False, position=7, argstr="%s",
- desc="to initialize with existing xfm set as 1",
- default_value=1)
+ desc="similarity metric", usedefault=True)
+ sampling_xyz = traits.Tuple((4, 4, 4), mandatory=True, position=3,
+ argstr="%g %g %g", usedefault=True,
+ desc="dist between samp points (mm) (x,y,z)")
+ ftol = traits.Float(mandatory=True, position=4, argstr="%g",
+ desc="cost function tolerance", default_value=0.01,
+ usedefault=True)
+ initialize_xfm = File(copyfile=True, desc="Initialize w/DTITK-FORMAT"
+ "affine", position=5, argstr="%s", exists=True)
class RigidOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
- out_file_xfm = traits.File(exists=True)
-
-
-class RigidTask(CommandLineDtitk):
+ out_file = File(exists=True)
+ out_file_xfm = File(exists=True)
+
+
+class Rigid(CommandLineDtitk):
+ """Performs rigid registration between two tensor volumes
+
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.Rigid()
+ >>> node.inputs.fixed_file = 'im1.nii'
+ >>> node.inputs.moving_file = 'im2.nii'
+ >>> node.inputs.similarity_metric = 'EDS'
+ >>> node.inputs.sampling_xyz = (4,4,4)
+ >>> node.inputs.ftol = 0.01
+ >>> node.cmdline
+ 'dti_rigid_reg im1.nii im2.nii EDS 4 4 4 0.01'
+ >>> node.run() # doctest: +SKIP
"""
- Performs rigid registration between two tensor volumes
-
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.RigidTask()
- >>> node.inputs.fixed_file = 'diffusion.nii.gz'
- >>> node.inputs.moving_file = 'diffusion2.nii.gz'
- >>> node.inputs.similarity_metric = 'EDS'
- >>> node.inputs.samplingX = 4
- >>> node.inputs.samplingY = 4
- >>> node.inputs.samplingZ = 4
- >>> node.inputs.ftol = 0.01
- >>> node.inputs.useInTrans = 1
- >>> node.run() # doctest: +SKIP
- """
input_spec = RigidInputSpec
output_spec = RigidOutputSpec
_cmd = 'dti_rigid_reg'
+ '''def _format_arg(self, name, spec, value):
+ if name == 'initialize_xfm':
+ value = 1
+ return super(Rigid, self)._format_arg(name, spec, value)'''
+
+ def _run_interface(self, runtime):
+ runtime = super(Rigid, self)._run_interface(runtime)
+ if '''.aff doesn't exist or can't be opened''' in runtime.stderr:
+ self.raise_exception(runtime)
+ return runtime
+
def _list_outputs(self):
outputs = self.output_spec().get()
- outputs['out_file_xfm'] = self.inputs.moving_file.replace('.nii.gz',
- '.aff')
- outputs['out_file'] = self.inputs.moving_file.replace('.nii.gz',
- '_aff.nii.gz')
+ moving = self.inputs.moving_file
+ outputs['out_file_xfm'] = fname_presuffix(moving, suffix='.aff',
+ use_ext=False)
+ outputs['out_file'] = fname_presuffix(moving, suffix='_aff')
return outputs
-class AffineInputSpec(CommandLineInputSpec):
- fixed_file = traits.Str(desc="fixed diffusion tensor image",
- exists=True, mandatory=True,
- position=0, argstr="%s")
- moving_file = traits.Str(desc="diffusion tensor image path", exists=True,
- mandatory=True, position=1, argstr="%s")
- similarity_metric = traits.Enum('EDS', 'GDS', 'DDS', 'NMI', exists=True,
- mandatory=True, position=2, argstr="%s",
- desc="similarity metric")
- samplingX = traits.Float(mandatory=True, position=3, argstr="%s",
- desc="dist between samp points (mm)",
- default_value=4)
- samplingY = traits.Float(mandatory=True, position=4, argstr="%s",
- desc="dist between samp points (mm)",
- default_value=4)
- samplingZ = traits.Float(mandatory=True, position=5, argstr="%s",
- desc="dist between samp points (mm)",
- default_value=4)
- ftol = traits.Float(mandatory=True, position=6, argstr="%s",
- desc="cost function tolerance", default_value=0.01)
- useInTrans = traits.Float(mandatory=False, position=7, argstr="%s",
- desc="to initialize with existing xfm set as 1",
- default_value=1)
-
-
-class AffineOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
- out_file_xfm = traits.File(exists=True)
-
-
-class AffineTask(CommandLineDtitk):
+class Affine(Rigid):
+ """Performs affine registration between two tensor volumes
+
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.Affine()
+ >>> node.inputs.fixed_file = 'im1.nii'
+ >>> node.inputs.moving_file = 'im2.nii'
+ >>> node.inputs.similarity_metric = 'EDS'
+ >>> node.inputs.sampling_xyz = (4,4,4)
+ >>> node.inputs.ftol = 0.01
+ >>> node.inputs.initialize_xfm = 'im_affine.aff'
+ >>> node.cmdline
+ 'dti_affine_reg im1.nii im2.nii EDS 4 4 4 0.01 im_affine.aff'
+ >>> node.run() # doctest: +SKIP
"""
- Performs affine registration between two tensor volumes
-
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.AffineTask()
- >>> node.inputs.fixed_file = 'diffusion.nii.gz'
- >>> node.inputs.moving_file = 'diffusion2.nii.gz'
- >>> node.inputs.similarity_metric = 'EDS'
- >>> node.inputs.samplingX = 4
- >>> node.inputs.samplingY = 4
- >>> node.inputs.samplingZ = 4
- >>> node.inputs.ftol = 0.01
- >>> node.inputs.useInTrans = 1
- >>> node.run() # doctest: +SKIP
- """
- input_spec = AffineInputSpec
- output_spec = AffineOutputSpec
_cmd = 'dti_affine_reg'
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file_xfm'] = self.inputs.moving_file.replace('.nii.gz',
- '.aff')
- outputs['out_file'] = self.inputs.moving_file.replace('.nii.gz',
- '_aff.nii.gz')
- return outputs
-
class DiffeoInputSpec(CommandLineInputSpec):
- fixed_file = traits.Str(desc="fixed diffusion tensor image",
- exists=True, mandatory=False, position=0,
- argstr="%s")
- moving_file = traits.Str(desc="moving diffusion tensor image",
- exists=True, mandatory=False,
- position=1, argstr="%s")
- mask = traits.Str(desc="mask", exists=True, mandatory=False, position=2,
- argstr="%s")
- legacy = traits.Float(desc="legacy parameter; always set to 1",
- exists=True, mandatory=True,
- position=3, default_value=1, argstr="%s")
- n_iters = traits.Float(desc="number of iterations",
- exists=True, mandatory=True,
- position=4, default_value=6, argstr="%s")
- ftol = traits.Float(desc="iteration for the optimization to stop",
- exists=True, mandatory=True,
- position=5, default_value=0.002, argstr="%s")
+ fixed_file = File(desc="fixed tensor volume",
+ exists=True, position=0, argstr="%s")
+ moving_file = File(desc="moving tensor volume",
+ exists=True, position=1, argstr="%s", copyfile=False)
+ mask_file = File(desc="mask", exists=True, position=2, argstr="%s")
+ legacy = traits.Enum(1, desc="legacy parameter; always set to 1",
+ usedefault=True, mandatory=True,
+ position=3, argstr="%d")
+ n_iters = traits.Int(6, desc="number of iterations",
+ mandatory=True,
+ position=4, argstr="%d", usedefault=True)
+ ftol = traits.Float(0.002, desc="iteration for the optimization to stop",
+ mandatory=True, position=5, argstr="%g",
+ usedefault=True)
class DiffeoOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
- out_file_xfm = traits.File(exists=True)
-
-
-class DiffeoTask(CommandLineDtitk):
+ out_file = File(exists=True)
+ out_file_xfm = File(exists=True)
+
+
+class Diffeo(CommandLineDtitk):
+ """Performs diffeomorphic registration between two tensor volumes
+
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.Diffeo()
+ >>> node.inputs.fixed_file = 'im1.nii'
+ >>> node.inputs.moving_file = 'im2.nii'
+ >>> node.inputs.mask_file = 'mask.nii'
+ >>> node.inputs.legacy = 1
+ >>> node.inputs.n_iters = 6
+ >>> node.inputs.ftol = 0.002
+ >>> node.cmdline
+ 'dti_diffeomorphic_reg im1.nii im2.nii mask.nii 1 6 0.002'
+ >>> node.run() # doctest: +SKIP
"""
- Performs diffeomorphic registration between two tensor volumes
-
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.DiffeoTask()
- >>> node.inputs.fixed_file = 'diffusion.nii.gz'
- >>> node.inputs.moving_file = 'diffusion2.nii.gz'
- >>> node.inputs.mask = 'mask.nii.gz'
- >>> node.inputs.legacy = 1
- >>> node.inputs.n_iters = 6
- >>> node.inputs.ftol = 0.002
- >>> node.run() # doctest: +SKIP
- """
input_spec = DiffeoInputSpec
output_spec = DiffeoOutputSpec
_cmd = 'dti_diffeomorphic_reg'
def _list_outputs(self):
outputs = self.output_spec().get()
- outputs['out_file_xfm'] = self.inputs.moving_file.replace(
- '.nii.gz', '_diffeo.df.nii.gz')
- outputs['out_file'] = self.inputs.moving_file.replace(
- '.nii.gz', '_diffeo.nii.gz')
+ moving = self.inputs.moving_file
+ outputs['out_file_xfm'] = fname_presuffix(moving, suffix='_diffeo.df')
+ outputs['out_file'] = fname_presuffix(moving, suffix='_diffeo')
return outputs
class ComposeXfmInputSpec(CommandLineInputSpec):
- in_df = traits.Str(desc='diffeomorphic file.df.nii.gz', exists=True,
- mandatory=False, position=1, argstr="-df %s")
- in_aff = traits.Str(desc='affine file.aff', exists=True, mandatory=False,
- position=0, argstr="-aff %s")
- out_file = traits.Str(desc='output_path', exists=True, mandatory=False,
- position=2, argstr="-out %s", name_source="in_df",
- name_template="%s_comboaff.nii.gz")
+ in_df = File(desc='diffeomorphic warp file', exists=True,
+ argstr="-df %s", mandatory=True)
+ in_aff = File(desc='affine transform file', exists=True,
+ argstr="-aff %s", mandatory=True)
+ out_file = File(desc='output path',
+ argstr="-out %s", genfile=True)
class ComposeXfmOutputSpec(TraitedSpec):
- out_file = traits.File(desc='cheese', exists=True)
+ out_file = File(exists=True)
-class ComposeXfmTask(CommandLineDtitk):
+class ComposeXfm(CommandLineDtitk):
"""
Combines diffeomorphic and affine transforms
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.ComposeXfmTask()
- >>> node.inputs.in_df = 'ants_Warp.nii.gz'
- >>> node.inputs.in_aff= 'ants_Affine.txt'
- >>> node.run() # doctest: +SKIP
- """
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.ComposeXfm()
+ >>> node.inputs.in_df = 'im_warp.df.nii'
+ >>> node.inputs.in_aff= 'im_affine.aff'
+ >>> node.cmdline
+ 'dfRightComposeAffine -aff im_affine.aff -df im_warp.df.nii -out
+ im_warp_affdf.df.nii'
+ >>> node.run() # doctest: +SKIP
+ """
input_spec = ComposeXfmInputSpec
output_spec = ComposeXfmOutputSpec
_cmd = 'dfRightComposeAffine'
def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.in_df.replace('.df.nii.gz',
- '_combo.df.nii.gz')
+ outputs = self._outputs().get()
+ out_file = self.inputs.out_file
+ if not isdefined(out_file):
+ out_file = self._gen_filename('out_file')
+ outputs['out_file'] = os.path.abspath(out_file)
return outputs
+ def _gen_filename(self, name):
+ if name != 'out_file':
+ return
+ path, base, ext = split_filename(self.inputs.in_df)
+ suffix = '_affdf'
+ if base.endswith('.df'):
+ suffix += '.df'
+ base = base[:-3]
+ return fname_presuffix(base, suffix=suffix + ext, use_ext=False)
+
+
+class AffSymTensor3DVolInputSpec(CommandLineInputSpec):
+ in_file = File(desc='moving tensor volume', exists=True,
+ argstr="-in %s", mandatory=True)
+ out_file = File(desc='output filename',
+ argstr="-out %s", name_source="in_file",
+ name_template="%s_affxfmd", keep_extension=True)
+ transform = File(exists=True, argstr="-trans %s",
+ xor=['target', 'translation', 'euler', 'deformation'],
+ desc='transform to apply: specify an input transformation'
+ ' file; parameters input will be ignored',)
+ interpolation = traits.Enum('LEI', 'EI', usedefault=True,
+ argstr="-interp %s",
+ desc='Log Euclidean/Euclidean Interpolation')
+ reorient = traits.Enum('PPD', 'NO', 'FS', argstr='-reorient %s',
+ usedefault=True, desc='Reorientation strategy: '
+ 'preservation of principal direction, no '
+ 'reorientation, or finite strain')
+ target = File(exists=True, argstr="-target %s", xor=['transform'],
+ desc='output volume specification read from the target '
+ 'volume if specified')
+ translation = traits.Tuple((traits.Float(), traits.Float(),
+ traits.Float()),
+ desc='translation (x,y,z) in mm',
+ argstr='-translation %g %g %g',
+ xor=['transform'])
+ euler = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='(theta, phi, psi) in degrees',
+ xor=['transform'], argstr='-euler %g %g %g')
+ deformation = traits.Tuple((traits.Float(),) * 6,
+ desc='(xx,yy,zz,xy,yz,xz)', xor=['transform'],
+ argstr='-deformation %g %g %g %g %g %g')
+
+
+class AffSymTensor3DVolOutputSpec(TraitedSpec):
+ out_file = File(exists=True)
+
+
+class AffSymTensor3DVol(CommandLineDtitk):
+ """
+ Applies affine transform to a tensor volume
-class diffeoSymTensor3DVolInputSpec(CommandLineInputSpec):
- in_tensor = traits.Str(desc='moving tensor', exists=True, mandatory=False,
- position=0, argstr="-in %s")
- in_xfm = traits.Str(desc='transform to apply', exists=True,
- mandatory=False,
- position=1, argstr="-trans %s")
- in_target = traits.Str(desc='', exists=True, mandatory=False, position=2,
- argstr="-target %s")
- out_file = traits.Str(desc='', exists=True, mandatory=False, position=3,
- argstr="-out %s", name_source="in_tensor",
- name_template="%s_diffeoxfmd.nii.gz")
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.AffSymTensor3DVol()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.transform = 'im_affine.aff'
+ >>> node.cmdline
+ 'affineSymTensor3DVolume -in im1.nii -interp LEI -out im1_affxfmd.nii
+ -reorient PPD -trans im_affine.aff'
+ >>> node.run() # doctest: +SKIP
+ """
+ input_spec = AffSymTensor3DVolInputSpec
+ output_spec = AffSymTensor3DVolOutputSpec
+ _cmd = 'affineSymTensor3DVolume'
-class diffeoSymTensor3DVolOutputSpec(TraitedSpec):
- out_file = traits.File(desc='cheese', exists=True)
+class AffScalarVolInputSpec(CommandLineInputSpec):
+ in_file = File(desc='moving scalar volume', exists=True,
+ argstr="-in %s", mandatory=True)
+ out_file = File(desc='output filename',
+ argstr="-out %s", name_source="in_file",
+ name_template="%s_affxfmd", keep_extension=True)
+ transform = File(exists=True, argstr="-trans %s",
+ xor=['target', 'translation', 'euler', 'deformation'],
+ desc='transform to apply: specify an input transformation'
+ ' file; parameters input will be ignored',)
+ interpolation = traits.Enum('trilinear', 'NN',
+ usedefault=True, argstr="-interp %s",
+ desc='trilinear or nearest neighbor'
+ ' interpolation')
+ target = File(exists=True, argstr="-target %s", xor=['transform'],
+ desc='output volume specification read from the target '
+ 'volume if specified')
+ translation = traits.Tuple((traits.Float(), traits.Float(),
+ traits.Float()),
+ desc='translation (x,y,z) in mm',
+ argstr='-translation %g %g %g',
+ xor=['transform'])
+ euler = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='(theta, phi, psi) in degrees',
+ xor=['transform'], argstr='-euler %g %g %g')
+ deformation = traits.Tuple((traits.Float(),) * 6,
+ desc='(xx,yy,zz,xy,yz,xz)', xor=['transform'],
+ argstr='-deformation %g %g %g %g %g %g')
+
+
+class AffScalarVolOutputSpec(TraitedSpec):
+ out_file = File(desc='moved volume', exists=True)
+
+
+class AffScalarVol(CommandLineDtitk):
+ """
+ Applies affine transform to a scalar volume
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.AffScalarVol()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.transform = 'im_affine.aff'
+ >>> node.cmdline
+ 'affineScalarVolume -in im1.nii -interp 0 -out im1_affxfmd.nii -trans
+ im_affine.aff'
+ >>> node.run() # doctest: +SKIP
+ """
+ input_spec = AffScalarVolInputSpec
+ output_spec = AffScalarVolOutputSpec
+ _cmd = 'affineScalarVolume'
-class diffeoSymTensor3DVolTask(CommandLineDtitk):
+ def _format_arg(self, name, spec, value):
+ if name == 'interpolation':
+ value = {'trilinear': 0, 'NN': 1}[value]
+ return super(AffScalarVol, self)._format_arg(name, spec, value)
+
+
+class DiffeoSymTensor3DVolInputSpec(CommandLineInputSpec):
+ in_file = File(desc='moving tensor volume', exists=True,
+ argstr="-in %s", mandatory=True)
+ out_file = File(desc='output filename',
+ argstr="-out %s", name_source="in_file",
+ name_template="%s_diffeoxfmd", keep_extension=True)
+ transform = File(exists=True, argstr="-trans %s",
+ mandatory=True, desc='transform to apply')
+ df = traits.Str('FD', argstr="-df %s", usedefault=True)
+ interpolation = traits.Enum('LEI', 'EI', usedefault=True,
+ argstr="-interp %s",
+ desc='Log Euclidean/Euclidean Interpolation')
+ reorient = traits.Enum('PPD', 'FS', argstr='-reorient %s',
+ usedefault=True, desc='Reorientation strategy: '
+ 'preservation of principal direction or finite '
+ 'strain')
+ target = File(exists=True, argstr="-target %s", xor=['voxel_size'],
+ desc='output volume specification read from the target '
+ 'volume if specified')
+ voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz voxel size (superseded by target)',
+ argstr="-vsize %g %g %g", xor=['target'])
+ flip = traits.Tuple((traits.Int(), traits.Int(), traits.Int()),
+ argstr="-flip %d %d %d")
+ resampling_type = traits.Enum('backward', 'forward',
+ desc='use backward or forward resampling',
+ argstr="-type %s")
+
+
+class DiffeoSymTensor3DVolOutputSpec(TraitedSpec):
+ out_file = File(exists=True)
+
+
+class DiffeoSymTensor3DVol(CommandLineDtitk):
"""
Applies diffeomorphic transform to a tensor volume
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.diffeoSymTensor3DVolTask()
- >>> node.inputs.in_tensor = 'diffusion.nii'
- >>> node.inputs.in_xfm = 'ants_Warp.nii.gz'
- >>> node.run() # doctest: +SKIP
- """
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.DiffeoSymTensor3DVol()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.transform = 'im_warp.df.nii'
+ >>> node.cmdline
+ 'deformationSymTensor3DVolume -df FD -in im1.nii -interp LEI -out
+ im1_diffeoxfmd.nii -reorient PPD -trans im_warp.df.nii'
+ >>> node.run() # doctest: +SKIP
+ """
- input_spec = diffeoSymTensor3DVolInputSpec
- output_spec = diffeoSymTensor3DVolOutputSpec
+ input_spec = DiffeoSymTensor3DVolInputSpec
+ output_spec = DiffeoSymTensor3DVolOutputSpec
_cmd = 'deformationSymTensor3DVolume'
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- return outputs
+ def _format_arg(self, name, spec, value):
+ if name == 'resampling_type':
+ value = {'forward': 0, 'backward': 1}[value]
+ return super(DiffeoSymTensor3DVol, self)._format_arg(name, spec, value)
+
+
+class DiffeoScalarVolInputSpec(CommandLineInputSpec):
+ in_file = File(desc='moving scalar volume', exists=True,
+ argstr="-in %s", mandatory=True)
+ out_file = File(desc='output filename',
+ argstr="-out %s", name_source="in_file",
+ name_template="%s_diffeoxfmd", keep_extension=True)
+ transform = File(exists=True, argstr="-trans %s",
+ mandatory=True, desc='transform to apply')
+ target = File(exists=True, argstr="-target %s", xor=['voxel_size'],
+ desc='output volume specification read from the target '
+ 'volume if specified')
+ voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz voxel size (superseded by target)',
+ argstr="-vsize %g %g %g", xor=['target'])
+ flip = traits.Tuple((traits.Int(), traits.Int(), traits.Int()),
+ argstr="-flip %d %d %d")
+ resampling_type = traits.Enum('backward', 'forward',
+ desc='use backward or forward resampling',
+ argstr="-type %s")
+ interpolation = traits.Enum('trilinear', 'NN',
+ desc='trilinear, or nearest neighbor',
+ argstr="-interp %s",
+ usedefault=True)
+
+
+class DiffeoScalarVolOutputSpec(TraitedSpec):
+ out_file = File(desc='moved volume', exists=True)
+
+
+class DiffeoScalarVol(CommandLineDtitk):
+ """
+ Applies diffeomorphic transform to a scalar volume
+ Example
+ -------
+
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.DiffeoScalarVol()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.transform = 'im_warp.df.nii'
+ >>> node.cmdline
+ 'deformationScalarVolume -in im1.nii -interp 0 -out im1_diffeoxfmd.nii
+ -trans im_warp.df.nii'
+ >>> node.run() # doctest: +SKIP
+ """
-class affSymTensor3DVolInputSpec(CommandLineInputSpec):
- in_tensor = traits.Str(desc='moving tensor', exists=True, mandatory=False,
- position=0, argstr="-in %s")
- in_xfm = traits.Str(desc='transform to apply', exists=True,
- mandatory=False, position=1, argstr="-trans %s")
- in_target = traits.Str(desc='', exists=True, mandatory=False, position=2,
- argstr="-target %s")
- out_file = traits.Str(desc='', exists=True, mandatory=False, position=3,
- argstr="-out %s", name_source="in_tensor",
- name_template="%s_affxfmd.nii.gz")
+ input_spec = DiffeoScalarVolInputSpec
+ output_spec = DiffeoScalarVolOutputSpec
+ _cmd = 'deformationScalarVolume'
+ def _format_arg(self, name, spec, value):
+ if name == 'resampling_type':
+ value = {'forward': 0, 'backward': 1}[value]
+ elif name == 'interpolation':
+ value = {'trilinear': 0, 'NN': 1}[value]
+ return super(DiffeoScalarVol, self)._format_arg(name, spec, value)
-class affSymTensor3DVolOutputSpec(TraitedSpec):
- out_file = traits.File(desc='cheese', exists=True)
+class RigidTask(DTITKRenameMixin, Rigid):
+ pass
-class affSymTensor3DVolTask(CommandLineDtitk):
- """
- Applies affine transform to a tensor volume
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.affSymTensor3DVolTask()
- >>> node.inputs.in_tensor = 'diffusion.nii'
- >>> node.inputs.in_xfm = 'ants_Affine.txt'
- >>> node.run() # doctest: +SKIP
- """
- input_spec = affSymTensor3DVolInputSpec
- output_spec = affSymTensor3DVolOutputSpec
- _cmd = 'affineSymTensor3DVolume'
+class AffineTask(DTITKRenameMixin, Affine):
+ pass
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = os.path.abspath(self.inputs.out_file)
- return outputs
+class DiffeoTask(DTITKRenameMixin, Diffeo):
+ pass
-class affScalarVolInputSpec(CommandLineInputSpec):
- in_volume = traits.Str(desc='moving volume', exists=True, mandatory=False,
- position=0, argstr="-in %s")
- in_xfm = traits.Str(desc='transform to apply', exists=True,
- mandatory=False,
- position=1, argstr="-trans %s")
- in_target = traits.Str(desc='', position=2, argstr="-target %s")
- out_file = traits.Str(desc='', mandatory=False, position=3,
- argstr="-out %s", name_source="in_volume",
- name_template="%s_affxfmd.nii.gz")
+class ComposeXfmTask(DTITKRenameMixin, ComposeXfm):
+ pass
-class affScalarVolOutputSpec(TraitedSpec):
- out_file = traits.File(desc='moved volume', exists=True)
+class affScalarVolTask(DTITKRenameMixin, AffScalarVol):
+ pass
-class affScalarVolTask(CommandLineDtitk):
- """
- Applies affine transform to a scalar volume
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.affScalarVolTask()
- >>> node.inputs.in_volume = 'fa.nii.gz'
- >>> node.inputs.in_xfm = 'ants_Affine.txt'
- >>> node.run() # doctest: +SKIP
- """
- input_spec = affScalarVolInputSpec
- output_spec = affScalarVolOutputSpec
- _cmd = 'affineScalarVolume'
+class affSymTensor3DVolTask(DTITKRenameMixin, AffSymTensor3DVol):
+ pass
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = os.path.abspath(self.inputs.out_file)
- return outputs
+class diffeoScalarVolTask(DTITKRenameMixin, DiffeoScalarVol):
+ pass
-class diffeoScalarVolInputSpec(CommandLineInputSpec):
- in_volume = traits.Str(desc='moving volume', exists=True, mandatory=False,
- position=0, argstr="-in %s")
- in_xfm = traits.Str(desc='transform to apply', exists=True,
- mandatory=False,
- position=2, argstr="-trans %s")
- in_target = traits.Str(desc='', exists=True, mandatory=False, position=3,
- argstr="-target %s")
- out_file = traits.Str(desc='', position=1, argstr="-out %s",
- name_source="in_volume",
- name_template="%s_diffeoxfmd.nii.gz")
- in_vsize = traits.Str(desc='', exists=True, mandatory=False, position=4,
- argstr="-vsize %s")
- in_flip = traits.Str(desc='', exists=True, mandatory=False, position=5,
- argstr="-flip %s")
- in_type = traits.Str(desc='', exists=True, mandatory=False, position=6,
- argstr="-type %s")
- in_interp = traits.Str(desc='0 trilin, 1 NN', exists=True, mandatory=False,
- position=7, argstr="-interp %s")
-
-
-class diffeoScalarVolOutputSpec(TraitedSpec):
- out_file = traits.File(desc='moved volume', exists=True)
-
-
-class diffeoScalarVolTask(CommandLineDtitk):
- """
- Applies diffeomorphic transform to a scalar volume
- Example
- -------
-
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.diffeoScalarVolTask()
- >>> node.inputs.in_volume = 'fa.nii.gz'
- >>> node.inputs.in_xfm = 'ants_Warp.nii.gz'
- >>> node.run() # doctest: +SKIP
- """
-
- input_spec = diffeoScalarVolInputSpec
- output_spec = diffeoScalarVolOutputSpec
- _cmd = 'deformationScalarVolume'
-
- def _list_outputs(self):
- outputs = self.output_spec().get()
- if not isdefined(self.inputs.out_file):
- self.inputs.out_file = fname_presuffix(self.inputs.in_volume,
- suffix="_diffeoxfmd",
- newpath=os.path.abspath(
- "."))
- outputs['out_file'] = os.path.abspath(self.inputs.out_file)
- return outputs
+class diffeoSymTensor3DVolTask(DTITKRenameMixin, DiffeoSymTensor3DVol):
+ pass
diff --git a/nipype/interfaces/dtitk/utils.py b/nipype/interfaces/dtitk/utils.py
index 86e0c08b8..274ea2914 100644
--- a/nipype/interfaces/dtitk/utils.py
+++ b/nipype/interfaces/dtitk/utils.py
@@ -1,296 +1,240 @@
-__author__ = 'kjordan'
-
-from ..base import TraitedSpec, CommandLineInputSpec, File, \
- traits, isdefined
-import os
-from .base import CommandLineDtitk
-
-
-class TVAdjustOriginInputSpec(CommandLineInputSpec):
- in_file = File(desc="image to resample", exists=True, mandatory=True,
- position=0, argstr="-in %s")
- out_file = traits.Str(genfile=True, desc='output path', position=1,
- argstr="-out %s")
- origin = traits.Str(desc='xyz voxel size', exists=True, mandatory=False,
- position=4, argstr='-origin %s')
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""DTITK utility interfaces
+DTI-TK developed by Gary Hui Zhang, [email protected]
+For additional help, visit http://dti-tk.sf.net
-class TVAdjustOriginOutputSpec(TraitedSpec):
- out_file = traits.Str(exists=True)
+The high-dimensional tensor-based DTI registration algorithm
+Zhang, H., Avants, B.B, Yushkevich, P.A., Woo, J.H., Wang, S., McCluskey, L.H.,
+Elman, L.B., Melhem, E.R., Gee, J.C., High-dimensional spatial normalization of
+diffusion tensor images improves the detection of white matter differences in
+amyotrophic lateral sclerosis, IEEE Transactions on Medical Imaging,
+26(11):1585-1597, November 2007. PMID: 18041273.
-class TVAdjustOriginTask(CommandLineDtitk):
- """
- Moves the origin of a tensor volume to zero
-
- Example
- -------
+The original piecewise-affine tensor-based DTI registration algorithm at the
+core of DTI-TK
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.TVAdjustOriginTask()
- >>> node.inputs.in_file = 'diffusion.nii'
- >>> node.run() # doctest: +SKIP
- """
+Zhang, H., Yushkevich, P.A., Alexander, D.C., Gee, J.C., Deformable
+registration of diffusion tensor MR images with explicit orientation
+optimization, Medical Image Analysis, 10(5):764-785, October 2006. PMID:
+16899392.
- input_spec = TVAdjustOriginInputSpec
- output_spec = TVAdjustOriginOutputSpec
- _cmd = 'TVAdjustVoxelspace'
- _suffix = "_originzero"
+"""
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_fname(self.inputs.in_file,
- suffix=self._suffix,
- ext='.'+'.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
- return outputs
+from ..base import TraitedSpec, CommandLineInputSpec, File, traits, isdefined
+from ...utils.filemanip import fname_presuffix
+from .base import CommandLineDtitk, DTITKRenameMixin
+import os
- def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
+__docformat__ = 'restructuredtext'
class TVAdjustVoxSpInputSpec(CommandLineInputSpec):
- in_file = File(desc="image to resample", exists=True, mandatory=True,
- position=0, argstr="-in %s")
- out_file = traits.Str(genfile=True, desc='output path', position=1,
- argstr="-out %s")
- origin = traits.Str(desc='xyz voxel size', exists=True, mandatory=False,
- position=4, argstr='-origin %s')
- target = traits.Str(desc='target volume', exists=True, mandatory=False,
- position=2, argstr="-target %s")
- vsize = traits.Str(desc='resampled voxel size', exists=True,
- mandatory=False, position=3, argstr="-vsize %s")
+ in_file = File(desc="tensor volume to modify", exists=True,
+ mandatory=True, argstr="-in %s")
+ out_file = File(desc='output path',
+ argstr="-out %s", name_source='in_file',
+ name_template='%s_avs', keep_extension=True)
+ target_file = File(desc='target volume to match',
+ argstr="-target %s",
+ xor=['voxel_size', 'origin'])
+ voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz voxel size (superseded by target)',
+ argstr="-vsize %g %g %g", xor=['target_file'])
+ origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz origin (superseded by target)',
+ argstr='-origin %g %g %g',
+ xor=['target_file'])
class TVAdjustVoxSpOutputSpec(TraitedSpec):
- out_file = traits.Str(exists=True)
+ out_file = File(exists=True)
-class TVAdjustVoxSpTask(CommandLineDtitk):
+class TVAdjustVoxSp(CommandLineDtitk):
"""
Adjusts the voxel space of a tensor volume
Example
-------
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.TVAdjustVoxSpTask()
- >>> node.inputs.in_file = 'diffusion.nii'
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.TVAdjustVoxSp()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.target_file = 'im2.nii'
+ >>> node.cmdline
+ 'TVAdjustVoxelspace -in im1.nii -out im1_avs.nii -target im2.nii'
>>> node.run() # doctest: +SKIP
"""
input_spec = TVAdjustVoxSpInputSpec
output_spec = TVAdjustVoxSpOutputSpec
_cmd = 'TVAdjustVoxelspace'
- _suffix = '_reslice'
-
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_fname(self.inputs.in_file,
- suffix=self._suffix,
- ext='.'+'.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
- return outputs
-
- def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
-
-
-# TODO not using these yet... need to be tested
class SVAdjustVoxSpInputSpec(CommandLineInputSpec):
- in_file = traits.Str(desc="image to resample", exists=True,
- mandatory=True, position=0, argstr="-in %s")
- in_target = traits.Str(desc='target volume', exists=True, mandatory=False,
- position=2, argstr="-target %s")
- in_voxsz = traits.Str(desc='resampled voxel size', exists=True,
- mandatory=False, position=3, argstr="-vsize %s")
- out_file = traits.Str(desc='output path', exists=True, mandatory=False,
- position=1, argstr="-out %s",
- name_source="in_file",
- name_template='%s_origmvd.nii.gz')
- origin = traits.Str(desc='xyz voxel size', exists=True, mandatory=False,
- position=4, argstr='-origin %s')
+ in_file = File(desc="scalar volume to modify", exists=True,
+ mandatory=True, argstr="-in %s")
+ out_file = File(desc='output path', argstr="-out %s",
+ name_source="in_file", name_template='%s_avs',
+ keep_extension=True)
+ target_file = File(desc='target volume to match',
+ argstr="-target %s", xor=['voxel_size', 'origin'])
+ voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz voxel size (superseded by target)',
+ argstr="-vsize %g %g %g", xor=['target_file'])
+ origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz origin (superseded by target)',
+ argstr='-origin %g %g %g',
+ xor=['target_file'])
class SVAdjustVoxSpOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
+ out_file = File(exists=True)
-class SVAdjustVoxSpTask(CommandLineDtitk):
+class SVAdjustVoxSp(CommandLineDtitk):
"""
Adjusts the voxel space of a scalar volume
Example
-------
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.SVAdjustVoxSpTask()
- >>> node.inputs.in_file = 'diffusion.nii.gz'
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.SVAdjustVoxSp()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.target_file = 'im2.nii'
+ >>> node.cmdline
+ 'SVAdjustVoxelspace -in im1.nii -out im1_avs.nii -target im2.nii'
>>> node.run() # doctest: +SKIP
"""
input_spec = SVAdjustVoxSpInputSpec
output_spec = SVAdjustVoxSpOutputSpec
_cmd = 'SVAdjustVoxelspace'
- _suffix = '_reslice'
-
- def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
-
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_filename(self.inputs.in_file,
- suffix=self._suffix,
- ext='.' + '.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
- return outputs
class TVResampleInputSpec(CommandLineInputSpec):
- in_file = traits.Str(desc="image to resample", exists=True,
- mandatory=True, position=0, argstr="-in %s")
- in_arraysz = traits.Str(desc='resampled array size', exists=True,
- mandatory=False, position=1, argstr="-size %s")
- in_voxsz = traits.Str(desc='resampled voxel size', exists=True,
- mandatory=False, position=2, argstr="-vsize %s")
- out_file = traits.Str(desc='output path', exists=True, mandatory=False,
- position=3, argstr="-out %s",
- name_source="in_file",
- name_template="%s_resampled.nii.gz")
+ in_file = File(desc="tensor volume to resample", exists=True,
+ mandatory=True, argstr="-in %s")
+ out_file = File(desc='output path',
+ name_source="in_file", name_template="%s_resampled",
+ keep_extension=True, argstr="-out %s")
+ target_file = File(desc='specs read from the target volume',
+ argstr="-target %s",
+ xor=['array_size', 'voxel_size', 'origin'])
+ align = traits.Enum('center', 'origin', argstr="-align %s",
+ desc='how to align output volume to input volume')
+ interpolation = traits.Enum('LEI', 'EI', argstr="-interp %s",
+ desc='Log Euclidean Euclidean Interpolation')
+ array_size = traits.Tuple((traits.Int(), traits.Int(), traits.Int()),
+ desc='resampled array size', xor=['target_file'],
+ argstr="-size %d %d %d")
+ voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='resampled voxel size', xor=['target_file'],
+ argstr="-vsize %g %g %g")
+ origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz origin', xor=['target_file'],
+ argstr='-origin %g %g %g')
class TVResampleOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
+ out_file = File(exists=True)
-class TVResampleTask(CommandLineDtitk):
+class TVResample(CommandLineDtitk):
"""
Resamples a tensor volume
Example
-------
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.TVResampleTask()
- >>> node.inputs.in_file = 'diffusion.nii.gz'
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.TVResample()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.target_file = 'im2.nii'
+ >>> node.cmdline
+ 'TVResample -in im1.nii -out im1_resampled.nii -target im2.nii'
>>> node.run() # doctest: +SKIP
"""
input_spec = TVResampleInputSpec
output_spec = TVResampleOutputSpec
_cmd = 'TVResample'
- _suffix = '_resampled'
-
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_fname(self.inputs.in_file,
- suffix=self._suffix,
- ext='.' + '.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
- return outputs
-
- def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
class SVResampleInputSpec(CommandLineInputSpec):
- in_file = traits.Str(desc="image to resample", exists=True,
- mandatory=True, position=0, argstr="-in %s")
- in_arraysz = traits.Str(desc='resampled array size', exists=True,
- mandatory=False, position=1,
- argstr="-size %s")
- in_voxsz = traits.Str(desc='resampled voxel size', exists=True,
- mandatory=False, position=2, argstr="-vsize %s")
- out_file = traits.Str(desc='output path', exists=True, mandatory=False,
- position=3, argstr="-out %s",
- name_source="in_file",
- name_template="%s_resampled.nii.gz")
+ in_file = File(desc="image to resample", exists=True,
+ mandatory=True, argstr="-in %s")
+ out_file = File(desc='output path',
+ name_source="in_file", name_template="%s_resampled",
+ keep_extension=True, argstr="-out %s")
+ target_file = File(desc='specs read from the target volume',
+ argstr="-target %s",
+ xor=['array_size', 'voxel_size', 'origin'])
+ align = traits.Enum('center', 'origin', argstr="-align %s",
+ desc='how to align output volume to input volume')
+ array_size = traits.Tuple((traits.Int(), traits.Int(), traits.Int()),
+ desc='resampled array size', xor=['target_file'],
+ argstr="-size %d %d %d")
+ voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='resampled voxel size', xor=['target_file'],
+ argstr="-vsize %g %g %g")
+ origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()),
+ desc='xyz origin', xor=['target_file'],
+ argstr='-origin %g %g %g')
class SVResampleOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
+ out_file = File(exists=True)
-class SVResampleTask(CommandLineDtitk):
+class SVResample(CommandLineDtitk):
"""
Resamples a scalar volume
Example
-------
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.SVResampleTask()
- >>> node.inputs.in_file = 'diffusion.nii'
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.SVResample()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.target_file = 'im2.nii'
+ >>> node.cmdline
+ 'SVResample -in im1.nii -out im1_resampled.nii -target im2.nii'
>>> node.run() # doctest: +SKIP
"""
input_spec = SVResampleInputSpec
output_spec = SVResampleOutputSpec
_cmd = 'SVResample'
- _suffix = '_resampled'
-
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_fname(self.inputs.in_file,
- suffix=self._suffix,
- ext='.' + '.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
- return outputs
-
- def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
class TVtoolInputSpec(CommandLineInputSpec):
- in_file = traits.Str(desc="image to resample", exists=True,
- mandatory=False, position=0, argstr="-in %s")
- in_flag = traits.Enum('fa', 'tr', 'ad', 'rd', 'pd', 'rgb', exists=True,
- mandatory=False, position=1, argstr="-%s", desc='')
+ in_file = File(desc="scalar volume to resample", exists=True,
+ argstr="-in %s", mandatory=True)
+ '''NOTE: there are a lot more options here; not implementing all of them'''
+ in_flag = traits.Enum('fa', 'tr', 'ad', 'rd', 'pd', 'rgb',
+ argstr="-%s", desc='')
+ out_file = File(argstr="-out %s", genfile=True)
class TVtoolOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
+ out_file = File()
-class TVtoolTask(CommandLineDtitk):
+class TVtool(CommandLineDtitk):
"""
Calculates a tensor metric volume from a tensor volume
Example
-------
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.TVtoolTask()
- >>> node.inputs.in_file = 'diffusion.nii'
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.TVtool()
+ >>> node.inputs.in_file = 'im1.nii'
>>> node.inputs.in_flag = 'fa'
+ >>> node.cmdline
+ 'TVtool -in im1.nii -fa -out im1_fa.nii'
>>> node.run() # doctest: +SKIP
"""
input_spec = TVtoolInputSpec
@@ -298,70 +242,92 @@ class TVtoolTask(CommandLineDtitk):
_cmd = 'TVtool'
def _list_outputs(self):
- _suffix = self.inputs.in_flag
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_fname(self.inputs.in_file,
- suffix=_suffix,
- ext='.' + '.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
+ outputs = self._outputs().get()
+ out_file = self.inputs.out_file
+ if not isdefined(out_file):
+ out_file = self._gen_filename('out_file')
+ outputs['out_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
+ if name != 'out_file':
+ return
+ return fname_presuffix(os.path.basename(self.inputs.in_file),
+ suffix='_' + self.inputs.in_flag)
+
+
+'''Note: SVTool not implemented at this time'''
class BinThreshInputSpec(CommandLineInputSpec):
- in_file = traits.Str(desc='', exists=True, mandatory=False, position=0,
- argstr="%s")
- out_file = traits.Str(desc='', exists=True, mandatory=False, position=1,
- argstr="%s")
- in_numbers = traits.Str(desc='LB UB inside_value outside_value',
- exists=True, mandatory=False, position=2,
- argstr="%s")
+ in_file = File(desc='Image to threshold/binarize', exists=True,
+ position=0, argstr="%s", mandatory=True)
+ out_file = File(desc='output path', position=1, argstr="%s",
+ keep_extension=True, name_source='in_file',
+ name_template='%s_thrbin')
+ lower_bound = traits.Float(0.01, position=2, argstr="%g", mandatory=True,
+ desc='lower bound of binarization range')
+ upper_bound = traits.Float(100, position=3, argstr="%g", mandatory=True,
+ desc='upper bound of binarization range')
+ inside_value = traits.Float(1, position=4, argstr="%g", usedefault=True,
+ mandatory=True, desc='value for voxels in '
+ 'binarization range')
+ outside_value = traits.Float(0, position=5, argstr="%g", usedefault=True,
+ mandatory=True, desc='value for voxels'
+ 'outside of binarization range')
class BinThreshOutputSpec(TraitedSpec):
- out_file = traits.File(exists=True)
+ out_file = File(exists=True)
-class BinThreshTask(CommandLineDtitk):
+class BinThresh(CommandLineDtitk):
"""
- Binarizes an image based on parameters
+ Binarizes an image
- Example
- -------
+ Example
+ -------
- >>> import nipype.interfaces.dtitk as dtitk
- >>> node = dtitk.BinThreshTask()
- >>> node.inputs.in_file = 'diffusion.nii'
- >>> node.inputs.in_numbers = '0 100 1 0'
- >>> node.run() # doctest: +SKIP
- """
+ >>> from nipype.interfaces import dtitk
+ >>> node = dtitk.BinThresh()
+ >>> node.inputs.in_file = 'im1.nii'
+ >>> node.inputs.lower_bound = 0
+ >>> node.inputs.upper_bound = 100
+ >>> node.inputs.inside_value = 1
+ >>> node.inputs.outside_value = 0
+ >>> node.cmdline
+ 'BinaryThresholdImageFilter im1.nii im1_thrbin.nii 0 100 1 0'
+ >>> node.run() # doctest: +SKIP
+ """
input_spec = BinThreshInputSpec
output_spec = BinThreshOutputSpec
_cmd = 'BinaryThresholdImageFilter'
- _suffix = '_bin'
- def _list_outputs(self):
- outputs = self.output_spec().get()
- outputs['out_file'] = self.inputs.out_file
- if not isdefined(self.inputs.out_file):
- outputs["out_file"] = self._gen_fname(self.inputs.in_file,
- suffix=self._suffix,
- ext='.'+'.'.join(
- self.inputs.in_file.
- split(".")[1:]))
- outputs["out_file"] = os.path.abspath(outputs["out_file"])
- return outputs
- def _gen_filename(self, name):
- if name == "out_file":
- return self._list_outputs()["out_file"]
- return None
+class BinThreshTask(DTITKRenameMixin, BinThresh):
+ pass
+
+
+class SVAdjustVoxSpTask(DTITKRenameMixin, SVAdjustVoxSp):
+ pass
+
+
+class SVResampleTask(DTITKRenameMixin, SVResample):
+ pass
+
+
+class TVAdjustOriginTask(DTITKRenameMixin, TVAdjustVoxSp):
+ pass
+
+
+class TVAdjustVoxSpTask(DTITKRenameMixin, TVAdjustVoxSp):
+ pass
+
+
+class TVResampleTask(DTITKRenameMixin, TVResample):
+ pass
+
+
+class TVtoolTask(DTITKRenameMixin, TVtool):
+ pass
diff --git a/nipype/testing/data/im_affine.aff b/nipype/testing/data/im_affine.aff
new file mode 100644
index 000000000..e69de29bb
diff --git a/nipype/testing/data/im_warp.df.nii b/nipype/testing/data/im_warp.df.nii
new file mode 100644
index 000000000..e69de29bb
diff --git a/nipype/workflows/dmri/dtitk/__init__.py b/nipype/workflows/dmri/dtitk/__init__.py
new file mode 100644
index 000000000..02dbf2554
--- /dev/null
+++ b/nipype/workflows/dmri/dtitk/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+# coding: utf-8
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+
+from __future__ import absolute_import
+from .tensor_registration import (affine_tensor_pipeline,
+ diffeomorphic_tensor_pipeline)
diff --git a/nipype/workflows/dmri/dtitk/tensor_registration.py b/nipype/workflows/dmri/dtitk/tensor_registration.py
new file mode 100644
index 000000000..c4d3d3248
--- /dev/null
+++ b/nipype/workflows/dmri/dtitk/tensor_registration.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# coding: utf-8
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+
+from ....pipeline import engine as pe
+from ....interfaces import utility as niu
+from ....interfaces import dtitk
+
+
+def affine_tensor_pipeline(name='AffTen'):
+
+ """
+ Workflow that performs a linear registration
+ (Rigid followed by Affine)
+
+ Example
+ -------
+
+ >>> from nipype.workflows.dmri.dtitk.tensor_registration import affine_tensor_pipeline
+ >>> affine = affine_tensor_pipeline()
+ >>> affine.inputs.inputnode.fixed_file = 'im1.nii'
+ >>> affine.inputs.inputnode.moving_file = 'im2.nii'
+ >>> affine.run() # doctest: +SKIP
+
+
+ """
+ inputnode = pe.Node(niu.IdentityInterface(
+ fields=['fixed_file', 'moving_file']),
+ name='inputnode')
+ outputnode = pe.Node(niu.IdentityInterface(
+ fields=['out_file', 'out_file_xfm']),
+ name='outputnode')
+
+ rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node')
+ affine_node = pe.Node(dtitk.Affine(), name='affine_node')
+
+ wf = pe.Workflow(name=name)
+
+ wf.connect(inputnode, 'fixed_file', rigid_node, 'fixed_file')
+ wf.connect(inputnode, 'moving_file', rigid_node, 'moving_file')
+ wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm')
+ wf.connect(inputnode, 'fixed_file', affine_node, 'fixed_file')
+ wf.connect(inputnode, 'moving_file', affine_node, 'moving_file')
+ wf.connect(affine_node, 'out_file', outputnode, 'out_file')
+ wf.connect(affine_node, 'out_file_xfm', outputnode, 'out_file_xfm')
+
+ return wf
+
+
+def diffeomorphic_tensor_pipeline(name='DiffeoTen',
+ params={'array_size': (128, 128, 64)}):
+ """
+ Workflow that performs a diffeomorphic registration
+    (Rigid and Affine followed by Diffeomorphic)
+ Note: the requirements for a diffeomorphic registration specify that
+ the dimension 0 is a power of 2 so images are resliced prior to
+ registration
+
+ Example
+ -------
+
+ >>> from nipype.workflows.dmri.dtitk.tensor_registration import diffeomorphic_tensor_pipeline
+ >>> diffeo = diffeomorphic_tensor_pipeline()
+ >>> diffeo.inputs.inputnode.fixed_file = 'im1.nii'
+ >>> diffeo.inputs.inputnode.moving_file = 'im2.nii'
+ >>> diffeo.run() # doctest: +SKIP
+
+
+ """
+ inputnode = pe.Node(niu.IdentityInterface(
+ fields=['fixed_file', 'moving_file']),
+ name='inputnode')
+ outputnode = pe.Node(niu.IdentityInterface(
+ fields=['out_file', 'out_file_xfm',
+ 'fixed_resliced', 'moving_resliced']),
+ name='outputnode')
+
+ reslice_node_pow2 = pe.Node(dtitk.TVResample(
+ origin=(0, 0, 0),
+ array_size=params['array_size']),
+ name='reslice_node_pow2')
+ reslice_node_moving = pe.Node(dtitk.TVResample(),
+ name='reslice_node_moving')
+ mask_node = pe.Node(dtitk.BinThresh(lower_bound=0.01, upper_bound=100,
+ inside_value=1, outside_value=0),
+ name='mask_node')
+ rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node')
+ affine_node = pe.Node(dtitk.Affine(), name='affine_node')
+ diffeo_node = pe.Node(dtitk.Diffeo(n_iters=6, ftol=0.002),
+ name='diffeo_node')
+ compose_xfm_node = pe.Node(dtitk.ComposeXfm(), name='compose_xfm_node')
+ apply_xfm_node = pe.Node(dtitk.DiffeoSymTensor3DVol(),
+ name='apply_xfm_node')
+
+ wf = pe.Workflow(name=name)
+
+ # Reslice input images
+ wf.connect(inputnode, 'fixed_file', reslice_node_pow2, 'in_file')
+ wf.connect(reslice_node_pow2, 'out_file',
+ reslice_node_moving, 'target_file')
+ wf.connect(inputnode, 'moving_file', reslice_node_moving, 'in_file')
+ # Rigid registration
+ wf.connect(reslice_node_pow2, 'out_file', rigid_node, 'fixed_file')
+ wf.connect(reslice_node_moving, 'out_file', rigid_node, 'moving_file')
+ # Affine registration
+ wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm')
+ wf.connect(reslice_node_pow2, 'out_file', affine_node, 'fixed_file')
+ wf.connect(reslice_node_moving, 'out_file', affine_node, 'moving_file')
+ # Diffeo registration
+ wf.connect(reslice_node_pow2, 'out_file', mask_node, 'in_file')
+ wf.connect(reslice_node_pow2, 'out_file', diffeo_node, 'fixed_file')
+ wf.connect(affine_node, 'out_file', diffeo_node, 'moving_file')
+ wf.connect(mask_node, 'out_file', diffeo_node, 'mask_file')
+ # Compose transform
+ wf.connect(diffeo_node, 'out_file_xfm', compose_xfm_node, 'in_df')
+ wf.connect(affine_node, 'out_file_xfm', compose_xfm_node, 'in_aff')
+ # Apply transform
+ wf.connect(reslice_node_moving, 'out_file', apply_xfm_node, 'in_file')
+ wf.connect(compose_xfm_node, 'out_file', apply_xfm_node, 'transform')
+ # Send to output
+ wf.connect(apply_xfm_node, 'out_file', outputnode, 'out_file')
+ wf.connect(compose_xfm_node, 'out_file', outputnode, 'out_file_xfm')
+ wf.connect(reslice_node_pow2, 'out_file', outputnode, 'fixed_resliced')
+ wf.connect(reslice_node_moving, 'out_file', outputnode, 'moving_resliced')
+
+ return wf
| some specs need fixing
```
nipype.interfaces.afni.preprocess:TProject:Inputs:mask:exist
nipype.interfaces.dtitk.registration:AffineTask:Inputs:fixed_file:exists
nipype.interfaces.dtitk.registration:AffineTask:Inputs:moving_file:exists
nipype.interfaces.dtitk.registration:AffineTask:Inputs:similarity_metric:exists
nipype.interfaces.dtitk.registration:AffineTask:Inputs:useInTrans:mandatory=False
nipype.interfaces.dtitk.registration:ComposeXfmTask:Inputs:in_aff:exists
nipype.interfaces.dtitk.registration:ComposeXfmTask:Inputs:in_aff:mandatory=False
nipype.interfaces.dtitk.registration:ComposeXfmTask:Inputs:in_df:exists
nipype.interfaces.dtitk.registration:ComposeXfmTask:Inputs:in_df:mandatory=False
nipype.interfaces.dtitk.registration:ComposeXfmTask:Inputs:out_file:exists
nipype.interfaces.dtitk.registration:ComposeXfmTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:fixed_file:exists
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:fixed_file:mandatory=False
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:ftol:exists
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:legacy:exists
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:mask:exists
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:mask:mandatory=False
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:moving_file:exists
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:moving_file:mandatory=False
nipype.interfaces.dtitk.registration:DiffeoTask:Inputs:n_iters:exists
nipype.interfaces.dtitk.registration:RigidTask:Inputs:fixed_file:exists
nipype.interfaces.dtitk.registration:RigidTask:Inputs:moving_file:exists
nipype.interfaces.dtitk.registration:RigidTask:Inputs:similarity_metric:exists
nipype.interfaces.dtitk.registration:RigidTask:Inputs:useInTrans:mandatory=False
nipype.interfaces.dtitk.registration:affScalarVolTask:Inputs:in_volume:exists
nipype.interfaces.dtitk.registration:affScalarVolTask:Inputs:in_volume:mandatory=False
nipype.interfaces.dtitk.registration:affScalarVolTask:Inputs:in_xfm:exists
nipype.interfaces.dtitk.registration:affScalarVolTask:Inputs:in_xfm:mandatory=False
nipype.interfaces.dtitk.registration:affScalarVolTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:in_target:exists
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:in_target:mandatory=False
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:in_tensor:exists
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:in_tensor:mandatory=False
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:in_xfm:exists
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:in_xfm:mandatory=False
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:out_file:exists
nipype.interfaces.dtitk.registration:affSymTensor3DVolTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_flip:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_flip:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_interp:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_interp:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_target:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_target:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_type:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_type:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_volume:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_volume:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_vsize:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_vsize:mandatory=False
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_xfm:exists
nipype.interfaces.dtitk.registration:diffeoScalarVolTask:Inputs:in_xfm:mandatory=False
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:in_target:exists
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:in_target:mandatory=False
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:in_tensor:exists
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:in_tensor:mandatory=False
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:in_xfm:exists
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:in_xfm:mandatory=False
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:out_file:exists
nipype.interfaces.dtitk.registration:diffeoSymTensor3DVolTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.utils:BinThreshTask:Inputs:in_file:exists
nipype.interfaces.dtitk.utils:BinThreshTask:Inputs:in_file:mandatory=False
nipype.interfaces.dtitk.utils:BinThreshTask:Inputs:in_numbers:exists
nipype.interfaces.dtitk.utils:BinThreshTask:Inputs:in_numbers:mandatory=False
nipype.interfaces.dtitk.utils:BinThreshTask:Inputs:out_file:exists
nipype.interfaces.dtitk.utils:BinThreshTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:in_file:exists
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:in_target:exists
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:in_target:mandatory=False
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:in_voxsz:exists
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:in_voxsz:mandatory=False
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:origin:exists
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:origin:mandatory=False
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:out_file:exists
nipype.interfaces.dtitk.utils:SVAdjustVoxSpTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:in_arraysz:exists
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:in_arraysz:mandatory=False
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:in_file:exists
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:in_voxsz:exists
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:in_voxsz:mandatory=False
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:out_file:exists
nipype.interfaces.dtitk.utils:SVResampleTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.utils:TVAdjustOriginTask:Inputs:origin:exists
nipype.interfaces.dtitk.utils:TVAdjustOriginTask:Inputs:origin:mandatory=False
nipype.interfaces.dtitk.utils:TVAdjustOriginTask:Outputs:out_file:exists
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Inputs:origin:exists
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Inputs:origin:mandatory=False
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Inputs:target:exists
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Inputs:target:mandatory=False
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Inputs:vsize:exists
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Inputs:vsize:mandatory=False
nipype.interfaces.dtitk.utils:TVAdjustVoxSpTask:Outputs:out_file:exists
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:in_arraysz:exists
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:in_arraysz:mandatory=False
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:in_file:exists
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:in_voxsz:exists
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:in_voxsz:mandatory=False
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:out_file:exists
nipype.interfaces.dtitk.utils:TVResampleTask:Inputs:out_file:mandatory=False
nipype.interfaces.dtitk.utils:TVtoolTask:Inputs:in_file:exists
nipype.interfaces.dtitk.utils:TVtoolTask:Inputs:in_file:mandatory=False
nipype.interfaces.dtitk.utils:TVtoolTask:Inputs:in_flag:exists
nipype.interfaces.dtitk.utils:TVtoolTask:Inputs:in_flag:mandatory=False
``` | nipy/nipype | diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py
index 25a47142b..ffca499ea 100644
--- a/nipype/interfaces/afni/tests/test_auto_TProject.py
+++ b/nipype/interfaces/afni/tests/test_auto_TProject.py
@@ -32,10 +32,7 @@ def test_TProject_inputs():
mandatory=True,
position=1,
),
- mask=dict(
- argstr='-mask %s',
- exist=True,
- ),
+ mask=dict(argstr='-mask %s', ),
noblock=dict(argstr='-noblock', ),
norm=dict(argstr='-norm', ),
num_threads=dict(
diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py
new file mode 100644
index 000000000..8a80801ba
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py
@@ -0,0 +1,72 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import Affine
+
+
+def test_Affine_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ fixed_file=dict(
+ argstr='%s',
+ copyfile=False,
+ mandatory=True,
+ position=0,
+ ),
+ ftol=dict(
+ argstr='%g',
+ mandatory=True,
+ position=4,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ initialize_xfm=dict(
+ argstr='%s',
+ copyfile=True,
+ position=5,
+ ),
+ moving_file=dict(
+ argstr='%s',
+ copyfile=False,
+ mandatory=True,
+ position=1,
+ ),
+ sampling_xyz=dict(
+ argstr='%g %g %g',
+ mandatory=True,
+ position=3,
+ usedefault=True,
+ ),
+ similarity_metric=dict(
+ argstr='%s',
+ mandatory=True,
+ position=2,
+ usedefault=True,
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ )
+ inputs = Affine.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_Affine_outputs():
+ output_map = dict(
+ out_file=dict(),
+ out_file_xfm=dict(),
+ )
+ outputs = Affine.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py
index cff7d1c0d..c7de1d31c 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py
@@ -12,56 +12,48 @@ def test_AffineTask_inputs():
),
fixed_file=dict(
argstr='%s',
- exists=True,
+ copyfile=False,
mandatory=True,
position=0,
),
ftol=dict(
- argstr='%s',
+ argstr='%g',
mandatory=True,
- position=6,
+ position=4,
+ usedefault=True,
),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
+ initialize_xfm=dict(
+ argstr='%s',
+ copyfile=True,
+ position=5,
+ ),
moving_file=dict(
argstr='%s',
- exists=True,
+ copyfile=False,
mandatory=True,
position=1,
),
- samplingX=dict(
- argstr='%s',
+ sampling_xyz=dict(
+ argstr='%g %g %g',
mandatory=True,
position=3,
- ),
- samplingY=dict(
- argstr='%s',
- mandatory=True,
- position=4,
- ),
- samplingZ=dict(
- argstr='%s',
- mandatory=True,
- position=5,
+ usedefault=True,
),
similarity_metric=dict(
argstr='%s',
- exists=True,
mandatory=True,
position=2,
+ usedefault=True,
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
- useInTrans=dict(
- argstr='%s',
- mandatory=False,
- position=7,
- ),
)
inputs = AffineTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py
new file mode 100644
index 000000000..1004dd42c
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py
@@ -0,0 +1,68 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..utils import BinThresh
+
+
+def test_BinThresh_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='%s',
+ mandatory=True,
+ position=0,
+ ),
+ inside_value=dict(
+ argstr='%g',
+ mandatory=True,
+ position=4,
+ usedefault=True,
+ ),
+ lower_bound=dict(
+ argstr='%g',
+ mandatory=True,
+ position=2,
+ ),
+ out_file=dict(
+ argstr='%s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_thrbin',
+ position=1,
+ ),
+ outside_value=dict(
+ argstr='%g',
+ mandatory=True,
+ position=5,
+ usedefault=True,
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ upper_bound=dict(
+ argstr='%g',
+ mandatory=True,
+ position=3,
+ ),
+ )
+ inputs = BinThresh.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_BinThresh_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = BinThresh.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTASK.py
similarity index 66%
rename from nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py
rename to nipype/interfaces/dtitk/tests/test_auto_BinThreshTASK.py
index 028a820ba..1f49ddce4 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTASK.py
@@ -17,26 +17,42 @@ def test_BinThreshTask_inputs():
),
in_file=dict(
argstr='%s',
- exists=True,
- mandatory=False,
+ mandatory=True,
position=0,
),
- in_numbers=dict(
- argstr='%s',
- exists=True,
- mandatory=False,
+ inside_value=dict(
+ argstr='%g',
+ mandatory=True,
+ position=4,
+ usedefault=True,
+ ),
+ lower_bound=dict(
+ argstr='%g',
+ mandatory=True,
position=2,
),
out_file=dict(
argstr='%s',
- exists=True,
- mandatory=False,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_thrbin',
position=1,
),
+ outside_value=dict(
+ argstr='%g',
+ mandatory=True,
+ position=5,
+ usedefault=True,
+ ),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ upper_bound=dict(
+ argstr='%g',
+ mandatory=True,
+ position=3,
+ ),
)
inputs = BinThreshTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py
new file mode 100644
index 000000000..9ee4c4465
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py
@@ -0,0 +1,46 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import ComposeXfm
+
+
+def test_ComposeXfm_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_aff=dict(
+ argstr='-aff %s',
+ mandatory=True,
+ ),
+ in_df=dict(
+ argstr='-df %s',
+ mandatory=True,
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ genfile=True,
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ )
+ inputs = ComposeXfm.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_ComposeXfm_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = ComposeXfm.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py
index ec5d2153e..0166be1c3 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py
@@ -17,23 +17,15 @@ def test_ComposeXfmTask_inputs():
),
in_aff=dict(
argstr='-aff %s',
- exists=True,
- mandatory=False,
- position=0,
+ mandatory=True,
),
in_df=dict(
argstr='-df %s',
- exists=True,
- mandatory=False,
- position=1,
+ mandatory=True,
),
out_file=dict(
argstr='-out %s',
- exists=True,
- mandatory=False,
- name_source='in_df',
- name_template='%s_comboaff.nii.gz',
- position=2,
+ genfile=True,
),
terminal_output=dict(
deprecated='1.0.0',
diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py
new file mode 100644
index 000000000..a389d22bf
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py
@@ -0,0 +1,68 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import Diffeo
+
+
+def test_Diffeo_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ fixed_file=dict(
+ argstr='%s',
+ position=0,
+ ),
+ ftol=dict(
+ argstr='%g',
+ mandatory=True,
+ position=5,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ legacy=dict(
+ argstr='%d',
+ mandatory=True,
+ position=3,
+ usedefault=True,
+ ),
+ mask_file=dict(
+ argstr='%s',
+ position=2,
+ ),
+ moving_file=dict(
+ argstr='%s',
+ copyfile=False,
+ position=1,
+ ),
+ n_iters=dict(
+ argstr='%d',
+ mandatory=True,
+ position=4,
+ usedefault=True,
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ )
+ inputs = Diffeo.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_Diffeo_outputs():
+ output_map = dict(
+ out_file=dict(),
+ out_file_xfm=dict(),
+ )
+ outputs = Diffeo.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py
index a0bc0a580..0129f7d70 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py
@@ -12,15 +12,13 @@ def test_DiffeoTask_inputs():
),
fixed_file=dict(
argstr='%s',
- exists=True,
- mandatory=False,
position=0,
),
ftol=dict(
- argstr='%s',
- exists=True,
+ argstr='%g',
mandatory=True,
position=5,
+ usedefault=True,
),
ignore_exception=dict(
deprecated='1.0.0',
@@ -28,28 +26,25 @@ def test_DiffeoTask_inputs():
usedefault=True,
),
legacy=dict(
- argstr='%s',
- exists=True,
+ argstr='%d',
mandatory=True,
position=3,
+ usedefault=True,
),
- mask=dict(
+ mask_file=dict(
argstr='%s',
- exists=True,
- mandatory=False,
position=2,
),
moving_file=dict(
argstr='%s',
- exists=True,
- mandatory=False,
+ copyfile=False,
position=1,
),
n_iters=dict(
- argstr='%s',
- exists=True,
+ argstr='%d',
mandatory=True,
position=4,
+ usedefault=True,
),
terminal_output=dict(
deprecated='1.0.0',
diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py
new file mode 100644
index 000000000..8ffc827e6
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py
@@ -0,0 +1,72 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import Rigid
+
+
+def test_Rigid_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ fixed_file=dict(
+ argstr='%s',
+ copyfile=False,
+ mandatory=True,
+ position=0,
+ ),
+ ftol=dict(
+ argstr='%g',
+ mandatory=True,
+ position=4,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ initialize_xfm=dict(
+ argstr='%s',
+ copyfile=True,
+ position=5,
+ ),
+ moving_file=dict(
+ argstr='%s',
+ copyfile=False,
+ mandatory=True,
+ position=1,
+ ),
+ sampling_xyz=dict(
+ argstr='%g %g %g',
+ mandatory=True,
+ position=3,
+ usedefault=True,
+ ),
+ similarity_metric=dict(
+ argstr='%s',
+ mandatory=True,
+ position=2,
+ usedefault=True,
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ )
+ inputs = Rigid.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_Rigid_outputs():
+ output_map = dict(
+ out_file=dict(),
+ out_file_xfm=dict(),
+ )
+ outputs = Rigid.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py
index fbe65d92f..a2747184d 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py
@@ -12,56 +12,48 @@ def test_RigidTask_inputs():
),
fixed_file=dict(
argstr='%s',
- exists=True,
+ copyfile=False,
mandatory=True,
position=0,
),
ftol=dict(
- argstr='%s',
+ argstr='%g',
mandatory=True,
- position=6,
+ position=4,
+ usedefault=True,
),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
+ initialize_xfm=dict(
+ argstr='%s',
+ copyfile=True,
+ position=5,
+ ),
moving_file=dict(
argstr='%s',
- exists=True,
+ copyfile=False,
mandatory=True,
position=1,
),
- samplingX=dict(
- argstr='%s',
+ sampling_xyz=dict(
+ argstr='%g %g %g',
mandatory=True,
position=3,
- ),
- samplingY=dict(
- argstr='%s',
- mandatory=True,
- position=4,
- ),
- samplingZ=dict(
- argstr='%s',
- mandatory=True,
- position=5,
+ usedefault=True,
),
similarity_metric=dict(
argstr='%s',
- exists=True,
mandatory=True,
position=2,
+ usedefault=True,
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
- useInTrans=dict(
- argstr='%s',
- mandatory=False,
- position=7,
- ),
)
inputs = RigidTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py
new file mode 100644
index 000000000..3fefd1044
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py
@@ -0,0 +1,56 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..utils import SVAdjustVoxSp
+
+
+def test_SVAdjustVoxSp_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ origin=dict(
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_avs',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['voxel_size', 'origin'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
+ )
+ inputs = SVAdjustVoxSp.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_SVAdjustVoxSp_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = SVAdjustVoxSp.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py
index d60b203ff..88f328d80 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py
@@ -17,40 +17,30 @@ def test_SVAdjustVoxSpTask_inputs():
),
in_file=dict(
argstr='-in %s',
- exists=True,
mandatory=True,
- position=0,
- ),
- in_target=dict(
- argstr='-target %s',
- exists=True,
- mandatory=False,
- position=2,
- ),
- in_voxsz=dict(
- argstr='-vsize %s',
- exists=True,
- mandatory=False,
- position=3,
),
origin=dict(
- argstr='-origin %s',
- exists=True,
- mandatory=False,
- position=4,
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
),
out_file=dict(
argstr='-out %s',
- exists=True,
- mandatory=False,
+ keep_extension=True,
name_source='in_file',
- name_template='%s_origmvd.nii.gz',
- position=1,
+ name_template='%s_avs',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['voxel_size', 'origin'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
)
inputs = SVAdjustVoxSpTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py
new file mode 100644
index 000000000..e11f4e111
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py
@@ -0,0 +1,61 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..utils import SVResample
+
+
+def test_SVResample_inputs():
+ input_map = dict(
+ align=dict(argstr='-align %s', ),
+ args=dict(argstr='%s', ),
+ array_size=dict(
+ argstr='-size %d %d %d',
+ xor=['target_file'],
+ ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ origin=dict(
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_resampled',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['array_size', 'voxel_size', 'origin'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
+ )
+ inputs = SVResample.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_SVResample_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = SVResample.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py
index 64f4cadb6..8058c9443 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py
@@ -5,7 +5,12 @@ from ..utils import SVResampleTask
def test_SVResampleTask_inputs():
input_map = dict(
+ align=dict(argstr='-align %s', ),
args=dict(argstr='%s', ),
+ array_size=dict(
+ argstr='-size %d %d %d',
+ xor=['target_file'],
+ ),
environ=dict(
nohash=True,
usedefault=True,
@@ -15,36 +20,32 @@ def test_SVResampleTask_inputs():
nohash=True,
usedefault=True,
),
- in_arraysz=dict(
- argstr='-size %s',
- exists=True,
- mandatory=False,
- position=1,
- ),
in_file=dict(
argstr='-in %s',
- exists=True,
mandatory=True,
- position=0,
),
- in_voxsz=dict(
- argstr='-vsize %s',
- exists=True,
- mandatory=False,
- position=2,
+ origin=dict(
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
),
out_file=dict(
argstr='-out %s',
- exists=True,
- mandatory=False,
+ keep_extension=True,
name_source='in_file',
- name_template='%s_resampled.nii.gz',
- position=3,
+ name_template='%s_resampled',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['array_size', 'voxel_size', 'origin'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
)
inputs = SVResampleTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py
index bad603900..a111687d2 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py
@@ -18,23 +18,29 @@ def test_TVAdjustOriginTask_inputs():
in_file=dict(
argstr='-in %s',
mandatory=True,
- position=0,
),
origin=dict(
- argstr='-origin %s',
- exists=True,
- mandatory=False,
- position=4,
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
),
out_file=dict(
argstr='-out %s',
- genfile=True,
- position=1,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_avs',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['voxel_size', 'origin'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
)
inputs = TVAdjustOriginTask.input_spec()
@@ -42,7 +48,7 @@ def test_TVAdjustOriginTask_inputs():
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TVAdjustOriginTask_outputs():
- output_map = dict(out_file=dict(exists=True, ), )
+ output_map = dict(out_file=dict(), )
outputs = TVAdjustOriginTask.output_spec()
for key, metadata in list(output_map.items()):
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py
new file mode 100644
index 000000000..34d8c2bf4
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py
@@ -0,0 +1,56 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..utils import TVAdjustVoxSp
+
+
+def test_TVAdjustVoxSp_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ origin=dict(
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_avs',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['voxel_size', 'origin'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
+ )
+ inputs = TVAdjustVoxSp.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_TVAdjustVoxSp_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = TVAdjustVoxSp.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py
index 2da57d886..7d4c3d6e7 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py
@@ -18,34 +18,28 @@ def test_TVAdjustVoxSpTask_inputs():
in_file=dict(
argstr='-in %s',
mandatory=True,
- position=0,
),
origin=dict(
- argstr='-origin %s',
- exists=True,
- mandatory=False,
- position=4,
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
),
out_file=dict(
argstr='-out %s',
- genfile=True,
- position=1,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_avs',
),
- target=dict(
+ target_file=dict(
argstr='-target %s',
- exists=True,
- mandatory=False,
- position=2,
+ xor=['voxel_size', 'origin'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
- vsize=dict(
- argstr='-vsize %s',
- exists=True,
- mandatory=False,
- position=3,
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
),
)
inputs = TVAdjustVoxSpTask.input_spec()
@@ -54,7 +48,7 @@ def test_TVAdjustVoxSpTask_inputs():
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TVAdjustVoxSpTask_outputs():
- output_map = dict(out_file=dict(exists=True, ), )
+ output_map = dict(out_file=dict(), )
outputs = TVAdjustVoxSpTask.output_spec()
for key, metadata in list(output_map.items()):
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py
new file mode 100644
index 000000000..6a1a01a59
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py
@@ -0,0 +1,62 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..utils import TVResample
+
+
+def test_TVResample_inputs():
+ input_map = dict(
+ align=dict(argstr='-align %s', ),
+ args=dict(argstr='%s', ),
+ array_size=dict(
+ argstr='-size %d %d %d',
+ xor=['target_file'],
+ ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ interpolation=dict(argstr='-interp %s', ),
+ origin=dict(
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_resampled',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['array_size', 'voxel_size', 'origin'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
+ )
+ inputs = TVResample.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_TVResample_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = TVResample.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py
index 044e8f67d..43ae1cd84 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py
@@ -5,7 +5,12 @@ from ..utils import TVResampleTask
def test_TVResampleTask_inputs():
input_map = dict(
+ align=dict(argstr='-align %s', ),
args=dict(argstr='%s', ),
+ array_size=dict(
+ argstr='-size %d %d %d',
+ xor=['target_file'],
+ ),
environ=dict(
nohash=True,
usedefault=True,
@@ -15,36 +20,33 @@ def test_TVResampleTask_inputs():
nohash=True,
usedefault=True,
),
- in_arraysz=dict(
- argstr='-size %s',
- exists=True,
- mandatory=False,
- position=1,
- ),
in_file=dict(
argstr='-in %s',
- exists=True,
mandatory=True,
- position=0,
),
- in_voxsz=dict(
- argstr='-vsize %s',
- exists=True,
- mandatory=False,
- position=2,
+ interpolation=dict(argstr='-interp %s', ),
+ origin=dict(
+ argstr='-origin %g %g %g',
+ xor=['target_file'],
),
out_file=dict(
argstr='-out %s',
- exists=True,
- mandatory=False,
+ keep_extension=True,
name_source='in_file',
- name_template='%s_resampled.nii.gz',
- position=3,
+ name_template='%s_resampled',
+ ),
+ target_file=dict(
+ argstr='-target %s',
+ xor=['array_size', 'voxel_size', 'origin'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target_file'],
+ ),
)
inputs = TVResampleTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py
new file mode 100644
index 000000000..a1eceb754
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py
@@ -0,0 +1,43 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..utils import TVtool
+
+
+def test_TVtool_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ in_flag=dict(argstr='-%s', ),
+ out_file=dict(
+ argstr='-out %s',
+ genfile=True,
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ )
+ inputs = TVtool.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_TVtool_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = TVtool.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py
index 8fb0ce055..cebbdc96a 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py
@@ -17,15 +17,12 @@ def test_TVtoolTask_inputs():
),
in_file=dict(
argstr='-in %s',
- exists=True,
- mandatory=False,
- position=0,
+ mandatory=True,
),
- in_flag=dict(
- argstr='-%s',
- exists=True,
- mandatory=False,
- position=1,
+ in_flag=dict(argstr='-%s', ),
+ out_file=dict(
+ argstr='-out %s',
+ genfile=True,
),
terminal_output=dict(
deprecated='1.0.0',
diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVol.py
new file mode 100644
index 000000000..b9c01c493
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVol.py
@@ -0,0 +1,68 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import AffScalarVol
+
+
+def test_AffScalarVol_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ deformation=dict(
+ argstr='-deformation %g %g %g %g %g %g',
+ xor=['transform'],
+ ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ euler=dict(
+ argstr='-euler %g %g %g',
+ xor=['transform'],
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_affxfmd',
+ ),
+ target=dict(
+ argstr='-target %s',
+ xor=['transform'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ transform=dict(
+ argstr='-trans %s',
+ xor=['target', 'translation', 'euler', 'deformation'],
+ ),
+ translation=dict(
+ argstr='-translation %g %g %g',
+ xor=['transform'],
+ ),
+ )
+ inputs = AffScalarVol.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_AffScalarVol_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = AffScalarVol.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py
index 02d22b182..e0d45372c 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py
@@ -6,42 +6,53 @@ from ..registration import affScalarVolTask
def test_affScalarVolTask_inputs():
input_map = dict(
args=dict(argstr='%s', ),
+ deformation=dict(
+ argstr='-deformation %g %g %g %g %g %g',
+ xor=['transform'],
+ ),
environ=dict(
nohash=True,
usedefault=True,
),
+ euler=dict(
+ argstr='-euler %g %g %g',
+ xor=['transform'],
+ ),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
- in_target=dict(
- argstr='-target %s',
- position=2,
- ),
- in_volume=dict(
+ in_file=dict(
argstr='-in %s',
- exists=True,
- mandatory=False,
- position=0,
+ mandatory=True,
),
- in_xfm=dict(
- argstr='-trans %s',
- exists=True,
- mandatory=False,
- position=1,
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
),
out_file=dict(
argstr='-out %s',
- mandatory=False,
- name_source='in_volume',
- name_template='%s_affxfmd.nii.gz',
- position=3,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_affxfmd',
+ ),
+ target=dict(
+ argstr='-target %s',
+ xor=['transform'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ transform=dict(
+ argstr='-trans %s',
+ xor=['target', 'translation', 'euler', 'deformation'],
+ ),
+ translation=dict(
+ argstr='-translation %g %g %g',
+ xor=['transform'],
+ ),
)
inputs = affScalarVolTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVol.py
new file mode 100644
index 000000000..8004a09ad
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVol.py
@@ -0,0 +1,72 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import AffSymTensor3DVol
+
+
+def test_AffSymTensor3DVol_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ deformation=dict(
+ argstr='-deformation %g %g %g %g %g %g',
+ xor=['transform'],
+ ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ euler=dict(
+ argstr='-euler %g %g %g',
+ xor=['transform'],
+ ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_affxfmd',
+ ),
+ reorient=dict(
+ argstr='-reorient %s',
+ usedefault=True,
+ ),
+ target=dict(
+ argstr='-target %s',
+ xor=['transform'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ transform=dict(
+ argstr='-trans %s',
+ xor=['target', 'translation', 'euler', 'deformation'],
+ ),
+ translation=dict(
+ argstr='-translation %g %g %g',
+ xor=['transform'],
+ ),
+ )
+ inputs = AffSymTensor3DVol.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_AffSymTensor3DVol_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = AffSymTensor3DVol.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py
index 40198fa9e..9b9502a4d 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py
@@ -6,45 +6,57 @@ from ..registration import affSymTensor3DVolTask
def test_affSymTensor3DVolTask_inputs():
input_map = dict(
args=dict(argstr='%s', ),
+ deformation=dict(
+ argstr='-deformation %g %g %g %g %g %g',
+ xor=['transform'],
+ ),
environ=dict(
nohash=True,
usedefault=True,
),
+ euler=dict(
+ argstr='-euler %g %g %g',
+ xor=['transform'],
+ ),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
- in_target=dict(
- argstr='-target %s',
- exists=True,
- mandatory=False,
- position=2,
- ),
- in_tensor=dict(
+ in_file=dict(
argstr='-in %s',
- exists=True,
- mandatory=False,
- position=0,
+ mandatory=True,
),
- in_xfm=dict(
- argstr='-trans %s',
- exists=True,
- mandatory=False,
- position=1,
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
),
out_file=dict(
argstr='-out %s',
- exists=True,
- mandatory=False,
- name_source='in_tensor',
- name_template='%s_affxfmd.nii.gz',
- position=3,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_affxfmd',
+ ),
+ reorient=dict(
+ argstr='-reorient %s',
+ usedefault=True,
+ ),
+ target=dict(
+ argstr='-target %s',
+ xor=['transform'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ transform=dict(
+ argstr='-trans %s',
+ xor=['target', 'translation', 'euler', 'deformation'],
+ ),
+ translation=dict(
+ argstr='-translation %g %g %g',
+ xor=['transform'],
+ ),
)
inputs = affSymTensor3DVolTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVol.py
new file mode 100644
index 000000000..86512dfa2
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVol.py
@@ -0,0 +1,62 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import DiffeoScalarVol
+
+
+def test_DiffeoScalarVol_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ flip=dict(argstr='-flip %d %d %d', ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_diffeoxfmd',
+ ),
+ resampling_type=dict(argstr='-type %s', ),
+ target=dict(
+ argstr='-target %s',
+ xor=['voxel_size'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ transform=dict(
+ argstr='-trans %s',
+ mandatory=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target'],
+ ),
+ )
+ inputs = DiffeoScalarVol.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_DiffeoScalarVol_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = DiffeoScalarVol.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py
index 5b02f3498..b3ef9827a 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py
@@ -10,63 +10,43 @@ def test_diffeoScalarVolTask_inputs():
nohash=True,
usedefault=True,
),
+ flip=dict(argstr='-flip %d %d %d', ),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
- in_flip=dict(
- argstr='-flip %s',
- exists=True,
- mandatory=False,
- position=5,
- ),
- in_interp=dict(
- argstr='-interp %s',
- exists=True,
- mandatory=False,
- position=7,
- ),
- in_target=dict(
- argstr='-target %s',
- exists=True,
- mandatory=False,
- position=3,
- ),
- in_type=dict(
- argstr='-type %s',
- exists=True,
- mandatory=False,
- position=6,
- ),
- in_volume=dict(
+ in_file=dict(
argstr='-in %s',
- exists=True,
- mandatory=False,
- position=0,
+ mandatory=True,
),
- in_vsize=dict(
- argstr='-vsize %s',
- exists=True,
- mandatory=False,
- position=4,
- ),
- in_xfm=dict(
- argstr='-trans %s',
- exists=True,
- mandatory=False,
- position=2,
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
),
out_file=dict(
argstr='-out %s',
- name_source='in_volume',
- name_template='%s_diffeoxfmd.nii.gz',
- position=1,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_diffeoxfmd',
+ ),
+ resampling_type=dict(argstr='-type %s', ),
+ target=dict(
+ argstr='-target %s',
+ xor=['voxel_size'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ transform=dict(
+ argstr='-trans %s',
+ mandatory=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target'],
+ ),
)
inputs = diffeoScalarVolTask.input_spec()
diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVol.py
new file mode 100644
index 000000000..31a516a7c
--- /dev/null
+++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVol.py
@@ -0,0 +1,70 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..registration import DiffeoSymTensor3DVol
+
+
+def test_DiffeoSymTensor3DVol_inputs():
+ input_map = dict(
+ args=dict(argstr='%s', ),
+ df=dict(
+ argstr='-df %s',
+ usedefault=True,
+ ),
+ environ=dict(
+ nohash=True,
+ usedefault=True,
+ ),
+ flip=dict(argstr='-flip %d %d %d', ),
+ ignore_exception=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ usedefault=True,
+ ),
+ in_file=dict(
+ argstr='-in %s',
+ mandatory=True,
+ ),
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
+ ),
+ out_file=dict(
+ argstr='-out %s',
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_diffeoxfmd',
+ ),
+ reorient=dict(
+ argstr='-reorient %s',
+ usedefault=True,
+ ),
+ resampling_type=dict(argstr='-type %s', ),
+ target=dict(
+ argstr='-target %s',
+ xor=['voxel_size'],
+ ),
+ terminal_output=dict(
+ deprecated='1.0.0',
+ nohash=True,
+ ),
+ transform=dict(
+ argstr='-trans %s',
+ mandatory=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target'],
+ ),
+ )
+ inputs = DiffeoSymTensor3DVol.input_spec()
+
+ for key, metadata in list(input_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(inputs.traits()[key], metakey) == value
+def test_DiffeoSymTensor3DVol_outputs():
+ output_map = dict(out_file=dict(), )
+ outputs = DiffeoSymTensor3DVol.output_spec()
+
+ for key, metadata in list(output_map.items()):
+ for metakey, value in list(metadata.items()):
+ assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py
index 7f3926c71..a380fcabc 100644
--- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py
+++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py
@@ -6,45 +6,55 @@ from ..registration import diffeoSymTensor3DVolTask
def test_diffeoSymTensor3DVolTask_inputs():
input_map = dict(
args=dict(argstr='%s', ),
+ df=dict(
+ argstr='-df %s',
+ usedefault=True,
+ ),
environ=dict(
nohash=True,
usedefault=True,
),
+ flip=dict(argstr='-flip %d %d %d', ),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
- in_target=dict(
- argstr='-target %s',
- exists=True,
- mandatory=False,
- position=2,
- ),
- in_tensor=dict(
+ in_file=dict(
argstr='-in %s',
- exists=True,
- mandatory=False,
- position=0,
+ mandatory=True,
),
- in_xfm=dict(
- argstr='-trans %s',
- exists=True,
- mandatory=False,
- position=1,
+ interpolation=dict(
+ argstr='-interp %s',
+ usedefault=True,
),
out_file=dict(
argstr='-out %s',
- exists=True,
- mandatory=False,
- name_source='in_tensor',
- name_template='%s_diffeoxfmd.nii.gz',
- position=3,
+ keep_extension=True,
+ name_source='in_file',
+ name_template='%s_diffeoxfmd',
+ ),
+ reorient=dict(
+ argstr='-reorient %s',
+ usedefault=True,
+ ),
+ resampling_type=dict(argstr='-type %s', ),
+ target=dict(
+ argstr='-target %s',
+ xor=['voxel_size'],
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
+ transform=dict(
+ argstr='-trans %s',
+ mandatory=True,
+ ),
+ voxel_size=dict(
+ argstr='-vsize %g %g %g',
+ xor=['target'],
+ ),
)
inputs = diffeoSymTensor3DVolTask.input_spec()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
configparser==5.2.0
coverage==6.2
cycler==0.11.0
decorator==4.4.2
docutils==0.18.1
execnet==1.9.0
funcsigs==1.0.2
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
kiwisolver==1.3.1
lxml==5.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@e446466290b9ccba5d5aa589971c97e744d9267b#egg=nipype
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-env==0.6.2
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
rdflib==5.0.0
requests==2.27.1
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- configparser==5.2.0
- coverage==6.2
- cycler==0.11.0
- decorator==4.4.2
- docutils==0.18.1
- execnet==1.9.0
- funcsigs==1.0.2
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- lxml==5.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-env==0.6.2
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rdflib==5.0.0
- requests==2.27.1
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py::test_DiffeoTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py::test_affSymTensor3DVolTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py::test_TVtoolTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py::test_SVAdjustVoxSp_inputs",
"nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py::test_ComposeXfm_outputs",
"nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVol.py::test_AffSymTensor3DVol_outputs",
"nipype/interfaces/dtitk/tests/test_auto_TVtool.py::test_TVtool_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVResample.py::test_TVResample_inputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVol.py::test_DiffeoScalarVol_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVResample.py::test_TVResample_outputs",
"nipype/interfaces/dtitk/tests/test_auto_BinThreshTASK.py::test_BinThreshTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py::test_ComposeXfm_inputs",
"nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py::test_ComposeXfmTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py::test_TVAdjustVoxSp_outputs",
"nipype/interfaces/dtitk/tests/test_auto_Rigid.py::test_Rigid_inputs",
"nipype/interfaces/dtitk/tests/test_auto_affScalarVol.py::test_AffScalarVol_outputs",
"nipype/interfaces/dtitk/tests/test_auto_Rigid.py::test_Rigid_outputs",
"nipype/interfaces/dtitk/tests/test_auto_AffineTask.py::test_AffineTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_Affine.py::test_Affine_inputs",
"nipype/interfaces/dtitk/tests/test_auto_BinThresh.py::test_BinThresh_inputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVol.py::test_DiffeoScalarVol_outputs",
"nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py::test_affScalarVolTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_SVResample.py::test_SVResample_outputs",
"nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py::test_TVAdjustVoxSp_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py::test_TVResampleTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py::test_TVAdjustOriginTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py::test_SVResampleTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVol.py::test_AffSymTensor3DVol_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVtool.py::test_TVtool_outputs",
"nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py::test_TVAdjustVoxSpTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_BinThresh.py::test_BinThresh_outputs",
"nipype/interfaces/dtitk/tests/test_auto_Diffeo.py::test_Diffeo_outputs",
"nipype/interfaces/dtitk/tests/test_auto_Affine.py::test_Affine_outputs",
"nipype/interfaces/dtitk/tests/test_auto_RigidTask.py::test_RigidTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py::test_SVAdjustVoxSpTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py::test_SVAdjustVoxSp_outputs",
"nipype/interfaces/dtitk/tests/test_auto_affScalarVol.py::test_AffScalarVol_inputs",
"nipype/interfaces/dtitk/tests/test_auto_SVResample.py::test_SVResample_inputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py::test_diffeoScalarVolTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py::test_diffeoSymTensor3DVolTask_inputs",
"nipype/interfaces/dtitk/tests/test_auto_Diffeo.py::test_Diffeo_inputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVol.py::test_DiffeoSymTensor3DVol_inputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVol.py::test_DiffeoSymTensor3DVol_outputs"
]
| []
| [
"nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py::test_DiffeoTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py::test_TVResampleTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py::test_TVAdjustVoxSpTask_outputs",
"nipype/interfaces/afni/tests/test_auto_TProject.py::test_TProject_inputs",
"nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py::test_TVtoolTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py::test_TVAdjustOriginTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py::test_affSymTensor3DVolTask_outputs",
"nipype/interfaces/afni/tests/test_auto_TProject.py::test_TProject_outputs",
"nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py::test_SVResampleTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py::test_ComposeXfmTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py::test_affScalarVolTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_AffineTask.py::test_AffineTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_BinThreshTASK.py::test_BinThreshTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py::test_SVAdjustVoxSpTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_RigidTask.py::test_RigidTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py::test_diffeoSymTensor3DVolTask_outputs",
"nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py::test_diffeoScalarVolTask_outputs"
]
| []
| Apache License 2.0 | 2,334 | [
"nipype/interfaces/afni/preprocess.py",
"nipype/workflows/dmri/dtitk/tensor_registration.py",
"nipype/testing/data/im_affine.aff",
"nipype/testing/data/im_warp.df.nii",
"nipype/interfaces/dtitk/__init__.py",
"nipype/interfaces/dtitk/registration.py",
"nipype/interfaces/dtitk/base.py",
"nipype/workflows/dmri/dtitk/__init__.py",
"nipype/interfaces/dtitk/utils.py"
]
| [
"nipype/interfaces/afni/preprocess.py",
"nipype/workflows/dmri/dtitk/tensor_registration.py",
"nipype/testing/data/im_affine.aff",
"nipype/testing/data/im_warp.df.nii",
"nipype/interfaces/dtitk/__init__.py",
"nipype/interfaces/dtitk/registration.py",
"nipype/interfaces/dtitk/base.py",
"nipype/workflows/dmri/dtitk/__init__.py",
"nipype/interfaces/dtitk/utils.py"
]
|
|
elastic__rally-449 | 7aad2e841ca7a2e022447993a83513f24b49c755 | 2018-03-27 07:48:46 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/docs/configuration.rst b/docs/configuration.rst
index b4f524ed..08430670 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -66,7 +66,7 @@ As you can see above, Rally autodetects if git, Gradle and a JDK are installed.
As you can see, Rally tells you that you cannot build Elasticsearch from sources but you can still benchmark official binary distributions.
-It's also possible that Rally cannot automatically find your JDK 8 or JDK 9 home directory. In that case, it will ask you later in the configuration process. If you do not provide a JDK home directory, Rally cannot start Elasticsearch on this machine but you can still use it as a load generator to :doc:`benchmark remote clusters </recipes>`.
+It's also possible that Rally cannot automatically find your JDK 8 or JDK 10 home directory. In that case, it will ask you later in the configuration process. If you do not provide a JDK home directory, Rally cannot start Elasticsearch on this machine but you can still use it as a load generator to :doc:`benchmark remote clusters </recipes>`.
After running the initial detection, Rally will try to autodetect your Elasticsearch project directory (either in the current directory or in ``../elasticsearch``) or will choose a default directory::
@@ -122,7 +122,7 @@ Rally will ask you a few more things in the advanced setup:
* **Benchmark data directory**: Rally stores all benchmark related data in this directory which can take up to several tens of GB. If you want to use a dedicated partition, you can specify a different data directory here.
* **Elasticsearch project directory**: This is the directory where the Elasticsearch sources are located. If you don't actively develop on Elasticsearch you can just leave the default but if you want to benchmark local changes you should point Rally to your project directory. Note that Rally will run builds with Gradle in this directory (it runs ``gradle clean`` and ``gradle :distribution:tar:assemble``).
-* **JDK root directory**: Rally will only ask this if it could not autodetect the JDK home by itself. Just enter the root directory of the JDK you want to use. By default, Rally will choose Java 8 if available and fallback to Java 9.
+* **JDK root directory**: Rally will only ask this if it could not autodetect the JDK home by itself. Just enter the root directory of the JDK you want to use. By default, Rally will choose Java 8 if available and fallback to Java 10.
* **Metrics store type**: You can choose between ``in-memory`` which requires no additional setup or ``elasticsearch`` which requires that you start a dedicated Elasticsearch instance to store metrics but gives you much more flexibility to analyse results.
* **Metrics store settings** (only for metrics store type ``elasticsearch``): Provide the connection details to the Elasticsearch metrics store. This should be an instance that you use just for Rally but it can be a rather small one. A single node cluster with default setting should do it. When using self-signed certificates on the Elasticsearch metrics store, certificate verification can be turned off by setting the ``datastore.ssl.verification_mode`` setting to ``none``. Alternatively you can enter the path to the certificate authority's signing certificate in ``datastore.ssl.certificate_authorities``. Both settings are optional.
* **Name for this benchmark environment** (only for metrics store type ``elasticsearch``): You can use the same metrics store for multiple environments (e.g. local, continuous integration etc.) so you can separate metrics from different environments by choosing a different name.
diff --git a/esrally/config.py b/esrally/config.py
index b8e0d79d..32348a67 100644
--- a/esrally/config.py
+++ b/esrally/config.py
@@ -106,7 +106,7 @@ def auto_load_local_config(base_config, additional_sections=None, config_file_cl
class Config:
- CURRENT_CONFIG_VERSION = 13
+ CURRENT_CONFIG_VERSION = 14
"""
Config is the main entry point to retrieve and set benchmark properties. It provides multiple scopes to allow overriding of values on
@@ -305,13 +305,13 @@ class ConfigFactory:
gradle_bin = "./gradlew" if use_gradle_wrapper else io.guess_install_location("gradle")
java_8_home = runtime_java_home if runtime_java_home else io.guess_java_home(major_version=8)
- java_9_home = java_home if java_home else io.guess_java_home(major_version=9)
+ java_10_home = java_home if java_home else io.guess_java_home(major_version=10)
from esrally.utils import jvm
if java_8_home:
auto_detected_java_home = java_8_home
# Don't auto-detect an EA release and bring trouble to the user later on. They can still configure it manually if they want to.
- elif java_9_home and not jvm.is_early_access_release(java_9_home):
- auto_detected_java_home = java_9_home
+ elif java_10_home and not jvm.is_early_access_release(java_10_home):
+ auto_detected_java_home = java_10_home
else:
auto_detected_java_home = None
@@ -342,14 +342,14 @@ class ConfigFactory:
self.o("* Setting up benchmark data directory in %s" % root_dir)
if benchmark_from_sources:
- if not java_9_home or jvm.is_early_access_release(java_9_home):
- raw_java_9_home = self._ask_property("Enter the JDK 9 root directory", check_path_exists=True, mandatory=False)
- if raw_java_9_home and jvm.major_version(raw_java_9_home) == 9 and not jvm.is_early_access_release(raw_java_9_home):
- java_9_home = io.normalize_path(raw_java_9_home) if raw_java_9_home else None
+ if not java_10_home or jvm.is_early_access_release(java_10_home):
+ raw_java_10_home = self._ask_property("Enter the JDK 10 root directory", check_path_exists=True, mandatory=False)
+ if raw_java_10_home and jvm.major_version(raw_java_10_home) == 10 and not jvm.is_early_access_release(raw_java_10_home):
+ java_10_home = io.normalize_path(raw_java_10_home) if raw_java_10_home else None
else:
benchmark_from_sources = False
self.o("********************************************************************************")
- self.o("You don't have a valid JDK 9 installation and cannot benchmark source builds.")
+ self.o("You don't have a valid JDK 10 installation and cannot benchmark source builds.")
self.o("")
self.o("You can still benchmark binary distributions with e.g.:")
self.o("")
@@ -454,8 +454,8 @@ class ConfigFactory:
config["runtime"] = {}
if java_home:
config["runtime"]["java.home"] = java_home
- if java_9_home:
- config["runtime"]["java9.home"] = java_9_home
+ if java_10_home:
+ config["runtime"]["java10.home"] = java_10_home
config["benchmarks"] = {}
config["benchmarks"]["local.dataset.cache"] = "${node:root.dir}/data"
@@ -829,6 +829,44 @@ def migrate(config_file, current_version, target_version, out=print, i=input):
current_version = 13
config["meta"]["config.version"] = str(current_version)
+ if current_version == 13 and target_version > current_version:
+ # This version replaced java9.home with java10.home
+ if "build" in config and "gradle.bin" in config["build"]:
+ java_10_home = io.guess_java_home(major_version=10)
+ from esrally.utils import jvm
+ if java_10_home and not jvm.is_early_access_release(java_10_home):
+ logger.debug("Autodetected a JDK 10 installation at [%s]" % java_10_home)
+ if "runtime" not in config:
+ config["runtime"] = {}
+ config["runtime"]["java10.home"] = java_10_home
+ else:
+ logger.debug("Could not autodetect a JDK 10 installation. Checking [java.home] already points to a JDK 10.")
+ detected = False
+ if "runtime" in config:
+ java_home = config["runtime"]["java.home"]
+ if jvm.major_version(java_home) == 10 and not jvm.is_early_access_release(java_home):
+ config["runtime"]["java10.home"] = java_home
+ detected = True
+
+ if not detected:
+ logger.debug("Could not autodetect a JDK 10 installation. Asking user.")
+ raw_java_10_home = prompter.ask_property("Enter the JDK 10 root directory", check_path_exists=True, mandatory=False)
+ if raw_java_10_home and jvm.major_version(raw_java_10_home) == 10 and not jvm.is_early_access_release(raw_java_10_home):
+ java_10_home = io.normalize_path(raw_java_10_home) if raw_java_10_home else None
+ config["runtime"]["java10.home"] = java_10_home
+ else:
+ out("********************************************************************************")
+ out("You don't have a valid JDK 10 installation and cannot benchmark source builds.")
+ out("")
+ out("You can still benchmark binary distributions with e.g.:")
+ out("")
+ out(" %s --distribution-version=6.0.0" % PROGRAM_NAME)
+ out("********************************************************************************")
+ out("")
+
+ current_version = 14
+ config["meta"]["config.version"] = str(current_version)
+
# all migrations done
config_file.store(config)
logger.info("Successfully self-upgraded configuration to version [%s]" % target_version)
diff --git a/esrally/mechanic/provisioner.py b/esrally/mechanic/provisioner.py
index 5646a0b2..4a1c3b38 100644
--- a/esrally/mechanic/provisioner.py
+++ b/esrally/mechanic/provisioner.py
@@ -7,12 +7,13 @@ from enum import Enum
import jinja2
from esrally import exceptions
-from esrally.utils import io, console, process, modules
+from esrally.utils import io, console, process, modules, versions
logger = logging.getLogger("rally.provisioner")
def local_provisioner(cfg, car, plugins, cluster_settings, all_node_ips, target_root, node_id):
+ distribution_version = cfg.opts("mechanic", "distribution.version", mandatory=False)
ip = cfg.opts("provisioning", "node.ip")
http_port = cfg.opts("provisioning", "node.http.port")
node_name_prefix = cfg.opts("provisioning", "node.name.prefix")
@@ -24,7 +25,7 @@ def local_provisioner(cfg, car, plugins, cluster_settings, all_node_ips, target_
es_installer = ElasticsearchInstaller(car, node_name, node_root_dir, all_node_ips, ip, http_port)
plugin_installers = [PluginInstaller(plugin) for plugin in plugins]
- return BareProvisioner(cluster_settings, es_installer, plugin_installers, preserve)
+ return BareProvisioner(cluster_settings, es_installer, plugin_installers, preserve, distribution_version=distribution_version)
def no_op_provisioner():
@@ -143,11 +144,12 @@ class BareProvisioner:
of the benchmark candidate to the appropriate place.
"""
- def __init__(self, cluster_settings, es_installer, plugin_installers, preserve, apply_config=_apply_config):
+ def __init__(self, cluster_settings, es_installer, plugin_installers, preserve, distribution_version=None, apply_config=_apply_config):
self.preserve = preserve
self._cluster_settings = cluster_settings
self.es_installer = es_installer
self.plugin_installers = plugin_installers
+ self.distribution_version = distribution_version
self.apply_config = apply_config
def prepare(self, binary):
@@ -184,7 +186,17 @@ class BareProvisioner:
plugin_variables = {}
mandatory_plugins = []
for installer in self.plugin_installers:
- mandatory_plugins.append(installer.plugin_name)
+ # For Elasticsearch < 6.3 more specific plugin names are required for mandatory plugin check
+ # Details in: https://github.com/elastic/elasticsearch/pull/28710
+ # TODO: Remove this section with Elasticsearch <6.3 becomes EOL.
+ try:
+ major, minor, _, _ = versions.components(self.distribution_version)
+ if (major == 6 and minor < 3) or major < 6:
+ mandatory_plugins.append(installer.sub_plugin_name)
+ else:
+ mandatory_plugins.append(installer.plugin_name)
+ except (TypeError, exceptions.InvalidSyntax):
+ mandatory_plugins.append(installer.plugin_name)
plugin_variables.update(installer.variables)
cluster_settings = {}
@@ -373,6 +385,11 @@ class PluginInstaller:
def plugin_name(self):
return self.plugin.name
+ @property
+ def sub_plugin_name(self):
+ # if a plugin consists of multiple plugins (e.g. x-pack) we're interested in that name
+ return self.variables.get("plugin_name", self.plugin_name)
+
class NoOpProvisioner:
def __init__(self, *args):
diff --git a/esrally/mechanic/supplier.py b/esrally/mechanic/supplier.py
index 7cbd4b70..b32106c4 100644
--- a/esrally/mechanic/supplier.py
+++ b/esrally/mechanic/supplier.py
@@ -27,9 +27,9 @@ def create(cfg, sources, distribution, build, challenge_root_path, plugins):
if build_needed:
gradle = cfg.opts("build", "gradle.bin")
- java9_home = _java9_home(cfg)
+ java10_home = _java10_home(cfg)
es_src_dir = os.path.join(_src_dir(cfg), _config_value(src_config, "elasticsearch.src.subdir"))
- builder = Builder(es_src_dir, gradle, java9_home, challenge_root_path)
+ builder = Builder(es_src_dir, gradle, java10_home, challenge_root_path)
else:
builder = None
@@ -68,14 +68,14 @@ def create(cfg, sources, distribution, build, challenge_root_path, plugins):
return CompositeSupplier(suppliers)
-def _java9_home(cfg):
+def _java10_home(cfg):
from esrally import config
try:
- return cfg.opts("runtime", "java9.home")
+ return cfg.opts("runtime", "java10.home")
except config.ConfigError:
- logger.exception("Cannot determine Java 9 home.")
- raise exceptions.SystemSetupError("No JDK 9 is configured. You cannot benchmark source builds of Elasticsearch on this machine. "
- "Please install a JDK 9 and reconfigure Rally with %s configure" % PROGRAM_NAME)
+ logger.exception("Cannot determine Java 10 home.")
+ raise exceptions.SystemSetupError("No JDK 10 is configured. You cannot benchmark source builds of Elasticsearch on this machine. "
+ "Please install a JDK 10 and reconfigure Rally with %s configure" % PROGRAM_NAME)
def _required_version(version):
@@ -427,13 +427,6 @@ class SourceRepository:
class Builder:
- # Tested with Gradle 4.1 on Java 9-ea+161
- JAVA_9_GRADLE_OPTS = "--add-opens=java.base/java.io=ALL-UNNAMED " \
- "--add-opens=java.base/java.lang=ALL-UNNAMED " \
- "--add-opens=java.base/java.lang.invoke=ALL-UNNAMED " \
- "--add-opens=java.base/java.util=ALL-UNNAMED " \
- "--add-opens=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED " \
- "--add-opens=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED"
"""
A builder is responsible for creating an installable binary from the source files.
@@ -460,14 +453,8 @@ class Builder:
log_file = "%s/build.log" % self.log_dir
# we capture all output to a dedicated build log file
- jvm_major_version = jvm.major_version(self.java_home)
- if jvm_major_version > 8:
- logger.info("Detected JVM with major version [%d]. Adjusting JDK module access options for the build." % jvm_major_version)
- gradle_opts = "export GRADLE_OPTS=\"%s\"; " % Builder.JAVA_9_GRADLE_OPTS
- else:
- gradle_opts = ""
- build_cmd = "%sexport JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" % (gradle_opts, self.java_home, src_dir, self.gradle, task, log_file)
+ build_cmd = "export JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" % (self.java_home, src_dir, self.gradle, task, log_file)
logger.info("Running build command [%s]" % build_cmd)
if process.run_subprocess(build_cmd):
diff --git a/esrally/mechanic/team.py b/esrally/mechanic/team.py
index b56160f2..e37e5c3a 100644
--- a/esrally/mechanic/team.py
+++ b/esrally/mechanic/team.py
@@ -341,5 +341,3 @@ class PluginDescriptor:
def __eq__(self, other):
return isinstance(other, type(self)) and (self.name, self.config, self.core_plugin) == (other.name, other.config, other.core_plugin)
-
-
diff --git a/rally b/rally
index 2ae34e28..ba83469d 100755
--- a/rally
+++ b/rally
@@ -26,4 +26,5 @@ cd "${RALLY_SRC_HOME}" >/dev/null 2>&1
__RALLY_INTERNAL_BINARY_NAME="esrally"
__RALLY_INTERNAL_HUMAN_NAME="Rally"
-source run.sh
\ No newline at end of file
+source run.sh
+
diff --git a/run.sh b/run.sh
old mode 100644
new mode 100755
index ad5c9cb6..7893e2d8
--- a/run.sh
+++ b/run.sh
@@ -11,13 +11,33 @@
readonly BINARY_NAME="${__RALLY_INTERNAL_BINARY_NAME}"
readonly HUMAN_NAME="${__RALLY_INTERNAL_HUMAN_NAME}"
+function install_esrally_with_setuptools() {
+ # Check if optional parameter with Rally binary path, points to an existing executable file.
+ if [[ -n $1 ]]; then
+ if [[ -f $1 && -x $1 ]]; then return; fi
+ fi
+
+ if [[ ${IN_VIRTUALENV} == 0 ]]; then
+ python3 setup.py -q develop --user
+ else
+ python3 setup.py -q develop
+ fi
+}
+
# Attempt to update Rally itself by default but allow user to skip it.
SELF_UPDATE=YES
# Assume that the "main remote" is called "origin"
REMOTE="origin"
# While we could also check via the presence of `VIRTUAL_ENV` this is a bit more reliable.
-python3 -c 'import sys; print(sys.real_prefix)' >/dev/null 2>&1 && IN_VIRTUALENV=1 || IN_VIRTUALENV=0
+# Check for both pyvenv and normal venv environments
+# https://www.python.org/dev/peps/pep-0405/
+if python3 -c 'import os, sys; sys.exit(0) if "VIRTUAL_ENV" in os.environ else sys.exit(1)' >/dev/null 2>&1
+then
+ IN_VIRTUALENV=1
+else
+ IN_VIRTUALENV=0
+fi
# Check for parameters that are intended for this script. Note that they only work if they're specified at the beginning (due to how
# the shell builtin `shift` works. We could make it work for arbitrary positions but that's not worth the complexity for such an
@@ -45,7 +65,7 @@ case ${i} in
esac
done
-if [ ${SELF_UPDATE} == YES ]
+if [[ $SELF_UPDATE == YES ]]
then
# see http://unix.stackexchange.com/a/155077
if output=$(git status --porcelain) && [ -z "$output" ] && on_master=$(git rev-parse --abbrev-ref HEAD) && [ "$on_master" == "master" ]
@@ -56,17 +76,11 @@ then
git fetch ${REMOTE} --quiet >/dev/null 2>&1
exit_code=$?
set -e
- if [ ${exit_code} == 0 ]
+ if [[ $exit_code == 0 ]]
then
echo "Auto-updating Rally from ${REMOTE}"
git rebase ${REMOTE}/master --quiet
- # if we're running in a virtualenv then the --user prefix is not defined (and makes no sense)
- if [ ${IN_VIRTUALENV} == 0 ]
- then
- python3 setup.py -q develop --user
- else
- python3 setup.py -q develop
- fi
+ install_esrally_with_setuptools
#else
# offline - skipping update
fi
@@ -85,16 +99,18 @@ export THESPLOG_FILE_MAXSIZE=${THESPLOG_FILE_MAXSIZE:-204800}
# Provide a consistent binary name to the user and hide the fact that we call another binary under the hood.
export RALLY_ALTERNATIVE_BINARY_NAME=$(basename "$0")
-if [ ${IN_VIRTUALENV} == 0 ]
+if [[ $IN_VIRTUALENV == 0 ]]
then
RALLY_ROOT=$(python3 -c "import site; print(site.USER_BASE)")
RALLY_BIN=${RALLY_ROOT}/bin/${BINARY_NAME}
- if [ -x "$RALLY_BIN" ]
- then
+ install_esrally_with_setuptools "${RALLY_BIN}"
+ if [[ -x $RALLY_BIN ]]; then
${RALLY_BIN} "$@"
else
echo "Cannot execute ${HUMAN_NAME} in ${RALLY_BIN}."
fi
else
+ install_esrally_with_setuptools "${BINARY_NAME}"
+
${BINARY_NAME} "$@"
fi
| Allow first Rally run from non master branch
Current Rally won't run from a source checkout with [disabled automatic updates](http://esrally.readthedocs.io/en/stable/developing.html?highlight=developing#automatic-updates) if the user is running it for the first time.
This is because the execution of [setup.py](https://github.com/elastic/rally/blob/master/run.sh#L65-L68) only happens when updates are enabled.
| elastic/rally | diff --git a/tests/config_test.py b/tests/config_test.py
index 57bafef2..91fc9b3e 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -238,7 +238,7 @@ class ConfigFactoryTests(TestCase):
@mock.patch("esrally.utils.io.guess_install_location")
def test_create_simple_config(self, guess_install_location, guess_java_home, is_ea_release, working_copy):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
- guess_java_home.return_value = "/tests/java9/home"
+ guess_java_home.return_value = "/tests/java10/home"
is_ea_release.return_value = False
# Rally checks in the parent and sibling directories whether there is an ES working copy. We don't want this detection logic
# to succeed spuriously (e.g. on developer machines).
@@ -256,7 +256,7 @@ class ConfigFactoryTests(TestCase):
print("%s::%s: %s" % (section, k, v))
self.assertTrue("meta" in config_store.config)
- self.assertEqual("13", config_store.config["meta"]["config.version"])
+ self.assertEqual("14", config_store.config["meta"]["config.version"])
self.assertTrue("system" in config_store.config)
self.assertEqual("local", config_store.config["system"]["env.name"])
@@ -273,8 +273,8 @@ class ConfigFactoryTests(TestCase):
self.assertEqual("/tests/usr/bin/gradle", config_store.config["build"]["gradle.bin"])
self.assertTrue("runtime" in config_store.config)
- self.assertEqual("/tests/java9/home", config_store.config["runtime"]["java.home"])
- self.assertEqual("/tests/java9/home", config_store.config["runtime"]["java9.home"])
+ self.assertEqual("/tests/java10/home", config_store.config["runtime"]["java.home"])
+ self.assertEqual("/tests/java10/home", config_store.config["runtime"]["java10.home"])
self.assertTrue("benchmarks" in config_store.config)
self.assertEqual("${node:root.dir}/data", config_store.config["benchmarks"]["local.dataset.cache"])
@@ -316,13 +316,13 @@ class ConfigFactoryTests(TestCase):
major_jvm_version, jvm_is_early_access_release):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
guess_java_home.return_value = None
- normalize_path.side_effect = ["/home/user/.rally/benchmarks", "/tests/java9/home", "/tests/java8/home",
+ normalize_path.side_effect = ["/home/user/.rally/benchmarks", "/tests/java10/home", "/tests/java8/home",
"/home/user/.rally/benchmarks/src"]
- major_jvm_version.return_value = 9
+ major_jvm_version.return_value = 10
jvm_is_early_access_release.return_value = False
path_exists.return_value = True
- f = config.ConfigFactory(i=MockInput(["/tests/java9/home", "/Projects/elasticsearch/src", "/tests/java8/home"]), o=null_output)
+ f = config.ConfigFactory(i=MockInput(["/tests/java10/home", "/Projects/elasticsearch/src", "/tests/java8/home"]), o=null_output)
config_store = InMemoryConfigStore("test")
f.create_config(config_store)
@@ -337,7 +337,7 @@ class ConfigFactoryTests(TestCase):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
guess_java_home.return_value = None
- # the input is the question for the JDK home and the JDK 9 home directory - the user does not define one
+ # the input is the question for the JDK home and the JDK 10 home directory - the user does not define one
f = config.ConfigFactory(i=MockInput(["", ""]), o=null_output)
config_store = InMemoryConfigStore("test")
@@ -345,14 +345,14 @@ class ConfigFactoryTests(TestCase):
self.assertIsNotNone(config_store.config)
self.assertFalse("java.home" in config_store.config["runtime"])
- self.assertFalse("java9.home" in config_store.config["runtime"])
+ self.assertFalse("java10.home" in config_store.config["runtime"])
@mock.patch("esrally.utils.jvm.is_early_access_release")
@mock.patch("esrally.utils.io.guess_java_home")
@mock.patch("esrally.utils.io.guess_install_location")
def test_create_advanced_config(self, guess_install_location, guess_java_home, is_ea_release):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
- guess_java_home.side_effect = ["/tests/java8/home", "/tests/java9/home"]
+ guess_java_home.side_effect = ["/tests/java8/home", "/tests/java10/home"]
is_ea_release.return_value = False
f = config.ConfigFactory(i=MockInput([
@@ -381,7 +381,7 @@ class ConfigFactoryTests(TestCase):
self.assertIsNotNone(config_store.config)
self.assertTrue("meta" in config_store.config)
- self.assertEqual("13", config_store.config["meta"]["config.version"])
+ self.assertEqual("14", config_store.config["meta"]["config.version"])
self.assertTrue("system" in config_store.config)
self.assertEqual("unittest-env", config_store.config["system"]["env.name"])
self.assertTrue("node" in config_store.config)
@@ -391,7 +391,7 @@ class ConfigFactoryTests(TestCase):
self.assertEqual("/tests/usr/bin/gradle", config_store.config["build"]["gradle.bin"])
self.assertTrue("runtime" in config_store.config)
self.assertEqual("/tests/java8/home", config_store.config["runtime"]["java.home"])
- self.assertEqual("/tests/java9/home", config_store.config["runtime"]["java9.home"])
+ self.assertEqual("/tests/java10/home", config_store.config["runtime"]["java10.home"])
self.assertTrue("benchmarks" in config_store.config)
self.assertTrue("reporting" in config_store.config)
@@ -919,3 +919,129 @@ class ConfigMigrationTests(TestCase):
self.assertEqual("13", config_file.config["meta"]["config.version"])
self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
self.assertEqual("/usr/lib/java9", config_file.config["runtime"]["java9.home"])
+
+ def test_migrate_from_13_to_14_without_gradle(self):
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output)
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk8_autodetect_jdk10(self, is_early_access_release, guess_java_home):
+ guess_java_home.return_value = "/usr/lib/java10"
+ is_early_access_release.return_value = False
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java8"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output)
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java10.home"])
+
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ @mock.patch("esrally.utils.jvm.major_version")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk10(self, major_version, is_early_access_release, guess_java_home):
+ guess_java_home.return_value = None
+ is_early_access_release.return_value = False
+ major_version.return_value = 10
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java10"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output)
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java.home"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java10.home"])
+
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ @mock.patch("esrally.utils.jvm.major_version")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_and_skip(self, major_version, is_early_access_release, guess_java_home):
+ guess_java_home.return_value = None
+ is_early_access_release.return_value = False
+ major_version.return_value = 8
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java8"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output, i=MockInput(inputs=[""]))
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
+ self.assertTrue("java10.home" not in config_file.config["runtime"])
+
+ @mock.patch("esrally.utils.io.exists")
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ @mock.patch("esrally.utils.jvm.major_version")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_enter_valid(self, major_version, is_early_access_release, guess_java_home,
+ path_exists):
+ guess_java_home.return_value = None
+ is_early_access_release.return_value = False
+ major_version.side_effect = [8, 10]
+ path_exists.return_value = True
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java8"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output, i=MockInput(inputs=["/usr/lib/java10"]))
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java10.home"])
diff --git a/tests/mechanic/provisioner_test.py b/tests/mechanic/provisioner_test.py
index 426a329c..7c29095a 100644
--- a/tests/mechanic/provisioner_test.py
+++ b/tests/mechanic/provisioner_test.py
@@ -67,6 +67,194 @@ class BareProvisionerTests(TestCase):
}, config_vars)
+ class NoopHookHandler:
+ def __init__(self, plugin):
+ self.hook_calls = {}
+
+ def can_load(self):
+ return False
+
+ def invoke(self, phase, variables):
+ self.hook_calls[phase] = variables
+
+ class MockRallyTeamXPackPlugin:
+ """
+ Mock XPackPlugin settings as found in rally-team repo:
+ https://github.com/elastic/rally-teams/blob/6/plugins/x_pack/security.ini
+ """
+ def __init__(self):
+ self.name = "x-pack"
+ self.core_plugin = False
+ self.config = {
+ 'base': 'internal_base,security'
+ }
+ self.root_path = None
+ self.config_paths = []
+ self.variables = {
+ 'xpack_security_enabled': True,
+ 'plugin_name': 'x-pack-security'
+ }
+
+ def __str__(self):
+ return "Plugin descriptor for [%s]" % self.name
+
+ def __repr__(self):
+ r = []
+ for prop, value in vars(self).items():
+ r.append("%s = [%s]" % (prop, repr(value)))
+ return ", ".join(r)
+
+ @mock.patch("glob.glob", lambda p: ["/opt/elasticsearch-5.0.0"])
+ @mock.patch("esrally.utils.io.decompress")
+ @mock.patch("esrally.utils.io.ensure_dir")
+ @mock.patch("esrally.mechanic.provisioner.PluginInstaller.install")
+ @mock.patch("shutil.rmtree")
+ def test_prepare_distribution_lt_63_with_plugins(self, mock_rm, mock_ensure_dir, mock_install, mock_decompress):
+ """
+ Test that plugin.mandatory is set to the specific plugin name (e.g. `x-pack-security`) and not
+ the meta plugin name (e.g. `x-pack`) for Elasticsearch <6.3
+
+ See: https://github.com/elastic/elasticsearch/pull/28710
+ """
+ apply_config_calls = []
+
+ def null_apply_config(source_root_path, target_root_path, config_vars):
+ apply_config_calls.append((source_root_path, target_root_path, config_vars))
+
+ installer = provisioner.ElasticsearchInstaller(car=
+ team.Car(
+ name="unit-test-car",
+ config_paths=["~/.rally/benchmarks/teams/default/my-car"],
+ variables={"heap": "4g"}),
+ node_name="rally-node-0",
+ node_root_dir="~/.rally/benchmarks/races/unittest",
+ all_node_ips=["10.17.22.22", "10.17.22.23"],
+ ip="10.17.22.23",
+ http_port=9200)
+
+ p = provisioner.BareProvisioner(cluster_settings={"indices.query.bool.max_clause_count": 50000},
+ es_installer=installer,
+ plugin_installers=[
+ provisioner.PluginInstaller(BareProvisionerTests.MockRallyTeamXPackPlugin(),
+ hook_handler_class=BareProvisionerTests.NoopHookHandler)
+ ],
+ preserve=True,
+ distribution_version="6.2.3",
+ apply_config=null_apply_config)
+
+ node_config = p.prepare({"elasticsearch": "/opt/elasticsearch-5.0.0.tar.gz"})
+ self.assertEqual(installer.car, node_config.car)
+ self.assertEqual("/opt/elasticsearch-5.0.0", node_config.binary_path)
+ self.assertEqual(["/opt/elasticsearch-5.0.0/data"], node_config.data_paths)
+
+ self.assertEqual(1, len(apply_config_calls))
+ source_root_path, target_root_path, config_vars = apply_config_calls[0]
+
+ self.assertEqual("~/.rally/benchmarks/teams/default/my-car", source_root_path)
+ self.assertEqual("/opt/elasticsearch-5.0.0", target_root_path)
+
+ self.maxDiff = None
+
+ self.assertEqual({
+ "cluster_settings": {
+ "indices.query.bool.max_clause_count": 50000,
+ "plugin.mandatory": ["x-pack-security"]
+ },
+ "heap": "4g",
+ "cluster_name": "rally-benchmark",
+ "node_name": "rally-node-0",
+ "data_paths": ["/opt/elasticsearch-5.0.0/data"],
+ "log_path": "~/.rally/benchmarks/races/unittest/logs/server",
+ "heap_dump_path": "~/.rally/benchmarks/races/unittest/heapdump",
+ "node_ip": "10.17.22.23",
+ "network_host": "10.17.22.23",
+ "http_port": "9200-9300",
+ "transport_port": "9300-9400",
+ "all_node_ips": "[\"10.17.22.22\",\"10.17.22.23\"]",
+ "minimum_master_nodes": 2,
+ "node_count_per_host": 1,
+ "install_root_path": "/opt/elasticsearch-5.0.0",
+ "plugin_name": "x-pack-security",
+ "xpack_security_enabled": True
+
+ }, config_vars)
+
+ @mock.patch("glob.glob", lambda p: ["/opt/elasticsearch-6.3.0"])
+ @mock.patch("esrally.utils.io.decompress")
+ @mock.patch("esrally.utils.io.ensure_dir")
+ @mock.patch("esrally.mechanic.provisioner.PluginInstaller.install")
+ @mock.patch("shutil.rmtree")
+ def test_prepare_distribution_ge_63_with_plugins(self, mock_rm, mock_ensure_dir, mock_install, mock_decompress):
+ """
+ Test that plugin.mandatory is set to the meta plugin name (e.g. `x-pack`) and not
+ the specific plugin name (e.g. `x-pack-security`) for Elasticsearch >=6.3.0
+
+ See: https://github.com/elastic/elasticsearch/pull/28710
+ """
+ apply_config_calls = []
+
+ def null_apply_config(source_root_path, target_root_path, config_vars):
+ apply_config_calls.append((source_root_path, target_root_path, config_vars))
+
+ installer = provisioner.ElasticsearchInstaller(car=
+ team.Car(
+ name="unit-test-car",
+ config_paths=["~/.rally/benchmarks/teams/default/my-car"],
+ variables={"heap": "4g"}),
+ node_name="rally-node-0",
+ node_root_dir="~/.rally/benchmarks/races/unittest",
+ all_node_ips=["10.17.22.22", "10.17.22.23"],
+ ip="10.17.22.23",
+ http_port=9200)
+
+ p = provisioner.BareProvisioner(cluster_settings={"indices.query.bool.max_clause_count": 50000},
+ es_installer=installer,
+ plugin_installers=[
+ provisioner.PluginInstaller(BareProvisionerTests.MockRallyTeamXPackPlugin(),
+ hook_handler_class=BareProvisionerTests.NoopHookHandler)
+ ],
+ preserve=True,
+ distribution_version="6.3.0",
+ apply_config=null_apply_config)
+
+ node_config = p.prepare({"elasticsearch": "/opt/elasticsearch-6.3.0.tar.gz"})
+ self.assertEqual(installer.car, node_config.car)
+ self.assertEqual("/opt/elasticsearch-6.3.0", node_config.binary_path)
+ self.assertEqual(["/opt/elasticsearch-6.3.0/data"], node_config.data_paths)
+
+ self.assertEqual(1, len(apply_config_calls))
+ source_root_path, target_root_path, config_vars = apply_config_calls[0]
+
+ self.assertEqual("~/.rally/benchmarks/teams/default/my-car", source_root_path)
+ self.assertEqual("/opt/elasticsearch-6.3.0", target_root_path)
+
+ self.maxDiff = None
+
+ self.assertEqual({
+ "cluster_settings": {
+ "indices.query.bool.max_clause_count": 50000,
+ "plugin.mandatory": ["x-pack"]
+ },
+ "heap": "4g",
+ "cluster_name": "rally-benchmark",
+ "node_name": "rally-node-0",
+ "data_paths": ["/opt/elasticsearch-6.3.0/data"],
+ "log_path": "~/.rally/benchmarks/races/unittest/logs/server",
+ "heap_dump_path": "~/.rally/benchmarks/races/unittest/heapdump",
+ "node_ip": "10.17.22.23",
+ "network_host": "10.17.22.23",
+ "http_port": "9200-9300",
+ "transport_port": "9300-9400",
+ "all_node_ips": "[\"10.17.22.22\",\"10.17.22.23\"]",
+ "minimum_master_nodes": 2,
+ "node_count_per_host": 1,
+ "install_root_path": "/opt/elasticsearch-6.3.0",
+ "plugin_name": "x-pack-security",
+ "xpack_security_enabled": True
+
+ }, config_vars)
+
+
class ElasticsearchInstallerTests(TestCase):
@mock.patch("shutil.rmtree")
@mock.patch("os.path.exists")
diff --git a/tests/mechanic/supplier_test.py b/tests/mechanic/supplier_test.py
index be8ae8e2..cb184505 100644
--- a/tests/mechanic/supplier_test.py
+++ b/tests/mechanic/supplier_test.py
@@ -134,20 +134,19 @@ class BuilderTests(TestCase):
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.jvm.major_version")
- def test_build_on_jdk_9(self, jvm_major_version, mock_run_subprocess):
- jvm_major_version.return_value = 9
+ def test_build_on_jdk_10(self, jvm_major_version, mock_run_subprocess):
+ jvm_major_version.return_value = 10
mock_run_subprocess.return_value = False
- b = supplier.Builder(src_dir="/src", gradle="/usr/local/gradle", java_home="/opt/jdk9", log_dir="logs")
+ b = supplier.Builder(src_dir="/src", gradle="/usr/local/gradle", java_home="/opt/jdk10", log_dir="logs")
b.build([supplier.CLEAN_TASK, supplier.ASSEMBLE_TASK])
calls = [
# Actual call
- mock.call("export GRADLE_OPTS=\"%s\"; export JAVA_HOME=/opt/jdk9; cd /src; /usr/local/gradle clean >> logs/build.log 2>&1" %
- supplier.Builder.JAVA_9_GRADLE_OPTS),
+ mock.call("export JAVA_HOME=/opt/jdk10; cd /src; /usr/local/gradle clean >> logs/build.log 2>&1"),
# Return value check
- mock.call("export GRADLE_OPTS=\"%s\"; export JAVA_HOME=/opt/jdk9; cd /src; /usr/local/gradle "
- ":distribution:archives:tar:assemble >> logs/build.log 2>&1" % supplier.Builder.JAVA_9_GRADLE_OPTS),
+ mock.call("export JAVA_HOME=/opt/jdk10; cd /src; /usr/local/gradle "
+ ":distribution:archives:tar:assemble >> logs/build.log 2>&1"),
]
mock_run_subprocess.assert_has_calls(calls)
@@ -335,7 +334,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
composite_supplier = supplier.create(cfg, sources=False, distribution=True, build=False, challenge_root_path="/", plugins=[])
@@ -352,7 +351,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
cfg.add(config.Scope.application, "source", "plugin.community-plugin.src.dir", "/home/user/Projects/community-plugin")
@@ -383,7 +382,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
core_plugin = team.PluginDescriptor("analysis-icu", core_plugin=True)
@@ -406,7 +405,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
cfg.add(config.Scope.application, "node", "src.root.dir", "/opt/rally/src")
cfg.add(config.Scope.application, "build", "gradle.bin", "/opt/gradle")
@@ -437,7 +436,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
cfg.add(config.Scope.application, "node", "src.root.dir", "/opt/rally/src")
cfg.add(config.Scope.application, "build", "gradle.bin", "/opt/gradle")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 7
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"tox",
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
elasticsearch==6.0.0
-e git+https://github.com/elastic/rally.git@7aad2e841ca7a2e022447993a83513f24b49c755#egg=esrally
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
six==1.17.0
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- elasticsearch==6.0.0
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- platformdirs==2.4.0
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- six==1.17.0
- tabulate==0.8.1
- thespian==3.9.2
- tox==3.28.0
- urllib3==1.22
- virtualenv==20.17.1
prefix: /opt/conda/envs/rally
| [
"tests/config_test.py::ConfigFactoryTests::test_create_advanced_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config_no_java_detected",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk10",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_and_skip",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_enter_valid",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_autodetect_jdk10",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_without_gradle",
"tests/mechanic/provisioner_test.py::BareProvisionerTests::test_prepare_distribution_ge_63_with_plugins",
"tests/mechanic/provisioner_test.py::BareProvisionerTests::test_prepare_distribution_lt_63_with_plugins",
"tests/mechanic/supplier_test.py::BuilderTests::test_build_on_jdk_10",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_and_plugin_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_distribution_plugin_source_build"
]
| []
| [
"tests/config_test.py::ConfigTests::test_add_all_in_section",
"tests/config_test.py::ConfigTests::test_load_all_opts_in_section",
"tests/config_test.py::ConfigTests::test_load_existing_config",
"tests/config_test.py::ConfigTests::test_load_non_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_create_non_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_load_and_amend_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_migrate_outdated_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config_no_java_installed",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_0_to_latest",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_10_to_11",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_custom_src_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_default_src_config_repo_checked_out",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_default_src_config_repo_not_checked_out",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_partial_src_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_without_src_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_ask_user_and_skip",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_ask_user_enter_valid",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_autodetect_jdk9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_without_gradle",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_2_to_3",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_3_to_4",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_4_to_5",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_5_to_6",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_6_to_7",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_7_to_8",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_8_to_9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_9_to_10",
"tests/mechanic/provisioner_test.py::BareProvisionerTests::test_prepare_without_plugins",
"tests/mechanic/provisioner_test.py::ElasticsearchInstallerTests::test_cleanup",
"tests/mechanic/provisioner_test.py::ElasticsearchInstallerTests::test_cleanup_nothing_on_preserve",
"tests/mechanic/provisioner_test.py::ElasticsearchInstallerTests::test_prepare_default_data_paths",
"tests/mechanic/provisioner_test.py::ElasticsearchInstallerTests::test_prepare_user_provided_data_path",
"tests/mechanic/provisioner_test.py::PluginInstallerTests::test_install_plugin_successfully",
"tests/mechanic/provisioner_test.py::PluginInstallerTests::test_install_plugin_with_io_error",
"tests/mechanic/provisioner_test.py::PluginInstallerTests::test_install_plugin_with_unknown_error",
"tests/mechanic/provisioner_test.py::PluginInstallerTests::test_install_unknown_plugin",
"tests/mechanic/provisioner_test.py::PluginInstallerTests::test_invokes_hook",
"tests/mechanic/provisioner_test.py::PluginInstallerTests::test_pass_plugin_properties",
"tests/mechanic/provisioner_test.py::InstallHookHandlerTests::test_cannot_register_for_unknown_phase",
"tests/mechanic/provisioner_test.py::InstallHookHandlerTests::test_loads_module",
"tests/mechanic/provisioner_test.py::DockerProvisionerTests::test_provisioning",
"tests/mechanic/supplier_test.py::RevisionExtractorTests::test_invalid_revisions",
"tests/mechanic/supplier_test.py::RevisionExtractorTests::test_multiple_revisions",
"tests/mechanic/supplier_test.py::RevisionExtractorTests::test_single_revision",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_current",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_revision",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_revision_for_local_only_repo",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_ts",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_intial_checkout_latest",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_is_commit_hash",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_is_not_commit_hash",
"tests/mechanic/supplier_test.py::BuilderTests::test_build_on_jdk_8",
"tests/mechanic/supplier_test.py::ElasticsearchSourceSupplierTests::test_add_elasticsearch_binary",
"tests/mechanic/supplier_test.py::ElasticsearchSourceSupplierTests::test_build",
"tests/mechanic/supplier_test.py::ElasticsearchSourceSupplierTests::test_no_build",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_add_binary_built_along_elasticsearch",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_along_es_plugin_keeps_build_dir",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_invalid_config_duplicate_source",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_invalid_config_no_source",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_resolve_plugin_binary_built_standalone",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_standalone_plugin_overrides_build_dir",
"tests/mechanic/supplier_test.py::CorePluginSourceSupplierTests::test_resolve_plugin_binary",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_distribution_plugin_source_skip",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_missing_distribution_plugin_source_skip",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_only_config",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_and_plugin_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_distribution",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_distribution_and_plugin_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_distribution_and_plugin_source_skip",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_source_skip",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_invalid_cache_value",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_missing_cache",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_missing_url",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_release_repo_config_with_default_url",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_release_repo_config_with_version_url"
]
| []
| Apache License 2.0 | 2,335 | [
"esrally/mechanic/provisioner.py",
"esrally/mechanic/team.py",
"run.sh",
"esrally/config.py",
"rally",
"esrally/mechanic/supplier.py",
"docs/configuration.rst"
]
| [
"esrally/mechanic/provisioner.py",
"esrally/mechanic/team.py",
"run.sh",
"esrally/config.py",
"rally",
"esrally/mechanic/supplier.py",
"docs/configuration.rst"
]
|
|
streamlink__streamlink-1578 | c2368bea030c50beb794821b01b92bad5e21c5fb | 2018-03-27 10:04:14 | 7018fc8260f47cc1cb2e7a05dee9e6e58244e5b1 | diff --git a/src/streamlink/plugins/rtve.py b/src/streamlink/plugins/rtve.py
index 21df390a..4870585f 100644
--- a/src/streamlink/plugins/rtve.py
+++ b/src/streamlink/plugins/rtve.py
@@ -1,5 +1,6 @@
import base64
import re
+from functools import partial
from Crypto.Cipher import Blowfish
@@ -59,7 +60,7 @@ class Rtve(Plugin):
https?://(?:www\.)?rtve\.es/(?:directo|noticias|television|deportes|alacarta|drmn)/.*?/?
""", re.VERBOSE)
cdn_schema = validate.Schema(
- validate.transform(parse_xml),
+ validate.transform(partial(parse_xml, invalid_char_entities=True)),
validate.xml_findall(".//preset"),
[
validate.union({
diff --git a/src/streamlink/utils/__init__.py b/src/streamlink/utils/__init__.py
index 59cecaee..1e531647 100644
--- a/src/streamlink/utils/__init__.py
+++ b/src/streamlink/utils/__init__.py
@@ -7,7 +7,7 @@ try:
except ImportError: # pragma: no cover
import xml.etree.ElementTree as ET
-from streamlink.compat import urljoin, urlparse, parse_qsl, is_py2, urlunparse
+from streamlink.compat import urljoin, urlparse, parse_qsl, is_py2, urlunparse, is_py3
from streamlink.exceptions import PluginError
from streamlink.utils.named_pipe import NamedPipe
@@ -67,7 +67,7 @@ def parse_json(data, name="JSON", exception=PluginError, schema=None):
return json_data
-def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=None):
+def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=None, invalid_char_entities=False):
"""Wrapper around ElementTree.fromstring with some extras.
Provides these extra features:
@@ -77,9 +77,14 @@ def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=N
"""
if is_py2 and isinstance(data, unicode):
data = data.encode("utf8")
+ elif is_py3:
+ data = bytearray(data, "utf8")
if ignore_ns:
- data = re.sub(" xmlns=\"(.+?)\"", "", data)
+ data = re.sub(br" xmlns=\"(.+?)\"", b"", data)
+
+ if invalid_char_entities:
+ data = re.sub(br'&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)', b'&', data)
try:
tree = ET.fromstring(data)
| Plugin: Rtve.es: Unable to parse XML: not well-formed (invalid token)
### Checklist
- [x] This is a bug report.
### Description
Rtve plugin always gives XML parsing error for all video urls.
### Reproduction steps / Explicit stream URLs to test
1. http://www.rtve.es/alacarta/videos/telediario/telediario-15-horas-26-03-18/4540424/
2. http://www.rtve.es/alacarta/videos/aguila-roja/aguila-roja-t9-capitulo-116/3771566/
3. http://www.rtve.es/directo/la-1
### Logs
```
[plugin.rtve][debug] Found content with id: 4540424
Plugin error: Unable to parse XML: not well-formed (invalid token): line 1, column 762 (b"<?xml version='1.0'?><quality><pr ...)
Process finished with exit code 1
```
### Comments, screenshots, etc.
xml contains `&` characters, replacing with `&` gets it working.
Resolved by modifying streamlink\utils\__init__.py and adding `data = re.sub("&", "&", data.decode('utf8'))`
```
[plugin.rtve][debug] Found content with id: 4540424
OrderedDict([('540p_http', <HTTPStream('http://mvod.lvlt.rtve.es/resources/TE_NGVA/mp4/3/8/1522075587483.mp4')>), ('360p_http', <HTTPStream('http://mvod.lvlt.rtve.es/resources/TE_NGVA/mp4/1/1/1522075683411.mp4')>), ('270p_http', <HTTPStream('http://mvod.lvlt.rtve.es/resources/TE_NGVA/mp4/3/0/1522075727303.mp4')>), ('270p_alt', <HLSStream('http://hlsvod.lvlt.rtve.es/resources/TE_NGVA/mp4/3/0/1522075727303.mp4/1522075727303-audio=48001-video=620000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('270p', <HLSStream('http://hlsvod2017b.akamaized.net/resources/TE_NGVA/mp4/3/0/1522075727303.mp4/1522075727303-audio=48001-video=620000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('360p_alt', <HLSStream('http://hlsvod.lvlt.rtve.es/resources/TE_NGVA/mp4/1/1/1522075683411.mp4/1522075683411-audio=64001-video=720000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('360p', <HLSStream('http://hlsvod2017b.akamaized.net/resources/TE_NGVA/mp4/1/1/1522075683411.mp4/1522075683411-audio=64001-video=720000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('540p_alt', <HLSStream('http://hlsvod.lvlt.rtve.es/resources/TE_NGVA/mp4/3/8/1522075587483.mp4/1522075587483-audio=64001-video=1400000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('540p', <HLSStream('http://hlsvod2017b.akamaized.net/resources/TE_NGVA/mp4/3/8/1522075587483.mp4/1522075587483-audio=64001-video=1400000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('worst', <HLSStream('http://hlsvod.lvlt.rtve.es/resources/TE_NGVA/mp4/3/0/1522075727303.mp4/1522075727303-audio=48001-video=620000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>), ('best', <HLSStream('http://hlsvod2017b.akamaized.net/resources/TE_NGVA/mp4/3/8/1522075587483.mp4/1522075587483-audio=64001-video=1400000.m3u8?hls_minimum_fragment_length=6&hls_client_manifest_version=3')>)])
Process finished with exit code 0
```
**Don't know if this breaks other plugins as I only use this one.**
```
def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=None):
"""Wrapper around ElementTree.fromstring with some extras
Provides these extra features:
- Handles incorrectly encoded XML
- Allows stripping namespace information
- Wraps errors in custom exception with a snippet of the data in the message
"""
if is_py2 and isinstance(data, unicode):
data = data.encode("utf8")
if ignore_ns:
data = re.sub(" xmlns=\"(.+?)\"", "", data)
data = re.sub("&", "&", data.decode('utf8'))
try:
tree = ET.fromstring(data)
except Exception as err:
snippet = repr(data)
if len(snippet) > 35:
snippet = snippet[:35] + " ..."
raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet))
if schema:
tree = schema.validate(tree, name=name, exception=exception)
return tree
``` | streamlink/streamlink | diff --git a/tests/test_utils.py b/tests/test_utils.py
index 42419165..3fbefd4f 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -76,6 +76,20 @@ class TestUtil(unittest.TestCase):
self.assertEqual(expected.tag, actual.tag)
self.assertEqual(expected.attrib, actual.attrib)
+ def test_parse_xml_entities_fail(self):
+ self.assertRaises(PluginError,
+ parse_xml, u"""<test foo="bar &"/>""")
+
+
+ def test_parse_xml_entities(self):
+ expected = ET.Element("test", {"foo": "bar &"})
+ actual = parse_xml(u"""<test foo="bar &"/>""",
+ schema=validate.Schema(xml_element(tag="test", attrib={"foo": text})),
+ invalid_char_entities=True)
+ self.assertEqual(expected.tag, actual.tag)
+ self.assertEqual(expected.attrib, actual.attrib)
+
+
def test_parse_qsd(self):
self.assertEqual(
{"test": "1", "foo": "bar"},
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"pip install --disable-pip-version-check --upgrade pip setuptools",
"pip install -r dev-requirements.txt"
],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
iso-639==0.4.5
iso3166==2.1.1
Jinja2==3.1.6
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pycryptodome==3.22.0
pynsist==2.8
PySocks==1.7.1
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
requests-mock==1.12.1
requests_download==0.1.2
-e git+https://github.com/streamlink/streamlink.git@c2368bea030c50beb794821b01b92bad5e21c5fb#egg=streamlink
tomli==2.2.1
urllib3==2.3.0
websocket-client==1.8.0
yarg==0.1.10
| name: streamlink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- iso-639==0.4.5
- iso3166==2.1.1
- jinja2==3.1.6
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pip==25.0.1
- pluggy==1.5.0
- pycryptodome==3.22.0
- pynsist==2.8
- pysocks==1.7.1
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- requests-download==0.1.2
- requests-mock==1.12.1
- setuptools==78.1.0
- tomli==2.2.1
- urllib3==2.3.0
- websocket-client==1.8.0
- yarg==0.1.10
prefix: /opt/conda/envs/streamlink
| [
"tests/test_utils.py::TestUtil::test_parse_xml_entities"
]
| []
| [
"tests/test_utils.py::TestUtil::test_absolute_url",
"tests/test_utils.py::TestUtil::test_parse_json",
"tests/test_utils.py::TestUtil::test_parse_qsd",
"tests/test_utils.py::TestUtil::test_parse_xml",
"tests/test_utils.py::TestUtil::test_parse_xml_entities_fail",
"tests/test_utils.py::TestUtil::test_parse_xml_fail",
"tests/test_utils.py::TestUtil::test_parse_xml_ns",
"tests/test_utils.py::TestUtil::test_parse_xml_ns_ignore",
"tests/test_utils.py::TestUtil::test_parse_xml_validate",
"tests/test_utils.py::TestUtil::test_prepend_www",
"tests/test_utils.py::TestUtil::test_update_scheme",
"tests/test_utils.py::TestUtil::test_verifyjson"
]
| []
| BSD 2-Clause "Simplified" License | 2,336 | [
"src/streamlink/plugins/rtve.py",
"src/streamlink/utils/__init__.py"
]
| [
"src/streamlink/plugins/rtve.py",
"src/streamlink/utils/__init__.py"
]
|
|
zopefoundation__ZEO-109 | 711232fff3ab6964fbdd45f2a742e04a12d13be7 | 2018-03-27 12:56:49 | 5efce5d6821ac2455f37a425de8b377493d71101 | diff --git a/.travis.yml b/.travis.yml
index 1e68dbcc..dd7e38ca 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,11 +5,13 @@ matrix:
- os: linux
python: 2.7
- os: linux
- python: pypy-5.6.0
+ python: pypy
- os: linux
python: 3.4
- os: linux
python: 3.5
+ - os: linux
+ python: 3.6
- os: linux
python: 3.4
env: ZEO_MTACCEPTOR=1
@@ -35,6 +37,6 @@ cache:
directories:
- eggs
script:
- - bin/test -v1j99
+ - bin/test -vv -j99
notifications:
email: false
diff --git a/CHANGES.rst b/CHANGES.rst
index 5b88339c..1ebb5b70 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,7 +4,9 @@ Changelog
5.1.2 (unreleased)
------------------
-- Nothing changed yet.
+- Allow ``zodbpickle.binary`` to be used in RPC requests, which is
+ necessary for compatibility with ZODB 5.4.0 on Python 2. See `issue
+ 107 <https://github.com/zopefoundation/ZEO/issues/107>`_.
5.1.1 (2017-12-18)
diff --git a/src/ZEO/asyncio/client.py b/src/ZEO/asyncio/client.py
index 345517bb..49b8c9b4 100644
--- a/src/ZEO/asyncio/client.py
+++ b/src/ZEO/asyncio/client.py
@@ -122,7 +122,7 @@ class Protocol(base.Protocol):
cr = self.loop.create_unix_connection(
self.protocol_factory, self.addr, ssl=self.ssl)
- self._connecting = cr = asyncio.async(cr, loop=self.loop)
+ self._connecting = cr = asyncio.ensure_future(cr, loop=self.loop)
@cr.add_done_callback
def done_connecting(future):
diff --git a/src/ZEO/asyncio/marshal.py b/src/ZEO/asyncio/marshal.py
index e3f06f10..680a8456 100644
--- a/src/ZEO/asyncio/marshal.py
+++ b/src/ZEO/asyncio/marshal.py
@@ -156,9 +156,10 @@ def find_global(module, name):
def server_find_global(module, name):
"""Helper for message unpickler"""
+ if module not in ('ZopeUndo.Prefix', 'copy_reg', '__builtin__', 'zodbpickle'):
+ raise ImportError("Module not allowed: %s" % (module,))
+
try:
- if module not in ('ZopeUndo.Prefix', 'copy_reg', '__builtin__'):
- raise ImportError
m = __import__(module, _globals, _globals, _silly)
except ImportError as msg:
raise ImportError("import error %s: %s" % (module, msg))
diff --git a/src/ZEO/asyncio/mtacceptor.py b/src/ZEO/asyncio/mtacceptor.py
index 4b189c2f..a08b8691 100644
--- a/src/ZEO/asyncio/mtacceptor.py
+++ b/src/ZEO/asyncio/mtacceptor.py
@@ -191,7 +191,7 @@ class Acceptor(asyncore.dispatcher):
server_hostname=''
)
- asyncio.async(cr, loop=loop)
+ asyncio.ensure_future(cr, loop=loop)
loop.run_forever()
loop.close()
diff --git a/src/ZEO/asyncio/server.py b/src/ZEO/asyncio/server.py
index 7f5ed8a6..a2c8f6e7 100644
--- a/src/ZEO/asyncio/server.py
+++ b/src/ZEO/asyncio/server.py
@@ -152,7 +152,7 @@ assert best_protocol_version in ServerProtocol.protocols
def new_connection(loop, addr, socket, zeo_storage, msgpack):
protocol = ServerProtocol(loop, addr, zeo_storage, msgpack)
cr = loop.create_connection((lambda : protocol), sock=socket)
- asyncio.async(cr, loop=loop)
+ asyncio.ensure_future(cr, loop=loop)
class Delay(object):
"""Used to delay response to client for synchronous calls.
@@ -231,7 +231,7 @@ class Acceptor(object):
else:
cr = loop.create_unix_server(self.factory, addr, ssl=ssl)
- f = asyncio.async(cr, loop=loop)
+ f = asyncio.ensure_future(cr, loop=loop)
server = loop.run_until_complete(f)
self.server = server
@@ -271,7 +271,7 @@ class Acceptor(object):
self.server.close()
- f = asyncio.async(self.server.wait_closed(), loop=loop)
+ f = asyncio.ensure_future(self.server.wait_closed(), loop=loop)
@f.add_done_callback
def server_closed(f):
# stop the loop when the server closes:
| UnboundLocalError: local variable
We started seeing the error below today. We are running w/ ZODB 5.4.0
Thx
Carlos
```
2018-03-26T11:27:54 (/Users/cutz/Documents/NextThought/nti.dataserver-buildout/var/zeosocket) disconnected
2018-03-26T11:27:54 Connected server protocol
2018-03-26T11:27:55 received handshake 'Z5'
2018-03-26T11:27:55 can't decode message: '((ccopy_reg\n_reconstructor\n(czodbpickle\nbinary\nc__b...'
2018-03-26T11:27:55 Can't deserialize message
Traceback (most recent call last):
File "/Users/cutz/Documents/NextThought/nti.dataserver-buildout/eggs/ZEO-5.1.1-py2.7.egg/ZEO/asyncio/server.py", line 89, in message_received
message_id, async, name, args = self.decode(message)
File "/Users/cutz/Documents/NextThought/nti.dataserver-buildout/eggs/ZEO-5.1.1-py2.7.egg/ZEO/asyncio/marshal.py", line 114, in pickle_server_decode
return unpickler.load() # msgid, flags, name, args
File "/Users/cutz/Documents/NextThought/nti.dataserver-buildout/eggs/ZEO-5.1.1-py2.7.egg/ZEO/asyncio/marshal.py", line 164, in server_find_global
raise ImportError("import error %s: %s" % (module, msg))
ImportError: import error zodbpickle:
2018-03-26T11:27:55 data_received 4 0 True
Traceback (most recent call last):
File "/Users/cutz/Documents/NextThought/nti.dataserver-buildout/eggs/ZEO-5.1.1-py2.7.egg/ZEO/asyncio/base.py", line 128, in data_received
self.message_received(collected)
File "/Users/cutz/Documents/NextThought/nti.dataserver-buildout/eggs/ZEO-5.1.1-py2.7.egg/ZEO/asyncio/server.py", line 94, in message_received
if message_id == -1:
UnboundLocalError: local variable 'message_id' referenced before assignment
``` | zopefoundation/ZEO | diff --git a/src/ZEO/asyncio/tests.py b/src/ZEO/asyncio/tests.py
index 02eac779..f8fa2ee7 100644
--- a/src/ZEO/asyncio/tests.py
+++ b/src/ZEO/asyncio/tests.py
@@ -180,7 +180,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Now we're connected, the cache was initialized, and the
# queued message has been sent:
- self.assert_(client.connected.done())
+ self.assertTrue(client.connected.done())
self.assertEqual(cache.getLastTid(), 'a'*8)
self.assertEqual(self.pop(), (4, False, 'foo', (1, 2)))
@@ -192,7 +192,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Now we can make async calls:
f2 = self.async('bar', 3, 4)
- self.assert_(f2.done() and f2.exception() is None)
+ self.assertTrue(f2.done() and f2.exception() is None)
self.assertEqual(self.pop(), (0, True, 'bar', (3, 4)))
# Loading objects gets special handling to leverage the cache.
@@ -289,8 +289,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.assertEqual(f1.exception().args, (exc,))
# Because we reconnected, a new protocol and transport were created:
- self.assert_(protocol is not loop.protocol)
- self.assert_(transport is not loop.transport)
+ self.assertTrue(protocol is not loop.protocol)
+ self.assertTrue(transport is not loop.transport)
protocol = loop.protocol
transport = loop.transport
@@ -313,7 +313,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Because the server tid matches the cache tid, we're done connecting
wrapper.notify_connected.assert_called_with(client, {'length': 42})
- self.assert_(client.connected.done() and not transport.data)
+ self.assertTrue(client.connected.done() and not transport.data)
self.assertEqual(cache.getLastTid(), b'e'*8)
# Because we were able to update the cache, we didn't have to
@@ -322,7 +322,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# The close method closes the connection and cache:
client.close()
- self.assert_(transport.closed and cache.closed)
+ self.assertTrue(transport.closed and cache.closed)
# The client doesn't reconnect
self.assertEqual(loop.protocol, protocol)
@@ -351,7 +351,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.respond(4, dict(length=42))
# Now that verification is done, we're done connecting
- self.assert_(client.connected.done() and not transport.data)
+ self.assertTrue(client.connected.done() and not transport.data)
self.assertEqual(cache.getLastTid(), b'e'*8)
# And the cache has been updated:
@@ -388,7 +388,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.respond(4, dict(length=42))
# Now that verification is done, we're done connecting
- self.assert_(client.connected.done() and not transport.data)
+ self.assertTrue(client.connected.done() and not transport.data)
self.assertEqual(cache.getLastTid(), b'e'*8)
# But the cache is now empty and we invalidated the database cache
@@ -402,7 +402,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
addrs, ())
# We haven't connected yet
- self.assert_(protocol is None and transport is None)
+ self.assertTrue(protocol is None and transport is None)
# There are 2 connection attempts outstanding:
self.assertEqual(sorted(loop.connecting), addrs)
@@ -413,7 +413,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# The failed connection is attempted in the future:
delay, func, args, _ = loop.later.pop(0)
- self.assert_(1 <= delay <= 2)
+ self.assertTrue(1 <= delay <= 2)
func(*args)
self.assertEqual(sorted(loop.connecting), addrs)
@@ -447,7 +447,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.pop()
self.assertFalse(client.connected.done() or transport.data)
delay, func, args, _ = loop.later.pop(1) # first in later is heartbeat
- self.assert_(8 < delay < 10)
+ self.assertTrue(8 < delay < 10)
self.assertEqual(len(loop.later), 1) # first in later is heartbeat
func(*args) # connect again
self.assertFalse(protocol is loop.protocol)
@@ -461,8 +461,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.pop(4)
self.assertEqual(self.pop(), (3, False, 'get_info', ()))
self.respond(3, dict(length=42))
- self.assert_(client.connected.done() and not transport.data)
- self.assert_(client.ready)
+ self.assertTrue(client.connected.done() and not transport.data)
+ self.assertTrue(client.ready)
def test_readonly_fallback(self):
addrs = [('1.2.3.4', 8200), ('2.2.3.4', 8200)]
@@ -493,7 +493,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# At this point, the client is ready and using the protocol,
# and the protocol is read-only:
- self.assert_(client.ready)
+ self.assertTrue(client.ready)
self.assertEqual(client.protocol, protocol)
self.assertEqual(protocol.read_only, True)
connected = client.connected
@@ -502,7 +502,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.assertEqual(self.pop(), (4, False, 'get_info', ()))
self.respond(4, dict(length=42))
- self.assert_(connected.done())
+ self.assertTrue(connected.done())
# We connect the second address:
loop.connect_connecting(addrs[1])
@@ -527,7 +527,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.assertFalse(client.protocol is protocol)
self.assertEqual(client.protocol, loop.protocol)
self.assertEqual(protocol.closed, True)
- self.assert_(client.connected is not connected)
+ self.assertTrue(client.connected is not connected)
self.assertFalse(client.connected.done())
protocol, transport = loop.protocol, loop.transport
self.assertEqual(protocol.read_only, False)
@@ -535,8 +535,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Now, we finish verification
self.respond(2, 'b'*8)
self.respond(3, dict(length=42))
- self.assert_(client.ready)
- self.assert_(client.connected.done())
+ self.assertTrue(client.ready)
+ self.assertTrue(client.connected.done())
def test_invalidations_while_verifying(self):
# While we're verifying, invalidations are ignored
@@ -553,8 +553,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# We'll disconnect:
protocol.connection_lost(Exception("lost"))
- self.assert_(protocol is not loop.protocol)
- self.assert_(transport is not loop.transport)
+ self.assertTrue(protocol is not loop.protocol)
+ self.assertTrue(transport is not loop.transport)
protocol = loop.protocol
transport = loop.transport
@@ -606,7 +606,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
with mock.patch("ZEO.asyncio.client.logger.error") as error:
self.assertFalse(error.called)
protocol.data_received(sized(self.enc + b'200'))
- self.assert_(isinstance(error.call_args[0][1], ProtocolError))
+ self.assertTrue(isinstance(error.call_args[0][1], ProtocolError))
def test_get_peername(self):
diff --git a/src/ZEO/tests/ConnectionTests.py b/src/ZEO/tests/ConnectionTests.py
index 901353cf..7d0d4dae 100644
--- a/src/ZEO/tests/ConnectionTests.py
+++ b/src/ZEO/tests/ConnectionTests.py
@@ -266,7 +266,7 @@ class ConnectionTests(CommonSetupTearDown):
self.startServer(create=0, index=0, ro_svr=1)
# Start a read-only-fallback client
self._storage = self.openClientStorage(read_only_fallback=1)
- self.assert_(self._storage.isReadOnly())
+ self.assertTrue(self._storage.isReadOnly())
# Stores should fail here
self.assertRaises(ReadOnlyError, self._dostore)
self._storage.close()
@@ -493,7 +493,7 @@ class ConnectionTests(CommonSetupTearDown):
# Wait for all threads to finish
for t in threads:
t.join(60)
- self.failIf(t.isAlive(), "%s didn't die" % t.getName())
+ self.assertFalse(t.isAlive(), "%s didn't die" % t.getName())
finally:
for t in threads:
t.closeclients()
@@ -949,7 +949,7 @@ class ReconnectionTests(CommonSetupTearDown):
break
except ClientDisconnected:
time.sleep(0.5)
- self.assert_(did_a_store)
+ self.assertTrue(did_a_store)
self._storage.close()
class TimeoutTests(CommonSetupTearDown):
@@ -971,7 +971,7 @@ class TimeoutTests(CommonSetupTearDown):
):
break
else:
- self.assert_(False, 'bad logging')
+ self.assertTrue(False, 'bad logging')
storage.close()
@@ -993,7 +993,7 @@ class TimeoutTests(CommonSetupTearDown):
def checkTimeoutAfterVote(self):
self._storage = storage = self.openClientStorage()
# Assert that the zeo cache is empty
- self.assert_(not list(storage._cache.contents()))
+ self.assertTrue(not list(storage._cache.contents()))
# Create the object
oid = storage.new_oid()
obj = MinPO(7)
@@ -1005,17 +1005,17 @@ class TimeoutTests(CommonSetupTearDown):
storage.tpc_vote(t)
# Now sleep long enough for the storage to time out
time.sleep(3)
- self.assert_(
+ self.assertTrue(
(not storage.is_connected())
or
(storage.connection_count_for_tests > old_connection_count)
)
storage._wait()
- self.assert_(storage.is_connected())
+ self.assertTrue(storage.is_connected())
# We expect finish to fail
self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
# The cache should still be empty
- self.assert_(not list(storage._cache.contents()))
+ self.assertTrue(not list(storage._cache.contents()))
# Load should fail since the object should not be in either the cache
# or the server.
self.assertRaises(KeyError, storage.load, oid, '')
@@ -1079,10 +1079,10 @@ class MSTThread(threading.Thread):
for c in clients:
# Check that we got serials for all oids
for oid in c.__oids:
- testcase.failUnless(oid in c.__serials)
+ testcase.assertIn(oid, c.__serials)
# Check that we got serials for no other oids
for oid in c.__serials.keys():
- testcase.failUnless(oid in c.__oids)
+ testcase.assertIn(oid, c.__oids)
def closeclients(self):
# Close clients opened by run()
@@ -1102,7 +1102,8 @@ def short_timeout(self):
# Run IPv6 tests if V6 sockets are supported
try:
- socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ pass
except (socket.error, AttributeError):
pass
else:
diff --git a/src/ZEO/tests/IterationTests.py b/src/ZEO/tests/IterationTests.py
index 90816db3..74056dc7 100644
--- a/src/ZEO/tests/IterationTests.py
+++ b/src/ZEO/tests/IterationTests.py
@@ -33,7 +33,7 @@ class IterationTests(object):
# make sure there's no race conditions cleaning out the weak refs
gc.disable()
try:
- self.assertEquals(0, len(self._storage._iterator_ids))
+ self.assertEqual(0, len(self._storage._iterator_ids))
except AssertionError:
# Ok, we have ids. That should also mean that the
# weak dictionary has the same length.
@@ -50,7 +50,7 @@ class IterationTests(object):
self.assertEqual(len(self._storage._iterators),
len(self._storage._iterator_ids))
- self.assertEquals(0, len(self._storage._iterator_ids))
+ self.assertEqual(0, len(self._storage._iterator_ids))
finally:
if gc_enabled:
gc.enable()
@@ -63,7 +63,7 @@ class IterationTests(object):
iid = server.iterator_start(None, None)
# None signals the end of iteration.
- self.assertEquals(None, server.iterator_next(iid))
+ self.assertEqual(None, server.iterator_next(iid))
# The server has disposed the iterator already.
self.assertRaises(KeyError, server.iterator_next, iid)
@@ -80,10 +80,10 @@ class IterationTests(object):
# At this point, a wrapping iterator might not have called the CS
# iterator yet. We'll consume one item to make sure this happens.
six.advance_iterator(iterator)
- self.assertEquals(1, len(self._storage._iterator_ids))
+ self.assertEqual(1, len(self._storage._iterator_ids))
iid = list(self._storage._iterator_ids)[0]
- self.assertEquals([], list(iterator))
- self.assertEquals(0, len(self._storage._iterator_ids))
+ self.assertEqual([], list(iterator))
+ self.assertEqual(0, len(self._storage._iterator_ids))
# The iterator has run through, so the server has already disposed it.
self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)
@@ -98,7 +98,7 @@ class IterationTests(object):
# don't see the transaction we just wrote being picked up, because
# iterators only see the state from the point in time when they were
# created.)
- self.assert_(list(iterator))
+ self.assertTrue(list(iterator))
def checkIteratorGCStorageCommitting(self):
# We want the iterator to be garbage-collected, so we don't keep any
@@ -111,7 +111,7 @@ class IterationTests(object):
self._dostore()
six.advance_iterator(self._storage.iterator())
- self.assertEquals(1, len(self._storage._iterator_ids))
+ self.assertEqual(1, len(self._storage._iterator_ids))
iid = list(self._storage._iterator_ids)[0]
# GC happens at the transaction boundary. After that, both the storage
@@ -154,7 +154,7 @@ class IterationTests(object):
# as well. I'm calling this directly to avoid accidentally
# calling tpc_abort implicitly.
self._storage.notify_disconnected()
- self.assertEquals(0, len(self._storage._iterator_ids))
+ self.assertEqual(0, len(self._storage._iterator_ids))
def checkIteratorParallel(self):
self._dostore()
@@ -163,10 +163,10 @@ class IterationTests(object):
iter2 = self._storage.iterator()
txn_info1 = six.advance_iterator(iter1)
txn_info2 = six.advance_iterator(iter2)
- self.assertEquals(txn_info1.tid, txn_info2.tid)
+ self.assertEqual(txn_info1.tid, txn_info2.tid)
txn_info1 = six.advance_iterator(iter1)
txn_info2 = six.advance_iterator(iter2)
- self.assertEquals(txn_info1.tid, txn_info2.tid)
+ self.assertEqual(txn_info1.tid, txn_info2.tid)
self.assertRaises(StopIteration, next, iter1)
self.assertRaises(StopIteration, next, iter2)
diff --git a/src/ZEO/tests/ThreadTests.py b/src/ZEO/tests/ThreadTests.py
index 154441a3..df4a5ad0 100644
--- a/src/ZEO/tests/ThreadTests.py
+++ b/src/ZEO/tests/ThreadTests.py
@@ -119,7 +119,7 @@ class ThreadTests(object):
for t in threads:
t.join(30)
for i in threads:
- self.failUnless(not t.isAlive())
+ self.assertFalse(t.isAlive())
# Helper for checkMTStores
def mtstorehelper(self):
diff --git a/src/ZEO/tests/testConversionSupport.py b/src/ZEO/tests/testConversionSupport.py
index 8cf549cb..4200ebb5 100644
--- a/src/ZEO/tests/testConversionSupport.py
+++ b/src/ZEO/tests/testConversionSupport.py
@@ -122,6 +122,9 @@ First, fake out the connection manager so we can make a connection:
... next = None
...
... return oid, oid*8, 'data ' + oid, next
+ ...
+ ... def close(self):
+ ... pass
>>> client = ZEO.client(
... '', wait=False, _client_factory=Client)
@@ -138,6 +141,7 @@ Now we'll have our way with it's private _server attr:
2
3
4
+ >>> client.close()
"""
diff --git a/src/ZEO/tests/testTransactionBuffer.py b/src/ZEO/tests/testTransactionBuffer.py
index 2ca46807..bac5fe5a 100644
--- a/src/ZEO/tests/testTransactionBuffer.py
+++ b/src/ZEO/tests/testTransactionBuffer.py
@@ -51,6 +51,7 @@ class TransBufTests(unittest.TestCase):
for i, (oid, d, resolved) in enumerate(tbuf):
self.assertEqual((oid, d), data[i][0])
self.assertEqual(resolved, data[i][1])
+ tbuf.close()
def test_suite():
return unittest.makeSuite(TransBufTests, 'check')
diff --git a/src/ZEO/tests/testZEO.py b/src/ZEO/tests/testZEO.py
index 9bba0b99..3296263c 100644
--- a/src/ZEO/tests/testZEO.py
+++ b/src/ZEO/tests/testZEO.py
@@ -221,15 +221,15 @@ class MiscZEOTests(object):
# available right after successful connection, this is required now.
addr = self._storage._addr
storage2 = ClientStorage(addr, **self._client_options())
- self.assert_(storage2.is_connected())
- self.assertEquals(ZODB.utils.z64, storage2.lastTransaction())
+ self.assertTrue(storage2.is_connected())
+ self.assertEqual(ZODB.utils.z64, storage2.lastTransaction())
storage2.close()
self._dostore()
storage3 = ClientStorage(addr, **self._client_options())
- self.assert_(storage3.is_connected())
- self.assertEquals(8, len(storage3.lastTransaction()))
- self.assertNotEquals(ZODB.utils.z64, storage3.lastTransaction())
+ self.assertTrue(storage3.is_connected())
+ self.assertEqual(8, len(storage3.lastTransaction()))
+ self.assertNotEqual(ZODB.utils.z64, storage3.lastTransaction())
storage3.close()
class GenericTestBase(
@@ -422,12 +422,12 @@ class FileStorageTests(FullGenericTests):
# ClientStorage itself doesn't implement IStorageIteration, but the
# FileStorage on the other end does, and thus the ClientStorage
# instance that is connected to it reflects this.
- self.failIf(ZODB.interfaces.IStorageIteration.implementedBy(
+ self.assertFalse(ZODB.interfaces.IStorageIteration.implementedBy(
ZEO.ClientStorage.ClientStorage))
- self.failUnless(ZODB.interfaces.IStorageIteration.providedBy(
+ self.assertTrue(ZODB.interfaces.IStorageIteration.providedBy(
self._storage))
# This is communicated using ClientStorage's _info object:
- self.assertEquals(self._expected_interfaces,
+ self.assertEqual(self._expected_interfaces,
self._storage._info['interfaces']
)
@@ -552,7 +552,7 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
log = str(handler)
handler.uninstall()
- self.assert_("Client loop stopped unexpectedly" in log)
+ self.assertTrue("Client loop stopped unexpectedly" in log)
def checkExceptionLogsAtError(self):
# Test the exceptions are logged at error
@@ -570,7 +570,7 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
self.assertRaises(ZODB.POSException.POSKeyError,
self._storage.history, None, None)
handler.uninstall()
- self.assertEquals(str(handler), '')
+ self.assertEqual(str(handler), '')
def checkConnectionInvalidationOnReconnect(self):
@@ -639,7 +639,7 @@ class CommonBlobTests(object):
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
- self.assert_(os.path.exists(tfname))
+ self.assertTrue(os.path.exists(tfname))
t = TransactionMetaData()
try:
@@ -650,9 +650,9 @@ class CommonBlobTests(object):
except:
self._storage.tpc_abort(t)
raise
- self.assert_(not os.path.exists(tfname))
+ self.assertTrue(not os.path.exists(tfname))
filename = self._storage.fshelper.getBlobFilename(oid, revid)
- self.assert_(os.path.exists(filename))
+ self.assertTrue(os.path.exists(filename))
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
@@ -693,11 +693,11 @@ class CommonBlobTests(object):
filename = self._storage.loadBlob(oid, serial)
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
- self.assert_(not(os.stat(filename).st_mode & stat.S_IWRITE))
- self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
+ self.assertTrue(not(os.stat(filename).st_mode & stat.S_IWRITE))
+ self.assertTrue((os.stat(filename).st_mode & stat.S_IREAD))
def checkTemporaryDirectory(self):
- self.assertEquals(os.path.join(self.blob_cache_dir, 'tmp'),
+ self.assertEqual(os.path.join(self.blob_cache_dir, 'tmp'),
self._storage.temporaryDirectory())
def checkTransactionBufferCleanup(self):
@@ -726,14 +726,14 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
somedata.write(("%s\n" % i).encode('ascii'))
def check_data(path):
- self.assert_(os.path.exists(path))
- f = open(path, 'rb')
+ self.assertTrue(os.path.exists(path))
somedata.seek(0)
d1 = d2 = 1
- while d1 or d2:
- d1 = f.read(8096)
- d2 = somedata.read(8096)
- self.assertEqual(d1, d2)
+ with open(path, 'rb') as f:
+ while d1 or d2:
+ d1 = f.read(8096)
+ d2 = somedata.read(8096)
+ self.assertEqual(d1, d2)
somedata.seek(0)
blob = Blob()
@@ -743,7 +743,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
- self.assert_(os.path.exists(tfname))
+ self.assertTrue(os.path.exists(tfname))
t = TransactionMetaData()
try:
@@ -756,7 +756,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
raise
# The uncommitted data file should have been removed
- self.assert_(not os.path.exists(tfname))
+ self.assertTrue(not os.path.exists(tfname))
# The file should be in the cache ...
filename = self._storage.fshelper.getBlobFilename(oid, revid)
@@ -768,7 +768,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
ZODB.blob.BushyLayout().getBlobFilePath(oid, revid),
)
- self.assert_(server_filename.startswith(self.blobdir))
+ self.assertTrue(server_filename.startswith(self.blobdir))
check_data(server_filename)
# If we remove it from the cache and call loadBlob, it should
@@ -1203,7 +1203,7 @@ def runzeo_without_configfile():
... ''' % sys.path)
>>> import subprocess, re
- >>> print(re.sub(b'\d\d+|[:]', b'', subprocess.Popen(
+ >>> print(re.sub(br'\d\d+|[:]', b'', subprocess.Popen(
... [sys.executable, 'runzeo', '-a:0', '-ft', '--test'],
... stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
... ).stdout.read()).decode('ascii'))
diff --git a/src/ZEO/tests/testZEO2.py b/src/ZEO/tests/testZEO2.py
index e4b37946..5f095869 100644
--- a/src/ZEO/tests/testZEO2.py
+++ b/src/ZEO/tests/testZEO2.py
@@ -149,6 +149,7 @@ We can start another client and get the storage lock.
>>> zs1.tpc_finish('1').set_sender(0, zs1.connection)
>>> fs.close()
+ >>> server.close()
"""
def errors_in_vote_should_clear_lock():
@@ -408,6 +409,7 @@ If clients disconnect while waiting, they will be dequeued:
>>> logging.getLogger('ZEO').setLevel(logging.NOTSET)
>>> logging.getLogger('ZEO').removeHandler(handler)
+ >>> server.close()
"""
def lock_sanity_check():
@@ -489,6 +491,8 @@ ZEOStorage as closed and see if trying to get a lock cleans it up:
>>> logging.getLogger('ZEO').setLevel(logging.NOTSET)
>>> logging.getLogger('ZEO').removeHandler(handler)
+
+ >>> server.close()
"""
def test_suite():
@@ -507,4 +511,3 @@ def test_suite():
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
-
diff --git a/src/ZEO/tests/test_cache.py b/src/ZEO/tests/test_cache.py
index 2d8fe161..922f214b 100644
--- a/src/ZEO/tests/test_cache.py
+++ b/src/ZEO/tests/test_cache.py
@@ -141,12 +141,12 @@ class CacheTests(ZODB.tests.util.TestCase):
for i in range(50):
n = p64(i)
cache.store(n, n, None, data[i])
- self.assertEquals(len(cache), i + 1)
+ self.assertEqual(len(cache), i + 1)
# The cache is now almost full. The next insert
# should delete some objects.
n = p64(50)
cache.store(n, n, None, data[51])
- self.assert_(len(cache) < 51)
+ self.assertTrue(len(cache) < 51)
# TODO: Need to make sure eviction of non-current data
# are handled correctly.
@@ -174,41 +174,44 @@ class CacheTests(ZODB.tests.util.TestCase):
eq(dict([(k, dict(v)) for (k, v) in copy.noncurrent.items()]),
dict([(k, dict(v)) for (k, v) in self.cache.noncurrent.items()]),
)
+ copy.close()
def testCurrentObjectLargerThanCache(self):
if self.cache.path:
os.remove(self.cache.path)
+ self.cache.close()
self.cache = ZEO.cache.ClientCache(size=50)
# We store an object that is a bit larger than the cache can handle.
self.cache.store(n1, n2, None, "x"*64)
# We can see that it was not stored.
- self.assertEquals(None, self.cache.load(n1))
+ self.assertEqual(None, self.cache.load(n1))
# If an object cannot be stored in the cache, it must not be
# recorded as current.
- self.assert_(n1 not in self.cache.current)
+ self.assertTrue(n1 not in self.cache.current)
# Regression test: invalidation must still work.
self.cache.invalidate(n1, n2)
def testOldObjectLargerThanCache(self):
if self.cache.path:
os.remove(self.cache.path)
+ self.cache.close()
cache = ZEO.cache.ClientCache(size=50)
# We store an object that is a bit larger than the cache can handle.
cache.store(n1, n2, n3, "x"*64)
# We can see that it was not stored.
- self.assertEquals(None, cache.load(n1))
+ self.assertEqual(None, cache.load(n1))
# If an object cannot be stored in the cache, it must not be
# recorded as non-current.
- self.assert_(1 not in cache.noncurrent)
+ self.assertTrue(1 not in cache.noncurrent)
def testVeryLargeCaches(self):
cache = ZEO.cache.ClientCache('cache', size=(1<<32)+(1<<20))
cache.store(n1, n2, None, b"x")
cache.close()
cache = ZEO.cache.ClientCache('cache', size=(1<<33)+(1<<20))
- self.assertEquals(cache.load(n1), (b'x', n2))
+ self.assertEqual(cache.load(n1), (b'x', n2))
cache.close()
def testConversionOfLargeFreeBlocks(self):
@@ -225,8 +228,8 @@ class CacheTests(ZODB.tests.util.TestCase):
cache.close()
with open('cache', 'rb') as f:
f.seek(12)
- self.assertEquals(f.read(1), b'f')
- self.assertEquals(struct.unpack(">I", f.read(4))[0],
+ self.assertEqual(f.read(1), b'f')
+ self.assertEqual(struct.unpack(">I", f.read(4))[0],
ZEO.cache.max_block_size)
if not sys.platform.startswith('linux'):
@@ -261,8 +264,8 @@ class CacheTests(ZODB.tests.util.TestCase):
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)
for i in range(100):
cache.store(p64(i), n1, None, data)
- self.assertEquals(len(cache), 100)
- self.assertEquals(os.path.getsize(
+ self.assertEqual(len(cache), 100)
+ self.assertEqual(os.path.getsize(
'cache'), ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)
# Now make it smaller
@@ -270,10 +273,10 @@ class CacheTests(ZODB.tests.util.TestCase):
small = 50
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
- self.assertEquals(len(cache), small)
- self.assertEquals(os.path.getsize(
+ self.assertEqual(len(cache), small)
+ self.assertEqual(os.path.getsize(
'cache'), ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
- self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+ self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
set(range(small)))
for i in range(100, 110):
cache.store(p64(i), n1, None, data)
@@ -282,9 +285,9 @@ class CacheTests(ZODB.tests.util.TestCase):
# evicted because of the optimization to assure that we
# always get a free block after a new allocated block.
expected_len = small - 1
- self.assertEquals(len(cache), expected_len)
+ self.assertEqual(len(cache), expected_len)
expected_oids = set(list(range(11, 50))+list(range(100, 110)))
- self.assertEquals(
+ self.assertEqual(
set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
@@ -292,8 +295,8 @@ class CacheTests(ZODB.tests.util.TestCase):
cache.close()
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
- self.assertEquals(len(cache), expected_len)
- self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+ self.assertEqual(len(cache), expected_len)
+ self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
# Now make it bigger
@@ -301,10 +304,10 @@ class CacheTests(ZODB.tests.util.TestCase):
large = 150
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
- self.assertEquals(len(cache), expected_len)
- self.assertEquals(os.path.getsize(
+ self.assertEqual(len(cache), expected_len)
+ self.assertEqual(os.path.getsize(
'cache'), ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
- self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+ self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
@@ -313,19 +316,19 @@ class CacheTests(ZODB.tests.util.TestCase):
# We use large-2 for the same reason we used small-1 above.
expected_len = large-2
- self.assertEquals(len(cache), expected_len)
+ self.assertEqual(len(cache), expected_len)
expected_oids = set(list(range(11, 50)) +
list(range(106, 110)) +
list(range(200, 305)))
- self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+ self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
# Make sure we can reopen with same size
cache.close()
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
- self.assertEquals(len(cache), expected_len)
- self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+ self.assertEqual(len(cache), expected_len)
+ self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
# Cleanup
diff --git a/src/ZEO/tests/testssl.py b/src/ZEO/tests/testssl.py
index 70500232..b71af111 100644
--- a/src/ZEO/tests/testssl.py
+++ b/src/ZEO/tests/testssl.py
@@ -118,7 +118,7 @@ class SSLConfigTest(ZEOConfigTestBase):
stop()
@unittest.skipIf(forker.ZEO4_SERVER, "ZEO4 servers don't support SSL")
[email protected](('asyncio' if PY3 else 'trollius') + '.async')
[email protected](('asyncio' if PY3 else 'trollius') + '.ensure_future')
@mock.patch(('asyncio' if PY3 else 'trollius') + '.set_event_loop')
@mock.patch(('asyncio' if PY3 else 'trollius') + '.new_event_loop')
@mock.patch('ZEO.asyncio.client.new_event_loop')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 6
} | 5.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"zope-testrunner",
"manuel",
"random2",
"mock",
"msgpack-python",
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
BTrees==4.11.3
certifi==2021.5.30
cffi==1.15.1
importlib-metadata==4.8.3
iniconfig==1.1.1
manuel==1.13.0
mock==5.2.0
msgpack-python==0.5.6
packaging==21.3
persistent==4.9.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
pyparsing==3.1.4
pytest==7.0.1
random2==1.0.2
six==1.17.0
tomli==1.2.3
transaction==3.1.0
typing_extensions==4.1.1
zc.lockfile==2.0
ZConfig==3.6.1
zdaemon==4.4
-e git+https://github.com/zopefoundation/ZEO.git@711232fff3ab6964fbdd45f2a742e04a12d13be7#egg=ZEO
zipp==3.6.0
ZODB==5.8.1
zodbpickle==2.6
zope.exceptions==4.6
zope.interface==5.5.2
zope.testing==5.0.1
zope.testrunner==5.6
| name: ZEO
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- btrees==4.11.3
- cffi==1.15.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- manuel==1.13.0
- mock==5.2.0
- msgpack-python==0.5.6
- packaging==21.3
- persistent==4.9.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyparsing==3.1.4
- pytest==7.0.1
- random2==1.0.2
- six==1.17.0
- tomli==1.2.3
- transaction==3.1.0
- typing-extensions==4.1.1
- zc-lockfile==2.0
- zconfig==3.6.1
- zdaemon==4.4
- zipp==3.6.0
- zodb==5.8.1
- zodbpickle==2.6
- zope-exceptions==4.6
- zope-interface==5.5.2
- zope-testing==5.0.1
- zope-testrunner==5.6
prefix: /opt/conda/envs/ZEO
| [
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_server_no_ssl",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_server_ssl_auth_dir",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_server_ssl_auth_file",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_server_ssl_pw"
]
| [
"src/ZEO/tests/testssl.py::SSLConfigTest::test_ssl_basic",
"src/ZEO/tests/testssl.py::SSLConfigTest::test_ssl_hostname_check"
]
| [
"src/ZEO/asyncio/tests.py::ClientTests::testClientBasics",
"src/ZEO/asyncio/tests.py::ClientTests::test_ClientDisconnected_on_call_timeout",
"src/ZEO/asyncio/tests.py::ClientTests::test_bad_protocol",
"src/ZEO/asyncio/tests.py::ClientTests::test_bad_server_tid",
"src/ZEO/asyncio/tests.py::ClientTests::test_cache_behind",
"src/ZEO/asyncio/tests.py::ClientTests::test_cache_way_behind",
"src/ZEO/asyncio/tests.py::ClientTests::test_call_async_from_same_thread",
"src/ZEO/asyncio/tests.py::ClientTests::test_errors_in_data_received",
"src/ZEO/asyncio/tests.py::ClientTests::test_flow_control",
"src/ZEO/asyncio/tests.py::ClientTests::test_get_peername",
"src/ZEO/asyncio/tests.py::ClientTests::test_heartbeat",
"src/ZEO/asyncio/tests.py::ClientTests::test_invalidations_while_verifying",
"src/ZEO/asyncio/tests.py::ClientTests::test_multiple_addresses",
"src/ZEO/asyncio/tests.py::ClientTests::test_readonly_fallback",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::testClientBasics",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_ClientDisconnected_on_call_timeout",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_bad_protocol",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_bad_server_tid",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_cache_behind",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_cache_way_behind",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_call_async_from_same_thread",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_errors_in_data_received",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_flow_control",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_get_peername",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_heartbeat",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_invalidations_while_verifying",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_multiple_addresses",
"src/ZEO/asyncio/tests.py::MsgpackClientTests::test_readonly_fallback",
"src/ZEO/asyncio/tests.py::ServerTests::testServerBasics",
"src/ZEO/asyncio/tests.py::ServerTests::test_invalid_methods",
"src/ZEO/asyncio/tests.py::MsgpackServerTests::testServerBasics",
"src/ZEO/asyncio/tests.py::MsgpackServerTests::test_invalid_methods",
"src/ZEO/asyncio/tests.py::test_suite",
"src/ZEO/tests/testConversionSupport.py::test_server_record_iternext",
"src/ZEO/tests/testConversionSupport.py::test_client_record_iternext",
"src/ZEO/tests/testConversionSupport.py::test_suite",
"src/ZEO/tests/testTransactionBuffer.py::test_suite",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_DB_convenience_error",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_DB_convenience_ok",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_client_convenience",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_connection_convenience_ok",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_connection_convenience_value",
"src/ZEO/tests/testZEO.py::test_server_status",
"src/ZEO/tests/testZEO.py::test_ruok",
"src/ZEO/tests/testZEO.py::test_runzeo_msgpack_support",
"src/ZEO/tests/testZEO.py::MultiprocessingTests::test_work_with_multiprocessing",
"src/ZEO/tests/testZEO.py::test_suite",
"src/ZEO/tests/testZEO2.py::test_suite",
"src/ZEO/tests/test_cache.py::CacheTests::testChangingCacheSize",
"src/ZEO/tests/test_cache.py::CacheTests::testConversionOfLargeFreeBlocks",
"src/ZEO/tests/test_cache.py::CacheTests::testCurrentObjectLargerThanCache",
"src/ZEO/tests/test_cache.py::CacheTests::testEviction",
"src/ZEO/tests/test_cache.py::CacheTests::testException",
"src/ZEO/tests/test_cache.py::CacheTests::testInvalidate",
"src/ZEO/tests/test_cache.py::CacheTests::testLastTid",
"src/ZEO/tests/test_cache.py::CacheTests::testLoad",
"src/ZEO/tests/test_cache.py::CacheTests::testNonCurrent",
"src/ZEO/tests/test_cache.py::CacheTests::testOldObjectLargerThanCache",
"src/ZEO/tests/test_cache.py::CacheTests::testSerialization",
"src/ZEO/tests/test_cache.py::CacheTests::testSetAnyLastTidOnEmptyCache",
"src/ZEO/tests/test_cache.py::CacheTests::testVeryLargeCaches",
"src/ZEO/tests/test_cache.py::CacheTests::test_clear_zeo_cache",
"src/ZEO/tests/test_cache.py::CacheTests::test_loadBefore_doesnt_miss_current",
"src/ZEO/tests/test_cache.py::test_suite",
"src/ZEO/tests/testssl.py::SSLConfigTest::test_ssl_pw",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_auth_dir",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_auth_file",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_check_hostname",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_no_ssl",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_pw",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_server_hostname",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_client_server_signed",
"src/ZEO/tests/testssl.py::SSLConfigTestMockiavellian::test_ssl_mockiavellian_server_ssl_no_auth",
"src/ZEO/tests/testssl.py::test_suite"
]
| []
| Zope Public License 2.1 | 2,337 | [
"src/ZEO/asyncio/marshal.py",
"src/ZEO/asyncio/client.py",
".travis.yml",
"src/ZEO/asyncio/server.py",
"src/ZEO/asyncio/mtacceptor.py",
"CHANGES.rst"
]
| [
"src/ZEO/asyncio/marshal.py",
"src/ZEO/asyncio/client.py",
".travis.yml",
"src/ZEO/asyncio/server.py",
"src/ZEO/asyncio/mtacceptor.py",
"CHANGES.rst"
]
|
|
zopefoundation__ZEO-111 | 23a5cf0ff2a6a35c1c3ebe3fbf6367e4e8c655a0 | 2018-03-27 14:38:18 | 5efce5d6821ac2455f37a425de8b377493d71101 | diff --git a/CHANGES.rst b/CHANGES.rst
index 0de427dd..ceb8a5b3 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -7,6 +7,10 @@ Changelog
- Fix ``ZEO.server`` relying on test dependencies. See `issue 105
<https://github.com/zopefoundation/ZEO/issues/105>`_.
+- Disallow passing strings as addresses to ClientStorage under Windows
+ because string addresses are used for unix-domain sockets, which
+ aren't supported on Windows. See `issue 107
+ <https://github.com/zopefoundation/ZEO/issues/107>`_.
5.1.2 (2018-03-27)
------------------
diff --git a/src/ZEO/ClientStorage.py b/src/ZEO/ClientStorage.py
index 21225ba8..589bbfa5 100644
--- a/src/ZEO/ClientStorage.py
+++ b/src/ZEO/ClientStorage.py
@@ -41,6 +41,7 @@ import six
from persistent.TimeStamp import TimeStamp
from ZEO._compat import get_ident
+from ZEO._compat import WIN
from ZEO.Exceptions import ClientDisconnected
from ZEO.TransactionBuffer import TransactionBuffer
from ZODB import POSException
@@ -191,6 +192,8 @@ class ClientStorage(ZODB.ConflictResolution.ConflictResolvingStorage):
self.__name__ = name or str(addr) # Standard convention for storages
if isinstance(addr, six.string_types):
+ if WIN:
+ raise ValueError("Unix sockets are not available on Windows")
addr = [addr]
elif (isinstance(addr, tuple) and len(addr) == 2 and
isinstance(addr[0], six.string_types) and isinstance(addr[1], int)):
diff --git a/src/ZEO/_compat.py b/src/ZEO/_compat.py
index 43515190..5a0d4676 100644
--- a/src/ZEO/_compat.py
+++ b/src/ZEO/_compat.py
@@ -19,6 +19,7 @@ import platform
PY3 = sys.version_info[0] >= 3
PY32 = sys.version_info[:2] == (3, 2)
PYPY = getattr(platform, 'python_implementation', lambda: None)() == 'PyPy'
+WIN = sys.platform.startswith('win')
if PY3:
from pickle import Pickler, Unpickler as _Unpickler, dump, dumps, loads
| ZEO ClientStorage wait_timeout is ignored on Windows
I was writing unit tests for zodbbrowser when I noticed that under Windows the following code never times out:
```python
from ZEO.ClientStorage import ClientStorage
storage = ClientStorage('/no/such/zeo/socket', wait_timeout=0.001, strorage='1', read_only=True)
```
Instead it hangs forever (I killed the hung Jenkins job after 19 hours).
The timeout works fine on Linux. | zopefoundation/ZEO | diff --git a/src/ZEO/tests/forker.py b/src/ZEO/tests/forker.py
index b6e146a6..df0f06a4 100644
--- a/src/ZEO/tests/forker.py
+++ b/src/ZEO/tests/forker.py
@@ -29,6 +29,7 @@ import six
import ZODB.tests.util
import zope.testing.setupstack
+from ZEO._compat import WIN
from ZEO import _forker
logger = logging.getLogger('ZEO.tests.forker')
@@ -60,7 +61,7 @@ runner = _forker.runner
stop_runner = _forker.stop_runner
start_zeo_server = _forker.start_zeo_server
-if sys.platform[:3].lower() == "win":
+if WIN:
def _quote_arg(s):
return '"%s"' % s
else:
diff --git a/src/ZEO/tests/testZEO.py b/src/ZEO/tests/testZEO.py
index 3296263c..824d91e6 100644
--- a/src/ZEO/tests/testZEO.py
+++ b/src/ZEO/tests/testZEO.py
@@ -20,6 +20,7 @@ from ZEO.ClientStorage import ClientStorage
from ZEO.tests import forker, Cache, CommitLockTests, ThreadTests
from ZEO.tests import IterationTests
from ZEO._compat import PY3
+from ZEO._compat import WIN
from ZODB.Connection import TransactionMetaData
from ZODB.tests import StorageTestBase, BasicStorage, \
@@ -1583,7 +1584,7 @@ if not os.environ.get('ZEO4_SERVER'):
>>> conn.close(); s()
"""
-if sys.platform.startswith('win'):
+if WIN:
del runzeo_logrotate_on_sigusr2
del unix_domain_sockets
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 5.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"zope-testrunner",
"manuel",
"random2",
"mock",
"msgpack-python",
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
BTrees==4.11.3
certifi==2021.5.30
cffi==1.15.1
importlib-metadata==4.8.3
iniconfig==1.1.1
manuel==1.13.0
mock==5.2.0
msgpack-python==0.5.6
packaging==21.3
persistent==4.9.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
pyparsing==3.1.4
pytest==7.0.1
random2==1.0.2
six==1.17.0
tomli==1.2.3
transaction==3.1.0
typing_extensions==4.1.1
zc.lockfile==2.0
ZConfig==3.6.1
zdaemon==4.4
-e git+https://github.com/zopefoundation/ZEO.git@23a5cf0ff2a6a35c1c3ebe3fbf6367e4e8c655a0#egg=ZEO
zipp==3.6.0
ZODB==5.8.1
zodbpickle==2.6
zope.exceptions==4.6
zope.interface==5.5.2
zope.testing==5.0.1
zope.testrunner==5.6
| name: ZEO
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- btrees==4.11.3
- cffi==1.15.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- manuel==1.13.0
- mock==5.2.0
- msgpack-python==0.5.6
- packaging==21.3
- persistent==4.9.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyparsing==3.1.4
- pytest==7.0.1
- random2==1.0.2
- six==1.17.0
- tomli==1.2.3
- transaction==3.1.0
- typing-extensions==4.1.1
- zc-lockfile==2.0
- zconfig==3.6.1
- zdaemon==4.4
- zipp==3.6.0
- zodb==5.8.1
- zodbpickle==2.6
- zope-exceptions==4.6
- zope-interface==5.5.2
- zope-testing==5.0.1
- zope-testrunner==5.6
prefix: /opt/conda/envs/ZEO
| [
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_DB_convenience_error",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_DB_convenience_ok",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_client_convenience",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_connection_convenience_ok",
"src/ZEO/tests/testZEO.py::Test_convenience_functions::test_ZEO_connection_convenience_value",
"src/ZEO/tests/testZEO.py::test_server_status",
"src/ZEO/tests/testZEO.py::test_ruok",
"src/ZEO/tests/testZEO.py::test_runzeo_msgpack_support",
"src/ZEO/tests/testZEO.py::MultiprocessingTests::test_work_with_multiprocessing",
"src/ZEO/tests/testZEO.py::test_suite"
]
| []
| []
| []
| Zope Public License 2.1 | 2,338 | [
"src/ZEO/ClientStorage.py",
"src/ZEO/_compat.py",
"CHANGES.rst"
]
| [
"src/ZEO/ClientStorage.py",
"src/ZEO/_compat.py",
"CHANGES.rst"
]
|
|
zopefoundation__ZEO-112 | 5efce5d6821ac2455f37a425de8b377493d71101 | 2018-03-27 15:15:50 | 5efce5d6821ac2455f37a425de8b377493d71101 | diff --git a/CHANGES.rst b/CHANGES.rst
index 759d40e2..0de427dd 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,7 +4,8 @@ Changelog
5.1.3 (unreleased)
------------------
-- Nothing changed yet.
+- Fix ``ZEO.server`` relying on test dependencies. See `issue 105
+ <https://github.com/zopefoundation/ZEO/issues/105>`_.
5.1.2 (2018-03-27)
diff --git a/src/ZEO/__init__.py b/src/ZEO/__init__.py
index ba340e4c..f0796e43 100644
--- a/src/ZEO/__init__.py
+++ b/src/ZEO/__init__.py
@@ -81,10 +81,10 @@ def server(path=None, blob_dir=None, storage_conf=None, zeo_conf=None,
dynamically.
"""
- import os, ZEO.tests.forker
+ import ZEO._forker as forker
if storage_conf is None and path is None:
storage_conf = '<mappingstorage>\n</mappingstorage>'
- return ZEO.tests.forker.start_zeo_server(
+ return forker.start_zeo_server(
storage_conf, zeo_conf, port, keep=True, path=path,
blob_dir=blob_dir, suicide=False, threaded=threaded, **kw)
diff --git a/src/ZEO/_forker.py b/src/ZEO/_forker.py
new file mode 100644
index 00000000..b941025e
--- /dev/null
+++ b/src/ZEO/_forker.py
@@ -0,0 +1,292 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE
+#
+##############################################################################
+"""Library for forking storage server and connecting client storage"""
+from __future__ import print_function
+import gc
+import os
+import sys
+import multiprocessing
+import logging
+import tempfile
+
+from six.moves.queue import Empty
+import six
+
+from ZEO._compat import StringIO
+
+logger = logging.getLogger('ZEO.tests.forker')
+
+DEBUG = os.environ.get('ZEO_TEST_SERVER_DEBUG')
+
+ZEO4_SERVER = os.environ.get('ZEO4_SERVER')
+
+class ZEOConfig(object):
+ """Class to generate ZEO configuration file. """
+
+ def __init__(self, addr, log=None, **options):
+ if log:
+ if isinstance(log, str):
+ self.logpath = log
+ elif isinstance(addr, str):
+ self.logpath = addr+'.log'
+ else:
+ self.logpath = 'server.log'
+
+ if not isinstance(addr, six.string_types):
+ addr = '%s:%s' % addr
+
+ self.log = log
+ self.address = addr
+ self.read_only = None
+ self.loglevel = 'INFO'
+ self.__dict__.update(options)
+
+ def dump(self, f):
+ print("<zeo>", file=f)
+ print("address " + self.address, file=f)
+ if self.read_only is not None:
+ print("read-only", self.read_only and "true" or "false", file=f)
+
+ for name in (
+ 'invalidation_queue_size', 'invalidation_age',
+ 'transaction_timeout', 'pid_filename', 'msgpack',
+ 'ssl_certificate', 'ssl_key', 'client_conflict_resolution',
+ ):
+ v = getattr(self, name, None)
+ if v:
+ print(name.replace('_', '-'), v, file=f)
+
+ print("</zeo>", file=f)
+
+ if self.log:
+ print("""
+ <eventlog>
+ level %s
+ <logfile>
+ path %s
+ </logfile>
+ </eventlog>
+ """ % (self.loglevel, self.logpath), file=f)
+
+ def __str__(self):
+ f = StringIO()
+ self.dump(f)
+ return f.getvalue()
+
+
+def runner(config, qin, qout, timeout=None,
+ debug=False, name=None,
+ keep=False, protocol=None):
+
+ if debug or DEBUG:
+ debug_logging()
+
+ old_protocol = None
+ if protocol:
+ import ZEO.asyncio.server
+ old_protocol = ZEO.asyncio.server.best_protocol_version
+ ZEO.asyncio.server.best_protocol_version = protocol
+ old_protocols = ZEO.asyncio.server.ServerProtocol.protocols
+ ZEO.asyncio.server.ServerProtocol.protocols = tuple(sorted(
+ set(old_protocols) | set([protocol])
+ ))
+
+ try:
+ import threading
+
+ if ZEO4_SERVER:
+ # XXX: test dependency. In practice this is
+ # probably ok
+ from ZEO.tests.ZEO4 import runzeo
+ else:
+ from . import runzeo
+
+ options = runzeo.ZEOOptions()
+ options.realize(['-C', config])
+ server = runzeo.ZEOServer(options)
+ globals()[(name if name else 'last') + '_server'] = server
+ server.open_storages()
+ server.clear_socket()
+ server.create_server()
+ logger.debug('SERVER CREATED')
+ if ZEO4_SERVER:
+ qout.put(server.server.addr)
+ else:
+ qout.put(server.server.acceptor.addr)
+ logger.debug('ADDRESS SENT')
+ thread = threading.Thread(
+ target=server.server.loop, kwargs=dict(timeout=.2),
+ name=(None if name is None else name + '-server'),
+ )
+ thread.setDaemon(True)
+ thread.start()
+ os.remove(config)
+
+ try:
+ qin.get(timeout=timeout) # wait for shutdown
+ except Empty:
+ pass
+ server.server.close()
+ thread.join(3)
+
+ if not keep:
+ # Try to cleanup storage files
+ for storage in server.server.storages.values():
+ try:
+ storage.cleanup()
+ except AttributeError:
+ pass
+
+ qout.put(thread.is_alive())
+
+ except Exception:
+ logger.exception("In server thread")
+
+ finally:
+ if old_protocol:
+ ZEO.asyncio.server.best_protocol_version = old_protocol
+ ZEO.asyncio.server.ServerProtocol.protocols = old_protocols
+
+def stop_runner(thread, config, qin, qout, stop_timeout=19, pid=None):
+ qin.put('stop')
+ try:
+ dirty = qout.get(timeout=stop_timeout)
+ except Empty:
+ print("WARNING Couldn't stop server", file=sys.stderr)
+ if hasattr(thread, 'terminate'):
+ thread.terminate()
+ os.waitpid(thread.pid, 0)
+ else:
+ if dirty:
+ print("WARNING SERVER DIDN'T STOP CLEANLY", file=sys.stderr)
+
+ # The runner thread didn't stop. If it was a process,
+ # give it some time to exit
+ if hasattr(thread, 'pid') and thread.pid:
+ os.waitpid(thread.pid, 0)
+
+ thread.join(stop_timeout)
+
+ gc.collect()
+
+def start_zeo_server(storage_conf=None, zeo_conf=None, port=None, keep=False,
+ path='Data.fs', protocol=None, blob_dir=None,
+ suicide=True, debug=False,
+ threaded=False, start_timeout=33, name=None, log=None,
+ show_config=False):
+ """Start a ZEO server in a separate process.
+
+ Takes two positional arguments a string containing the storage conf
+ and a ZEOConfig object.
+
+ Returns the ZEO address, the test server address, the pid, and the path
+ to the config file.
+ """
+
+ if not storage_conf:
+ storage_conf = '<filestorage>\npath %s\n</filestorage>' % path
+
+ if blob_dir:
+ storage_conf = '<blobstorage>\nblob-dir %s\n%s\n</blobstorage>' % (
+ blob_dir, storage_conf)
+
+ if zeo_conf is None or isinstance(zeo_conf, dict):
+ if port is None:
+ port = 0
+
+ if isinstance(port, int):
+ addr = '127.0.0.1', port
+ else:
+ addr = port
+
+ z = ZEOConfig(addr, log=log)
+ if zeo_conf:
+ z.__dict__.update(zeo_conf)
+ zeo_conf = str(z)
+
+ zeo_conf = str(zeo_conf) + '\n\n' + storage_conf
+ if show_config:
+ print(zeo_conf)
+
+ # Store the config info in a temp file.
+ fd, tmpfile = tempfile.mkstemp(".conf", prefix='ZEO_forker', dir=os.getcwd())
+ with os.fdopen(fd, 'w') as fp:
+ fp.write(zeo_conf)
+
+ if threaded:
+ from threading import Thread
+ from six.moves.queue import Queue
+ else:
+ from multiprocessing import Process as Thread
+ Queue = ThreadlessQueue
+
+ qin = Queue()
+ qout = Queue()
+ thread = Thread(
+ target=runner,
+ args=[tmpfile, qin, qout, 999 if suicide else None],
+ kwargs=dict(debug=debug, name=name, protocol=protocol, keep=keep),
+ name=(None if name is None else name + '-server-runner'),
+ )
+ thread.daemon = True
+ thread.start()
+ try:
+ addr = qout.get(timeout=start_timeout)
+ except Exception:
+ whine("SERVER FAILED TO START")
+ if thread.is_alive():
+ whine("Server thread/process is still running")
+ elif not threaded:
+ whine("Exit status", thread.exitcode)
+ raise
+
+ def stop(stop_timeout=99):
+ stop_runner(thread, tmpfile, qin, qout, stop_timeout)
+
+ return addr, stop
+
+
+def shutdown_zeo_server(stop):
+ stop()
+
+
+def debug_logging(logger='ZEO', stream='stderr', level=logging.DEBUG):
+ handler = logging.StreamHandler(getattr(sys, stream))
+ logger = logging.getLogger(logger)
+ logger.addHandler(handler)
+ logger.setLevel(level)
+
+ def stop():
+ logger.removeHandler(handler)
+ logger.setLevel(logging.NOTSET)
+
+ return stop
+
+def whine(*message):
+ print(*message, file=sys.stderr)
+ sys.stderr.flush()
+
+class ThreadlessQueue(object):
+
+ def __init__(self):
+ self.cin, self.cout = multiprocessing.Pipe(False)
+
+ def put(self, v):
+ self.cout.send(v)
+
+ def get(self, timeout=None):
+ if self.cin.poll(timeout):
+ return self.cin.recv()
+ else:
+ raise Empty()
| Quick Server-Client setup is broken
ZEO 's quick Server-Client setup seems to depend on a module from its tests folder, which depends on something inside ZODB's tests, which requires a module that existed in ZODB 5.1.1, but no longer in 5.3.0.
```
Traceback (most recent call last):
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/bin/flask", line 11, in <module>
sys.exit(main())
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/flask/cli.py", line 513, in main
cli.main(args=args, prog_name=name)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/flask/cli.py", line 380, in main
return AppGroup.main(self, *args, **kwargs)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/flask/cli.py", line 256, in decorator
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/flask/cli.py", line 237, in load_app
rv = locate_app(self.app_import_path)
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/flask/cli.py", line 90, in locate_app
__import__(module)
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash.py", line 1, in <module>
from pydash_web import flask_webapp
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_web/__init__.py", line 20, in <module>
from pydash_web import routes # Needs to be below flask_webapp instantiation to prevent circular dependency
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_web/routes.py", line 10, in <module>
import pydash_web.controller as controller
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_web/controller/__init__.py", line 5, in <module>
from .login import login
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_web/controller/login.py", line 8, in <module>
import pydash_app.user
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_app/user/__init__.py", line 6, in <module>
import pydash_app.user.user_repository
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_app/user/user_repository.py", line 19, in <module>
from ..impl.database import database_root, MultiIndexedPersistentCollection
File "/run/media/qqwy/Serendipity/Programming/RUG/SoftwareEng/PyDashIO/2018-PyDash.io/pydash/pydash_app/impl/database.py", line 12, in <module>
connection = ZEO.server(path='zeo_filestorage.fs')
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/ZEO/__init__.py", line 84, in server
import os, ZEO.tests.forker
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/ZEO/tests/forker.py", line 29, in <module>
import ZODB.tests.util
File "/home/qqwy/.local/share/virtualenvs/pydash-bxa1s-wp/lib/python3.6/site-packages/ZODB/tests/util.py", line 29, in <module>
import zope.testing.setupstack
ModuleNotFoundError: No module named 'zope.testing'
```
| zopefoundation/ZEO | diff --git a/src/ZEO/tests/forker.py b/src/ZEO/tests/forker.py
index e87643a1..b6e146a6 100644
--- a/src/ZEO/tests/forker.py
+++ b/src/ZEO/tests/forker.py
@@ -13,86 +13,38 @@
##############################################################################
"""Library for forking storage server and connecting client storage"""
from __future__ import print_function
-import gc
-import os
+
+
import random
import sys
import time
-import errno
-import multiprocessing
+
+
import socket
-import subprocess
+
import logging
-import tempfile
+
import six
-from six.moves.queue import Empty
+
import ZODB.tests.util
import zope.testing.setupstack
-from ZEO._compat import StringIO
+
+from ZEO import _forker
logger = logging.getLogger('ZEO.tests.forker')
-DEBUG = os.environ.get('ZEO_TEST_SERVER_DEBUG')
+DEBUG = _forker.DEBUG
+
+ZEO4_SERVER = _forker.ZEO4_SERVER
-ZEO4_SERVER = os.environ.get('ZEO4_SERVER')
skip_if_testing_client_against_zeo4 = (
(lambda func: None)
if ZEO4_SERVER else
(lambda func: func)
)
-class ZEOConfig(object):
- """Class to generate ZEO configuration file. """
- def __init__(self, addr, log=None, **options):
- if log:
- if isinstance(log, str):
- self.logpath = log
- elif isinstance(addr, str):
- self.logpath = addr+'.log'
- else:
- self.logpath = 'server.log'
-
- if not isinstance(addr, six.string_types):
- addr = '%s:%s' % addr
-
- self.log = log
- self.address = addr
- self.read_only = None
- self.loglevel = 'INFO'
- self.__dict__.update(options)
-
- def dump(self, f):
- print("<zeo>", file=f)
- print("address " + self.address, file=f)
- if self.read_only is not None:
- print("read-only", self.read_only and "true" or "false", file=f)
-
- for name in (
- 'invalidation_queue_size', 'invalidation_age',
- 'transaction_timeout', 'pid_filename', 'msgpack',
- 'ssl_certificate', 'ssl_key', 'client_conflict_resolution',
- ):
- v = getattr(self, name, None)
- if v:
- print(name.replace('_', '-'), v, file=f)
-
- print("</zeo>", file=f)
-
- if self.log:
- print("""
- <eventlog>
- level %s
- <logfile>
- path %s
- </logfile>
- </eventlog>
- """ % (self.loglevel, self.logpath), file=f)
-
- def __str__(self):
- f = StringIO()
- self.dump(f)
- return f.getvalue()
+ZEOConfig = _forker.ZEOConfig
def encode_format(fmt):
@@ -103,176 +55,10 @@ def encode_format(fmt):
fmt = fmt.replace(*xform)
return fmt
-def runner(config, qin, qout, timeout=None,
- debug=False, name=None,
- keep=False, protocol=None):
-
- if debug or DEBUG:
- debug_logging()
-
- old_protocol = None
- if protocol:
- import ZEO.asyncio.server
- old_protocol = ZEO.asyncio.server.best_protocol_version
- ZEO.asyncio.server.best_protocol_version = protocol
- old_protocols = ZEO.asyncio.server.ServerProtocol.protocols
- ZEO.asyncio.server.ServerProtocol.protocols = tuple(sorted(
- set(old_protocols) | set([protocol])
- ))
-
- try:
- import threading
-
- if ZEO4_SERVER:
- from .ZEO4 import runzeo
- else:
- from .. import runzeo
-
- options = runzeo.ZEOOptions()
- options.realize(['-C', config])
- server = runzeo.ZEOServer(options)
- globals()[(name if name else 'last') + '_server'] = server
- server.open_storages()
- server.clear_socket()
- server.create_server()
- logger.debug('SERVER CREATED')
- if ZEO4_SERVER:
- qout.put(server.server.addr)
- else:
- qout.put(server.server.acceptor.addr)
- logger.debug('ADDRESS SENT')
- thread = threading.Thread(
- target=server.server.loop, kwargs=dict(timeout=.2),
- name = None if name is None else name + '-server',
- )
- thread.setDaemon(True)
- thread.start()
- os.remove(config)
-
- try:
- qin.get(timeout=timeout) # wait for shutdown
- except Empty:
- pass
- server.server.close()
- thread.join(3)
-
- if not keep:
- # Try to cleanup storage files
- for storage in server.server.storages.values():
- try:
- storage.cleanup()
- except AttributeError:
- pass
-
- qout.put(thread.is_alive())
+runner = _forker.runner
- except Exception:
- logger.exception("In server thread")
-
- finally:
- if old_protocol:
- ZEO.asyncio.server.best_protocol_version = old_protocol
- ZEO.asyncio.server.ServerProtocol.protocols = old_protocols
-
-def stop_runner(thread, config, qin, qout, stop_timeout=19, pid=None):
- qin.put('stop')
- try:
- dirty = qout.get(timeout=stop_timeout)
- except Empty:
- print("WARNING Couldn't stop server", file=sys.stderr)
- if hasattr(thread, 'terminate'):
- thread.terminate()
- os.waitpid(thread.pid, 0)
- else:
- if dirty:
- print("WARNING SERVER DIDN'T STOP CLEANLY", file=sys.stderr)
-
- # The runner thread didn't stop. If it was a process,
- # give it some time to exit
- if hasattr(thread, 'pid') and thread.pid:
- os.waitpid(thread.pid, 0)
-
- thread.join(stop_timeout)
-
- gc.collect()
-
-def start_zeo_server(storage_conf=None, zeo_conf=None, port=None, keep=False,
- path='Data.fs', protocol=None, blob_dir=None,
- suicide=True, debug=False,
- threaded=False, start_timeout=33, name=None, log=None,
- show_config=False
- ):
- """Start a ZEO server in a separate process.
-
- Takes two positional arguments a string containing the storage conf
- and a ZEOConfig object.
-
- Returns the ZEO address, the test server address, the pid, and the path
- to the config file.
- """
-
- if not storage_conf:
- storage_conf = '<filestorage>\npath %s\n</filestorage>' % path
-
- if blob_dir:
- storage_conf = '<blobstorage>\nblob-dir %s\n%s\n</blobstorage>' % (
- blob_dir, storage_conf)
-
- if zeo_conf is None or isinstance(zeo_conf, dict):
- if port is None:
- port = 0
-
- if isinstance(port, int):
- addr = '127.0.0.1', port
- else:
- addr = port
-
- z = ZEOConfig(addr, log=log)
- if zeo_conf:
- z.__dict__.update(zeo_conf)
- zeo_conf = str(z)
-
- zeo_conf = str(zeo_conf) + '\n\n' + storage_conf
- if show_config:
- print(zeo_conf)
-
- # Store the config info in a temp file.
- tmpfile = tempfile.mktemp(".conf", dir=os.getcwd())
- fp = open(tmpfile, 'w')
- fp.write(zeo_conf)
- fp.close()
-
- if threaded:
- from threading import Thread
- from six.moves.queue import Queue
- else:
- from multiprocessing import Process as Thread
- Queue = ThreadlessQueue
-
- qin = Queue()
- qout = Queue()
- thread = Thread(
- target=runner,
- args=[tmpfile, qin, qout, 999 if suicide else None],
- kwargs=dict(debug=debug, name=name, protocol=protocol, keep=keep),
- name = None if name is None else name + '-server-runner',
- )
- thread.daemon = True
- thread.start()
- try:
- addr = qout.get(timeout=start_timeout)
- except Exception:
- whine("SERVER FAILED TO START")
- if thread.is_alive():
- whine("Server thread/process is still running")
- elif not threaded:
- whine("Exit status", thread.exitcode)
- raise
-
- def stop(stop_timeout=99):
- stop_runner(thread, tmpfile, qin, qout, stop_timeout)
-
- return addr, stop
+stop_runner = _forker.stop_runner
+start_zeo_server = _forker.start_zeo_server
if sys.platform[:3].lower() == "win":
def _quote_arg(s):
@@ -281,8 +67,7 @@ else:
def _quote_arg(s):
return s
-def shutdown_zeo_server(stop):
- stop()
+shutdown_zeo_server = _forker.shutdown_zeo_server
def get_port(ignored=None):
"""Return a port that is not in use.
@@ -295,7 +80,7 @@ def get_port(ignored=None):
Raises RuntimeError after 10 tries.
"""
- for i in range(10):
+ for _i in range(10):
port = random.randrange(20000, 30000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -413,34 +198,9 @@ def wait_connected(storage):
def wait_disconnected(storage):
wait_until("storage is disconnected",
- lambda : not storage.is_connected())
-
-def debug_logging(logger='ZEO', stream='stderr', level=logging.DEBUG):
- handler = logging.StreamHandler(getattr(sys, stream))
- logger = logging.getLogger(logger)
- logger.addHandler(handler)
- logger.setLevel(level)
-
- def stop():
- logger.removeHandler(handler)
- logger.setLevel(logging.NOTSET)
+ lambda: not storage.is_connected())
- return stop
-def whine(*message):
- print(*message, file=sys.stderr)
- sys.stderr.flush()
-
-class ThreadlessQueue(object):
-
- def __init__(self):
- self.cin, self.cout = multiprocessing.Pipe(False)
-
- def put(self, v):
- self.cout.send(v)
-
- def get(self, timeout=None):
- if self.cin.poll(timeout):
- return self.cin.recv()
- else:
- raise Empty()
+debug_logging = _forker.debug_logging
+whine = _forker.whine
+ThreadlessQueue = _forker.ThreadlessQueue
diff --git a/src/ZEO/tests/test_sync.py b/src/ZEO/tests/test_sync.py
index ec3d33d6..18e949fd 100644
--- a/src/ZEO/tests/test_sync.py
+++ b/src/ZEO/tests/test_sync.py
@@ -4,7 +4,7 @@ from zope.testing import setupstack
from .. import server, client
-from . import forker
+from ZEO import _forker as forker
if forker.ZEO4_SERVER:
server_ping_method = 'lastTransaction'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 5.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"zope-testrunner",
"ZConfig",
"mock",
"msgpack",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
BTrees==4.11.3
certifi==2021.5.30
cffi==1.15.1
importlib-metadata==4.8.3
iniconfig==1.1.1
manuel==1.13.0
mock==5.2.0
msgpack==1.0.5
msgpack-python==0.5.6
packaging==21.3
persistent==4.9.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
pyparsing==3.1.4
pytest==7.0.1
random2==1.0.2
six==1.17.0
tomli==1.2.3
transaction==3.1.0
typing_extensions==4.1.1
zc.lockfile==2.0
ZConfig==3.6.1
zdaemon==4.4
-e git+https://github.com/zopefoundation/ZEO.git@5efce5d6821ac2455f37a425de8b377493d71101#egg=ZEO
zipp==3.6.0
ZODB==5.8.1
zodbpickle==2.6
zope.exceptions==4.6
zope.interface==5.5.2
zope.testing==5.0.1
zope.testrunner==5.6
| name: ZEO
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- btrees==4.11.3
- cffi==1.15.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- manuel==1.13.0
- mock==5.2.0
- msgpack==1.0.5
- msgpack-python==0.5.6
- packaging==21.3
- persistent==4.9.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyparsing==3.1.4
- pytest==7.0.1
- random2==1.0.2
- six==1.17.0
- tomli==1.2.3
- transaction==3.1.0
- typing-extensions==4.1.1
- zc-lockfile==2.0
- zconfig==3.6.1
- zdaemon==4.4
- zipp==3.6.0
- zodb==5.8.1
- zodbpickle==2.6
- zope-exceptions==4.6
- zope-interface==5.5.2
- zope-testing==5.0.1
- zope-testrunner==5.6
prefix: /opt/conda/envs/ZEO
| [
"src/ZEO/tests/test_sync.py::SyncTests::test_server_sync"
]
| []
| []
| []
| Zope Public License 2.1 | 2,339 | [
"src/ZEO/_forker.py",
"src/ZEO/__init__.py",
"CHANGES.rst"
]
| [
"src/ZEO/_forker.py",
"src/ZEO/__init__.py",
"CHANGES.rst"
]
|
|
elastic__rally-451 | 7aad2e841ca7a2e022447993a83513f24b49c755 | 2018-03-27 17:26:22 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/docs/configuration.rst b/docs/configuration.rst
index b4f524ed..08430670 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -66,7 +66,7 @@ As you can see above, Rally autodetects if git, Gradle and a JDK are installed.
As you can see, Rally tells you that you cannot build Elasticsearch from sources but you can still benchmark official binary distributions.
-It's also possible that Rally cannot automatically find your JDK 8 or JDK 9 home directory. In that case, it will ask you later in the configuration process. If you do not provide a JDK home directory, Rally cannot start Elasticsearch on this machine but you can still use it as a load generator to :doc:`benchmark remote clusters </recipes>`.
+It's also possible that Rally cannot automatically find your JDK 8 or JDK 10 home directory. In that case, it will ask you later in the configuration process. If you do not provide a JDK home directory, Rally cannot start Elasticsearch on this machine but you can still use it as a load generator to :doc:`benchmark remote clusters </recipes>`.
After running the initial detection, Rally will try to autodetect your Elasticsearch project directory (either in the current directory or in ``../elasticsearch``) or will choose a default directory::
@@ -122,7 +122,7 @@ Rally will ask you a few more things in the advanced setup:
* **Benchmark data directory**: Rally stores all benchmark related data in this directory which can take up to several tens of GB. If you want to use a dedicated partition, you can specify a different data directory here.
* **Elasticsearch project directory**: This is the directory where the Elasticsearch sources are located. If you don't actively develop on Elasticsearch you can just leave the default but if you want to benchmark local changes you should point Rally to your project directory. Note that Rally will run builds with Gradle in this directory (it runs ``gradle clean`` and ``gradle :distribution:tar:assemble``).
-* **JDK root directory**: Rally will only ask this if it could not autodetect the JDK home by itself. Just enter the root directory of the JDK you want to use. By default, Rally will choose Java 8 if available and fallback to Java 9.
+* **JDK root directory**: Rally will only ask this if it could not autodetect the JDK home by itself. Just enter the root directory of the JDK you want to use. By default, Rally will choose Java 8 if available and fallback to Java 10.
* **Metrics store type**: You can choose between ``in-memory`` which requires no additional setup or ``elasticsearch`` which requires that you start a dedicated Elasticsearch instance to store metrics but gives you much more flexibility to analyse results.
* **Metrics store settings** (only for metrics store type ``elasticsearch``): Provide the connection details to the Elasticsearch metrics store. This should be an instance that you use just for Rally but it can be a rather small one. A single node cluster with default setting should do it. When using self-signed certificates on the Elasticsearch metrics store, certificate verification can be turned off by setting the ``datastore.ssl.verification_mode`` setting to ``none``. Alternatively you can enter the path to the certificate authority's signing certificate in ``datastore.ssl.certificate_authorities``. Both settings are optional.
* **Name for this benchmark environment** (only for metrics store type ``elasticsearch``): You can use the same metrics store for multiple environments (e.g. local, continuous integration etc.) so you can separate metrics from different environments by choosing a different name.
diff --git a/esrally/config.py b/esrally/config.py
index b8e0d79d..32348a67 100644
--- a/esrally/config.py
+++ b/esrally/config.py
@@ -106,7 +106,7 @@ def auto_load_local_config(base_config, additional_sections=None, config_file_cl
class Config:
- CURRENT_CONFIG_VERSION = 13
+ CURRENT_CONFIG_VERSION = 14
"""
Config is the main entry point to retrieve and set benchmark properties. It provides multiple scopes to allow overriding of values on
@@ -305,13 +305,13 @@ class ConfigFactory:
gradle_bin = "./gradlew" if use_gradle_wrapper else io.guess_install_location("gradle")
java_8_home = runtime_java_home if runtime_java_home else io.guess_java_home(major_version=8)
- java_9_home = java_home if java_home else io.guess_java_home(major_version=9)
+ java_10_home = java_home if java_home else io.guess_java_home(major_version=10)
from esrally.utils import jvm
if java_8_home:
auto_detected_java_home = java_8_home
# Don't auto-detect an EA release and bring trouble to the user later on. They can still configure it manually if they want to.
- elif java_9_home and not jvm.is_early_access_release(java_9_home):
- auto_detected_java_home = java_9_home
+ elif java_10_home and not jvm.is_early_access_release(java_10_home):
+ auto_detected_java_home = java_10_home
else:
auto_detected_java_home = None
@@ -342,14 +342,14 @@ class ConfigFactory:
self.o("* Setting up benchmark data directory in %s" % root_dir)
if benchmark_from_sources:
- if not java_9_home or jvm.is_early_access_release(java_9_home):
- raw_java_9_home = self._ask_property("Enter the JDK 9 root directory", check_path_exists=True, mandatory=False)
- if raw_java_9_home and jvm.major_version(raw_java_9_home) == 9 and not jvm.is_early_access_release(raw_java_9_home):
- java_9_home = io.normalize_path(raw_java_9_home) if raw_java_9_home else None
+ if not java_10_home or jvm.is_early_access_release(java_10_home):
+ raw_java_10_home = self._ask_property("Enter the JDK 10 root directory", check_path_exists=True, mandatory=False)
+ if raw_java_10_home and jvm.major_version(raw_java_10_home) == 10 and not jvm.is_early_access_release(raw_java_10_home):
+ java_10_home = io.normalize_path(raw_java_10_home) if raw_java_10_home else None
else:
benchmark_from_sources = False
self.o("********************************************************************************")
- self.o("You don't have a valid JDK 9 installation and cannot benchmark source builds.")
+ self.o("You don't have a valid JDK 10 installation and cannot benchmark source builds.")
self.o("")
self.o("You can still benchmark binary distributions with e.g.:")
self.o("")
@@ -454,8 +454,8 @@ class ConfigFactory:
config["runtime"] = {}
if java_home:
config["runtime"]["java.home"] = java_home
- if java_9_home:
- config["runtime"]["java9.home"] = java_9_home
+ if java_10_home:
+ config["runtime"]["java10.home"] = java_10_home
config["benchmarks"] = {}
config["benchmarks"]["local.dataset.cache"] = "${node:root.dir}/data"
@@ -829,6 +829,44 @@ def migrate(config_file, current_version, target_version, out=print, i=input):
current_version = 13
config["meta"]["config.version"] = str(current_version)
+ if current_version == 13 and target_version > current_version:
+ # This version replaced java9.home with java10.home
+ if "build" in config and "gradle.bin" in config["build"]:
+ java_10_home = io.guess_java_home(major_version=10)
+ from esrally.utils import jvm
+ if java_10_home and not jvm.is_early_access_release(java_10_home):
+ logger.debug("Autodetected a JDK 10 installation at [%s]" % java_10_home)
+ if "runtime" not in config:
+ config["runtime"] = {}
+ config["runtime"]["java10.home"] = java_10_home
+ else:
+ logger.debug("Could not autodetect a JDK 10 installation. Checking [java.home] already points to a JDK 10.")
+ detected = False
+ if "runtime" in config:
+ java_home = config["runtime"]["java.home"]
+ if jvm.major_version(java_home) == 10 and not jvm.is_early_access_release(java_home):
+ config["runtime"]["java10.home"] = java_home
+ detected = True
+
+ if not detected:
+ logger.debug("Could not autodetect a JDK 10 installation. Asking user.")
+ raw_java_10_home = prompter.ask_property("Enter the JDK 10 root directory", check_path_exists=True, mandatory=False)
+ if raw_java_10_home and jvm.major_version(raw_java_10_home) == 10 and not jvm.is_early_access_release(raw_java_10_home):
+ java_10_home = io.normalize_path(raw_java_10_home) if raw_java_10_home else None
+ config["runtime"]["java10.home"] = java_10_home
+ else:
+ out("********************************************************************************")
+ out("You don't have a valid JDK 10 installation and cannot benchmark source builds.")
+ out("")
+ out("You can still benchmark binary distributions with e.g.:")
+ out("")
+ out(" %s --distribution-version=6.0.0" % PROGRAM_NAME)
+ out("********************************************************************************")
+ out("")
+
+ current_version = 14
+ config["meta"]["config.version"] = str(current_version)
+
# all migrations done
config_file.store(config)
logger.info("Successfully self-upgraded configuration to version [%s]" % target_version)
diff --git a/esrally/mechanic/supplier.py b/esrally/mechanic/supplier.py
index 7cbd4b70..b32106c4 100644
--- a/esrally/mechanic/supplier.py
+++ b/esrally/mechanic/supplier.py
@@ -27,9 +27,9 @@ def create(cfg, sources, distribution, build, challenge_root_path, plugins):
if build_needed:
gradle = cfg.opts("build", "gradle.bin")
- java9_home = _java9_home(cfg)
+ java10_home = _java10_home(cfg)
es_src_dir = os.path.join(_src_dir(cfg), _config_value(src_config, "elasticsearch.src.subdir"))
- builder = Builder(es_src_dir, gradle, java9_home, challenge_root_path)
+ builder = Builder(es_src_dir, gradle, java10_home, challenge_root_path)
else:
builder = None
@@ -68,14 +68,14 @@ def create(cfg, sources, distribution, build, challenge_root_path, plugins):
return CompositeSupplier(suppliers)
-def _java9_home(cfg):
+def _java10_home(cfg):
from esrally import config
try:
- return cfg.opts("runtime", "java9.home")
+ return cfg.opts("runtime", "java10.home")
except config.ConfigError:
- logger.exception("Cannot determine Java 9 home.")
- raise exceptions.SystemSetupError("No JDK 9 is configured. You cannot benchmark source builds of Elasticsearch on this machine. "
- "Please install a JDK 9 and reconfigure Rally with %s configure" % PROGRAM_NAME)
+ logger.exception("Cannot determine Java 10 home.")
+ raise exceptions.SystemSetupError("No JDK 10 is configured. You cannot benchmark source builds of Elasticsearch on this machine. "
+ "Please install a JDK 10 and reconfigure Rally with %s configure" % PROGRAM_NAME)
def _required_version(version):
@@ -427,13 +427,6 @@ class SourceRepository:
class Builder:
- # Tested with Gradle 4.1 on Java 9-ea+161
- JAVA_9_GRADLE_OPTS = "--add-opens=java.base/java.io=ALL-UNNAMED " \
- "--add-opens=java.base/java.lang=ALL-UNNAMED " \
- "--add-opens=java.base/java.lang.invoke=ALL-UNNAMED " \
- "--add-opens=java.base/java.util=ALL-UNNAMED " \
- "--add-opens=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED " \
- "--add-opens=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED"
"""
A builder is responsible for creating an installable binary from the source files.
@@ -460,14 +453,8 @@ class Builder:
log_file = "%s/build.log" % self.log_dir
# we capture all output to a dedicated build log file
- jvm_major_version = jvm.major_version(self.java_home)
- if jvm_major_version > 8:
- logger.info("Detected JVM with major version [%d]. Adjusting JDK module access options for the build." % jvm_major_version)
- gradle_opts = "export GRADLE_OPTS=\"%s\"; " % Builder.JAVA_9_GRADLE_OPTS
- else:
- gradle_opts = ""
- build_cmd = "%sexport JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" % (gradle_opts, self.java_home, src_dir, self.gradle, task, log_file)
+ build_cmd = "export JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" % (self.java_home, src_dir, self.gradle, task, log_file)
logger.info("Running build command [%s]" % build_cmd)
if process.run_subprocess(build_cmd):
| Require JDK 10 for source builds
When https://github.com/elastic/elasticsearch/pull/29174 gets merged, source builds of Elasticsearch master will require Java 10, at minimum.
We need to modify the existing code to check for a Java 10 installation / ask the user and set it when users want to build Elasticsearch from sources.
Relates #387
Relates #412 | elastic/rally | diff --git a/tests/config_test.py b/tests/config_test.py
index 57bafef2..91fc9b3e 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -238,7 +238,7 @@ class ConfigFactoryTests(TestCase):
@mock.patch("esrally.utils.io.guess_install_location")
def test_create_simple_config(self, guess_install_location, guess_java_home, is_ea_release, working_copy):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
- guess_java_home.return_value = "/tests/java9/home"
+ guess_java_home.return_value = "/tests/java10/home"
is_ea_release.return_value = False
# Rally checks in the parent and sibling directories whether there is an ES working copy. We don't want this detection logic
# to succeed spuriously (e.g. on developer machines).
@@ -256,7 +256,7 @@ class ConfigFactoryTests(TestCase):
print("%s::%s: %s" % (section, k, v))
self.assertTrue("meta" in config_store.config)
- self.assertEqual("13", config_store.config["meta"]["config.version"])
+ self.assertEqual("14", config_store.config["meta"]["config.version"])
self.assertTrue("system" in config_store.config)
self.assertEqual("local", config_store.config["system"]["env.name"])
@@ -273,8 +273,8 @@ class ConfigFactoryTests(TestCase):
self.assertEqual("/tests/usr/bin/gradle", config_store.config["build"]["gradle.bin"])
self.assertTrue("runtime" in config_store.config)
- self.assertEqual("/tests/java9/home", config_store.config["runtime"]["java.home"])
- self.assertEqual("/tests/java9/home", config_store.config["runtime"]["java9.home"])
+ self.assertEqual("/tests/java10/home", config_store.config["runtime"]["java.home"])
+ self.assertEqual("/tests/java10/home", config_store.config["runtime"]["java10.home"])
self.assertTrue("benchmarks" in config_store.config)
self.assertEqual("${node:root.dir}/data", config_store.config["benchmarks"]["local.dataset.cache"])
@@ -316,13 +316,13 @@ class ConfigFactoryTests(TestCase):
major_jvm_version, jvm_is_early_access_release):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
guess_java_home.return_value = None
- normalize_path.side_effect = ["/home/user/.rally/benchmarks", "/tests/java9/home", "/tests/java8/home",
+ normalize_path.side_effect = ["/home/user/.rally/benchmarks", "/tests/java10/home", "/tests/java8/home",
"/home/user/.rally/benchmarks/src"]
- major_jvm_version.return_value = 9
+ major_jvm_version.return_value = 10
jvm_is_early_access_release.return_value = False
path_exists.return_value = True
- f = config.ConfigFactory(i=MockInput(["/tests/java9/home", "/Projects/elasticsearch/src", "/tests/java8/home"]), o=null_output)
+ f = config.ConfigFactory(i=MockInput(["/tests/java10/home", "/Projects/elasticsearch/src", "/tests/java8/home"]), o=null_output)
config_store = InMemoryConfigStore("test")
f.create_config(config_store)
@@ -337,7 +337,7 @@ class ConfigFactoryTests(TestCase):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
guess_java_home.return_value = None
- # the input is the question for the JDK home and the JDK 9 home directory - the user does not define one
+ # the input is the question for the JDK home and the JDK 10 home directory - the user does not define one
f = config.ConfigFactory(i=MockInput(["", ""]), o=null_output)
config_store = InMemoryConfigStore("test")
@@ -345,14 +345,14 @@ class ConfigFactoryTests(TestCase):
self.assertIsNotNone(config_store.config)
self.assertFalse("java.home" in config_store.config["runtime"])
- self.assertFalse("java9.home" in config_store.config["runtime"])
+ self.assertFalse("java10.home" in config_store.config["runtime"])
@mock.patch("esrally.utils.jvm.is_early_access_release")
@mock.patch("esrally.utils.io.guess_java_home")
@mock.patch("esrally.utils.io.guess_install_location")
def test_create_advanced_config(self, guess_install_location, guess_java_home, is_ea_release):
guess_install_location.side_effect = ["/tests/usr/bin/git", "/tests/usr/bin/gradle"]
- guess_java_home.side_effect = ["/tests/java8/home", "/tests/java9/home"]
+ guess_java_home.side_effect = ["/tests/java8/home", "/tests/java10/home"]
is_ea_release.return_value = False
f = config.ConfigFactory(i=MockInput([
@@ -381,7 +381,7 @@ class ConfigFactoryTests(TestCase):
self.assertIsNotNone(config_store.config)
self.assertTrue("meta" in config_store.config)
- self.assertEqual("13", config_store.config["meta"]["config.version"])
+ self.assertEqual("14", config_store.config["meta"]["config.version"])
self.assertTrue("system" in config_store.config)
self.assertEqual("unittest-env", config_store.config["system"]["env.name"])
self.assertTrue("node" in config_store.config)
@@ -391,7 +391,7 @@ class ConfigFactoryTests(TestCase):
self.assertEqual("/tests/usr/bin/gradle", config_store.config["build"]["gradle.bin"])
self.assertTrue("runtime" in config_store.config)
self.assertEqual("/tests/java8/home", config_store.config["runtime"]["java.home"])
- self.assertEqual("/tests/java9/home", config_store.config["runtime"]["java9.home"])
+ self.assertEqual("/tests/java10/home", config_store.config["runtime"]["java10.home"])
self.assertTrue("benchmarks" in config_store.config)
self.assertTrue("reporting" in config_store.config)
@@ -919,3 +919,129 @@ class ConfigMigrationTests(TestCase):
self.assertEqual("13", config_file.config["meta"]["config.version"])
self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
self.assertEqual("/usr/lib/java9", config_file.config["runtime"]["java9.home"])
+
+ def test_migrate_from_13_to_14_without_gradle(self):
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output)
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk8_autodetect_jdk10(self, is_early_access_release, guess_java_home):
+ guess_java_home.return_value = "/usr/lib/java10"
+ is_early_access_release.return_value = False
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java8"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output)
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java10.home"])
+
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ @mock.patch("esrally.utils.jvm.major_version")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk10(self, major_version, is_early_access_release, guess_java_home):
+ guess_java_home.return_value = None
+ is_early_access_release.return_value = False
+ major_version.return_value = 10
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java10"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output)
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java.home"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java10.home"])
+
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ @mock.patch("esrally.utils.jvm.major_version")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_and_skip(self, major_version, is_early_access_release, guess_java_home):
+ guess_java_home.return_value = None
+ is_early_access_release.return_value = False
+ major_version.return_value = 8
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java8"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output, i=MockInput(inputs=[""]))
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
+ self.assertTrue("java10.home" not in config_file.config["runtime"])
+
+ @mock.patch("esrally.utils.io.exists")
+ @mock.patch("esrally.utils.io.guess_java_home")
+ @mock.patch("esrally.utils.jvm.is_early_access_release")
+ @mock.patch("esrally.utils.jvm.major_version")
+ def test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_enter_valid(self, major_version, is_early_access_release, guess_java_home,
+ path_exists):
+ guess_java_home.return_value = None
+ is_early_access_release.return_value = False
+ major_version.side_effect = [8, 10]
+ path_exists.return_value = True
+
+ config_file = InMemoryConfigStore("test")
+ sample_config = {
+ "meta": {
+ "config.version": 13
+ },
+ "build": {
+ "gradle.bin": "/usr/local/bin/gradle"
+ },
+ "runtime": {
+ "java.home": "/usr/lib/java8"
+ }
+ }
+ config_file.store(sample_config)
+ config.migrate(config_file, 13, 14, out=null_output, i=MockInput(inputs=["/usr/lib/java10"]))
+
+ self.assertTrue(config_file.backup_created)
+ self.assertEqual("14", config_file.config["meta"]["config.version"])
+ self.assertEqual("/usr/lib/java8", config_file.config["runtime"]["java.home"])
+ self.assertEqual("/usr/lib/java10", config_file.config["runtime"]["java10.home"])
diff --git a/tests/mechanic/supplier_test.py b/tests/mechanic/supplier_test.py
index be8ae8e2..cb184505 100644
--- a/tests/mechanic/supplier_test.py
+++ b/tests/mechanic/supplier_test.py
@@ -134,20 +134,19 @@ class BuilderTests(TestCase):
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.jvm.major_version")
- def test_build_on_jdk_9(self, jvm_major_version, mock_run_subprocess):
- jvm_major_version.return_value = 9
+ def test_build_on_jdk_10(self, jvm_major_version, mock_run_subprocess):
+ jvm_major_version.return_value = 10
mock_run_subprocess.return_value = False
- b = supplier.Builder(src_dir="/src", gradle="/usr/local/gradle", java_home="/opt/jdk9", log_dir="logs")
+ b = supplier.Builder(src_dir="/src", gradle="/usr/local/gradle", java_home="/opt/jdk10", log_dir="logs")
b.build([supplier.CLEAN_TASK, supplier.ASSEMBLE_TASK])
calls = [
# Actual call
- mock.call("export GRADLE_OPTS=\"%s\"; export JAVA_HOME=/opt/jdk9; cd /src; /usr/local/gradle clean >> logs/build.log 2>&1" %
- supplier.Builder.JAVA_9_GRADLE_OPTS),
+ mock.call("export JAVA_HOME=/opt/jdk10; cd /src; /usr/local/gradle clean >> logs/build.log 2>&1"),
# Return value check
- mock.call("export GRADLE_OPTS=\"%s\"; export JAVA_HOME=/opt/jdk9; cd /src; /usr/local/gradle "
- ":distribution:archives:tar:assemble >> logs/build.log 2>&1" % supplier.Builder.JAVA_9_GRADLE_OPTS),
+ mock.call("export JAVA_HOME=/opt/jdk10; cd /src; /usr/local/gradle "
+ ":distribution:archives:tar:assemble >> logs/build.log 2>&1"),
]
mock_run_subprocess.assert_has_calls(calls)
@@ -335,7 +334,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
composite_supplier = supplier.create(cfg, sources=False, distribution=True, build=False, challenge_root_path="/", plugins=[])
@@ -352,7 +351,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
cfg.add(config.Scope.application, "source", "plugin.community-plugin.src.dir", "/home/user/Projects/community-plugin")
@@ -383,7 +382,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
core_plugin = team.PluginDescriptor("analysis-icu", core_plugin=True)
@@ -406,7 +405,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
cfg.add(config.Scope.application, "node", "src.root.dir", "/opt/rally/src")
cfg.add(config.Scope.application, "build", "gradle.bin", "/opt/gradle")
@@ -437,7 +436,7 @@ class CreateSupplierTests(TestCase):
cfg.add(config.Scope.application, "distributions", "release.url",
"https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz")
cfg.add(config.Scope.application, "distributions", "release.cache", True)
- cfg.add(config.Scope.application, "runtime", "java9.home", "/usr/local/bin/java9/")
+ cfg.add(config.Scope.application, "runtime", "java10.home", "/usr/local/bin/java10/")
cfg.add(config.Scope.application, "node", "root.dir", "/opt/rally")
cfg.add(config.Scope.application, "node", "src.root.dir", "/opt/rally/src")
cfg.add(config.Scope.application, "build", "gradle.bin", "/opt/gradle")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc python3-pip python3-dev"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
elasticsearch==6.0.0
-e git+https://github.com/elastic/rally.git@7aad2e841ca7a2e022447993a83513f24b49c755#egg=esrally
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
packaging==21.3
pluggy==1.0.0
psutil==5.4.0
py==1.11.0
py-cpuinfo==3.2.0
pyparsing==3.1.4
pytest==7.0.1
pytest-benchmark==3.4.1
tabulate==0.8.1
thespian==3.9.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.22
zipp==3.6.0
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- elasticsearch==6.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- psutil==5.4.0
- py==1.11.0
- py-cpuinfo==3.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-benchmark==3.4.1
- tabulate==0.8.1
- thespian==3.9.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.22
- zipp==3.6.0
prefix: /opt/conda/envs/rally
| [
"tests/config_test.py::ConfigFactoryTests::test_create_advanced_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config_no_java_detected",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk10",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_and_skip",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_enter_valid",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_autodetect_jdk10",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_without_gradle",
"tests/mechanic/supplier_test.py::BuilderTests::test_build_on_jdk_10",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_and_plugin_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_distribution_plugin_source_build"
]
| []
| [
"tests/config_test.py::ConfigTests::test_add_all_in_section",
"tests/config_test.py::ConfigTests::test_load_all_opts_in_section",
"tests/config_test.py::ConfigTests::test_load_existing_config",
"tests/config_test.py::ConfigTests::test_load_non_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_create_non_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_load_and_amend_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_migrate_outdated_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config_no_java_installed",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_0_to_latest",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_10_to_11",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_custom_src_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_default_src_config_repo_checked_out",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_default_src_config_repo_not_checked_out",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_with_partial_src_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_11_to_12_without_src_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_ask_user_and_skip",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_ask_user_enter_valid",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_autodetect_jdk9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_without_gradle",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_2_to_3",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_3_to_4",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_4_to_5",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_5_to_6",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_6_to_7",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_7_to_8",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_8_to_9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_9_to_10",
"tests/mechanic/supplier_test.py::RevisionExtractorTests::test_invalid_revisions",
"tests/mechanic/supplier_test.py::RevisionExtractorTests::test_multiple_revisions",
"tests/mechanic/supplier_test.py::RevisionExtractorTests::test_single_revision",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_current",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_revision",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_revision_for_local_only_repo",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_checkout_ts",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_intial_checkout_latest",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_is_commit_hash",
"tests/mechanic/supplier_test.py::SourceRepositoryTests::test_is_not_commit_hash",
"tests/mechanic/supplier_test.py::BuilderTests::test_build_on_jdk_8",
"tests/mechanic/supplier_test.py::ElasticsearchSourceSupplierTests::test_add_elasticsearch_binary",
"tests/mechanic/supplier_test.py::ElasticsearchSourceSupplierTests::test_build",
"tests/mechanic/supplier_test.py::ElasticsearchSourceSupplierTests::test_no_build",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_add_binary_built_along_elasticsearch",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_along_es_plugin_keeps_build_dir",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_invalid_config_duplicate_source",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_invalid_config_no_source",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_resolve_plugin_binary_built_standalone",
"tests/mechanic/supplier_test.py::ExternalPluginSourceSupplierTests::test_standalone_plugin_overrides_build_dir",
"tests/mechanic/supplier_test.py::CorePluginSourceSupplierTests::test_resolve_plugin_binary",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_distribution_plugin_source_skip",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_missing_distribution_plugin_source_skip",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_create_suppliers_for_es_only_config",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_and_plugin_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_distribution",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_distribution_and_plugin_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_distribution_and_plugin_source_skip",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_source_build",
"tests/mechanic/supplier_test.py::CreateSupplierTests::test_derive_supply_requirements_es_source_skip",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_invalid_cache_value",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_missing_cache",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_missing_url",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_release_repo_config_with_default_url",
"tests/mechanic/supplier_test.py::DistributionRepositoryTests::test_release_repo_config_with_version_url"
]
| []
| Apache License 2.0 | 2,340 | [
"esrally/config.py",
"esrally/mechanic/supplier.py",
"docs/configuration.rst"
]
| [
"esrally/config.py",
"esrally/mechanic/supplier.py",
"docs/configuration.rst"
]
|
|
dask__dask-3343 | a18e4e9934eda54ed78edb11b80228cf9124c607 | 2018-03-27 22:42:34 | 48c4a589393ebc5b335cc5c7df291901401b0b15 | jcrist: This error isn't only thrown for custom functions, this also shows up when the user does an operation that's invalid:
```pytb
In [1]: import dask.dataframe as dd
In [2]: import pandas as pd
In [3]: df = pd.DataFrame({'a': ['x', 'y', 'y'], 'b': ['x', 'y', 'z'], 'c': [1, 2, 3]})
In [4]: ddf = dd.from_pandas(df, npartitions=1)
In [5]: ddf.a + ddf.c
---------------------------------------------------------------------------
...
ValueError: Metadata inference failed in `add`.
Original error is below:
------------------------
TypeError("ufunc 'add' did not contain a loop with signature matching types dtype('<U21') dtype('<U21') dtype('<U21')",)
Traceback:
---------
File "/Users/jcrist/anaconda/envs/dask/lib/python3.5/site-packages/dask-0.17.1-py3.5.egg/dask/dataframe/utils.py", line 137, in raise_on_meta_error
yield
File "/Users/jcrist/anaconda/envs/dask/lib/python3.5/site-packages/dask-0.17.1-py3.5.egg/dask/dataframe/core.py", line 3062, in elemwise
meta = partial_by_order(*parts, function=op, other=other)
File "/Users/jcrist/anaconda/envs/dask/lib/python3.5/site-packages/dask-0.17.1-py3.5.egg/dask/utils.py", line 879, in partial_by_order
return function(*args2, **kwargs)
File "/Users/jcrist/anaconda/envs/dask/lib/python3.5/site-packages/pandas/core/ops.py", line 739, in wrapper
result = wrap_results(safe_na_op(lvalues, rvalues))
File "/Users/jcrist/anaconda/envs/dask/lib/python3.5/site-packages/pandas/core/ops.py", line 710, in safe_na_op
lambda x: op(x, rvalues))
File "pandas/_libs/algos_common_helper.pxi", line 1212, in pandas._libs.algos.arrmap_object
File "/Users/jcrist/anaconda/envs/dask/lib/python3.5/site-packages/pandas/core/ops.py", line 710, in <lambda>
lambda x: op(x, rvalues))
```
I recommend adding a flag to `raise_on_meta_error` to optionally add this suggestion, and then set it appropriately in the proper places. | diff --git a/dask/dataframe/core.py b/dask/dataframe/core.py
index ff2e21e63..c4d6eb99f 100644
--- a/dask/dataframe/core.py
+++ b/dask/dataframe/core.py
@@ -1970,7 +1970,7 @@ Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
enumerate(self.__dask_keys__())}
dsk.update(self.dask)
if meta is no_default:
- meta = _emulate(M.map, self, arg, na_action=na_action)
+ meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)
else:
meta = make_meta(meta)
@@ -2121,7 +2121,7 @@ Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
- args=args, **kwds)
+ args=args, udf=True, **kwds)
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@@ -2794,7 +2794,7 @@ class DataFrame(_Frame):
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
- axis=axis, args=args, **kwds)
+ axis=axis, args=args, udf=True, **kwds)
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@@ -3291,8 +3291,8 @@ def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
dsk[(b, j)] = (aggregate, conc)
if meta is no_default:
- meta_chunk = _emulate(chunk, *args, **chunk_kwargs)
- meta = _emulate(aggregate, _concat([meta_chunk]),
+ meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)
+ meta = _emulate(aggregate, _concat([meta_chunk]), udf=True,
**aggregate_kwargs)
meta = make_meta(meta)
@@ -3332,7 +3332,7 @@ def _emulate(func, *args, **kwargs):
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
- with raise_on_meta_error(funcname(func)):
+ with raise_on_meta_error(funcname(func), udf=kwargs.pop('udf', False)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@@ -3369,7 +3369,7 @@ def map_partitions(func, *args, **kwargs):
args = _maybe_align_partitions(args)
if meta is no_default:
- meta = _emulate(func, *args, **kwargs)
+ meta = _emulate(func, *args, udf=True, **kwargs)
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
diff --git a/dask/dataframe/utils.py b/dask/dataframe/utils.py
index 1f23badea..dbbc66fed 100644
--- a/dask/dataframe/utils.py
+++ b/dask/dataframe/utils.py
@@ -124,7 +124,7 @@ def insert_meta_param_description(*args, **kwargs):
@contextmanager
-def raise_on_meta_error(funcname=None):
+def raise_on_meta_error(funcname=None, udf=False):
"""Reraise errors in this block to show metadata inference failure.
Parameters
@@ -138,15 +138,19 @@ def raise_on_meta_error(funcname=None):
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
- msg = ("Metadata inference failed{0}.\n\n"
- "Original error is below:\n"
- "------------------------\n"
- "{1}\n\n"
- "Traceback:\n"
- "---------\n"
- "{2}"
- ).format(" in `{0}`".format(funcname) if funcname else "",
- repr(e), tb)
+ msg = "Metadata inference failed{0}.\n\n"
+ if udf:
+ msg += ("You have supplied a custom function and Dask is unable to \n"
+ "determine the type of output that that function returns. \n\n"
+ "To resolve this please provide a meta= keyword.\n"
+ "The docstring of the Dask function you ran should have more information.\n\n")
+ msg += ("Original error is below:\n"
+ "------------------------\n"
+ "{1}\n\n"
+ "Traceback:\n"
+ "---------\n"
+ "{2}")
+ msg = msg.format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb)
raise ValueError(msg)
diff --git a/dask/delayed.py b/dask/delayed.py
index 484a45564..1da6e040c 100644
--- a/dask/delayed.py
+++ b/dask/delayed.py
@@ -132,7 +132,9 @@ def delayed(obj, name=None, pure=None, nout=None, traverse=True):
The function or object to wrap
name : string or hashable, optional
The key to use in the underlying graph for the wrapped object. Defaults
- to hashing content.
+ to hashing content. Note that this only affects the name of the object
+ wrapped by this call to delayed, and *not* the output of delayed
+ function calls - for that use ``dask_key_name=`` as described below.
pure : bool, optional
Indicates whether calling the resulting ``Delayed`` object is a pure
operation. If True, arguments to the call are hashed to produce
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index fb39b4e1d..6d144c877 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -17,6 +17,7 @@ DataFrame
- Allow `t` as shorthand for `table` in `to_hdf` for pandas compatibility `Jörg Dietrich`_
- Added top level `isna` method for Dask DataFrames (:pr:`3294`) `Christopher Ren`_
- Fix selection on partition column on ``read_parquet`` for ``engine="pyarrow"`` (:pr:`3207`) `Uwe Korn`_
+- Provide more informative error message for meta= errors (:pr:`3343`) `Matthew Rocklin`_
Bag
+++
| DataFrame: Splitting a list to multiple columns is throwing error
I was trying to split a string and saved value in a temp column and use that column to populate additional columns. I am able to split string and convert to a list. But while fetching elements from list and assigning to new column is throwing error.
I have tried to recreate the error with below code:
```
df = pd.DataFrame({ 'a' : [1, 2, 3] ,
'b' : ['a|b|c|d|e|f|g', 'a|b|c|d|e|f|g', 'a|b|c|d|e|f|g']
})
ddf = dd.from_pandas(df,1)
ddf['c'] = ddf.b.str.split('|')
ddf = ddf.assign(col1= ddf.c.map(lambda x: x[0]))
ddf = ddf.assign(col2= ddf.c.map(lambda x: x[1]))
ddf = ddf.assign(col3= ddf.c.map(lambda x: x[2]))
ddf = ddf.assign(col4= ddf.c.map(lambda x: x[3]))
```
Getting error while executing last line >> ddf = ddf.assign(col4= ddf.c.map(lambda x: x[3]))
IndexError('string index out of range',)
| dask/dask | diff --git a/dask/array/tests/test_routines.py b/dask/array/tests/test_routines.py
index 3e1c694b2..70bd0ea1c 100644
--- a/dask/array/tests/test_routines.py
+++ b/dask/array/tests/test_routines.py
@@ -1,6 +1,7 @@
from __future__ import division, print_function, absolute_import
import itertools
+import textwrap
import pytest
from distutils.version import LooseVersion
@@ -890,6 +891,8 @@ def test_choose():
def test_piecewise():
+ np.random.seed(1337)
+
x = np.random.randint(10, size=(15, 16))
d = da.from_array(x, chunks=(4, 5))
@@ -898,16 +901,33 @@ def test_piecewise():
da.piecewise(d, [d < 5, d >= 5], [lambda e, v, k: e + 1, 5], 1, k=2)
)
+
[email protected](
+ LooseVersion(np.__version__) < '1.12.0',
+ reason=textwrap.dedent(
+ """\
+ NumPy piecewise mishandles the otherwise condition pre-1.12.0.
+
+ xref: https://github.com/numpy/numpy/issues/5737
+ """
+ )
+)
+def test_piecewise_otherwise():
+ np.random.seed(1337)
+
+ x = np.random.randint(10, size=(15, 16))
+ d = da.from_array(x, chunks=(4, 5))
+
assert_eq(
np.piecewise(
x,
- [x > 2, x <= 5],
+ [x > 5, x <= 2],
[lambda e, v, k: e + 1, lambda e, v, k: v * e, lambda e, v, k: 0],
1, k=2
),
da.piecewise(
d,
- [d > 5, d <= 5],
+ [d > 5, d <= 2],
[lambda e, v, k: e + 1, lambda e, v, k: v * e, lambda e, v, k: 0],
1, k=2
)
diff --git a/dask/dataframe/tests/test_dataframe.py b/dask/dataframe/tests/test_dataframe.py
index a7d15a898..fc9b5c2f9 100644
--- a/dask/dataframe/tests/test_dataframe.py
+++ b/dask/dataframe/tests/test_dataframe.py
@@ -3042,3 +3042,24 @@ def test_mixed_dask_array_multi_dimensional():
assert_eq(ddf + dx + 1, df + x + 1)
assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)
assert_eq(ddf[['y', 'x']] + dx + 1, df[['y', 'x']] + x + 1)
+
+
+def test_meta_raises():
+ # Raise when we use a user defined fucntion
+ s = pd.Series(['abcd', 'abcd'])
+ ds = dd.from_pandas(s, npartitions=2)
+ try:
+ ds.map(lambda x: x[3])
+ except ValueError as e:
+ assert "meta=" in str(e)
+
+ # But not otherwise
+ df = pd.DataFrame({'a': ['x', 'y', 'y'],
+ 'b': ['x', 'y', 'z'],
+ 'c': [1, 2, 3]})
+ ddf = dd.from_pandas(df, npartitions=1)
+
+ with pytest.raises(Exception) as info:
+ ddf.a + ddf.c
+
+ assert "meta=" not in str(info.value)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | 1.21 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
click==8.0.4
cloudpickle==2.2.1
-e git+https://github.com/dask/dask.git@a18e4e9934eda54ed78edb11b80228cf9124c607#egg=dask
distributed==1.21.8
HeapDict==1.0.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.0.4
- cloudpickle==2.2.1
- distributed==1.21.8
- heapdict==1.0.1
- locket==1.0.0
- msgpack==1.0.5
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- toolz==0.12.0
- tornado==6.1
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/dataframe/tests/test_dataframe.py::test_meta_raises"
]
| [
"dask/dataframe/tests/test_dataframe.py::test_Dataframe",
"dask/dataframe/tests/test_dataframe.py::test_attributes",
"dask/dataframe/tests/test_dataframe.py::test_timezone_freq[npartitions1]",
"dask/dataframe/tests/test_dataframe.py::test_clip[2-5]",
"dask/dataframe/tests/test_dataframe.py::test_clip[2.5-3.5]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_picklable",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_divisions",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_month",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include0-None]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[None-exclude1]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include2-exclude2]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include3-None]",
"dask/dataframe/tests/test_dataframe.py::test_to_timestamp",
"dask/dataframe/tests/test_dataframe.py::test_apply",
"dask/dataframe/tests/test_dataframe.py::test_cov_corr_mixed",
"dask/dataframe/tests/test_dataframe.py::test_apply_infer_columns",
"dask/dataframe/tests/test_dataframe.py::test_info",
"dask/dataframe/tests/test_dataframe.py::test_groupby_multilevel_info",
"dask/dataframe/tests/test_dataframe.py::test_categorize_info",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-False]",
"dask/dataframe/tests/test_dataframe.py::test_shift",
"dask/dataframe/tests/test_dataframe.py::test_shift_with_freq",
"dask/dataframe/tests/test_dataframe.py::test_first_and_last[first]",
"dask/dataframe/tests/test_dataframe.py::test_first_and_last[last]",
"dask/dataframe/tests/test_dataframe.py::test_datetime_loc_open_slicing"
]
| [
"dask/array/tests/test_routines.py::test_array",
"dask/array/tests/test_routines.py::test_atleast_nd_no_args[atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_no_args[atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_no_args[atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape0-chunks0-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape0-chunks0-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape0-chunks0-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape1-chunks1-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape1-chunks1-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape1-chunks1-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape2-chunks2-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape2-chunks2-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape2-chunks2-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape3-chunks3-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape3-chunks3-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape3-chunks3-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape4-chunks4-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape4-chunks4-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape4-chunks4-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape10-shape20-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape10-shape20-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape10-shape20-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape11-shape21-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape11-shape21-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape11-shape21-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape12-shape22-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape12-shape22-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape12-shape22-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape13-shape23-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape13-shape23-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape13-shape23-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape14-shape24-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape14-shape24-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape14-shape24-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape15-shape25-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape15-shape25-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape15-shape25-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape16-shape26-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape16-shape26-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape16-shape26-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape17-shape27-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape17-shape27-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape17-shape27-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape18-shape28-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape18-shape28-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape18-shape28-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape19-shape29-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape19-shape29-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape19-shape29-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape110-shape210-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape110-shape210-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape110-shape210-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape111-shape211-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape111-shape211-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape111-shape211-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape112-shape212-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape112-shape212-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape112-shape212-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape113-shape213-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape113-shape213-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape113-shape213-atleast_3d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape114-shape214-atleast_1d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape114-shape214-atleast_2d]",
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape114-shape214-atleast_3d]",
"dask/array/tests/test_routines.py::test_transpose",
"dask/array/tests/test_routines.py::test_transpose_negative_axes",
"dask/array/tests/test_routines.py::test_swapaxes",
"dask/array/tests/test_routines.py::test_flip[shape0-flipud-kwargs0]",
"dask/array/tests/test_routines.py::test_flip[shape0-fliplr-kwargs1]",
"dask/array/tests/test_routines.py::test_flip[shape0-flip-kwargs2]",
"dask/array/tests/test_routines.py::test_flip[shape0-flip-kwargs3]",
"dask/array/tests/test_routines.py::test_flip[shape0-flip-kwargs4]",
"dask/array/tests/test_routines.py::test_flip[shape0-flip-kwargs5]",
"dask/array/tests/test_routines.py::test_flip[shape1-flipud-kwargs0]",
"dask/array/tests/test_routines.py::test_flip[shape1-fliplr-kwargs1]",
"dask/array/tests/test_routines.py::test_flip[shape1-flip-kwargs2]",
"dask/array/tests/test_routines.py::test_flip[shape1-flip-kwargs3]",
"dask/array/tests/test_routines.py::test_flip[shape1-flip-kwargs4]",
"dask/array/tests/test_routines.py::test_flip[shape1-flip-kwargs5]",
"dask/array/tests/test_routines.py::test_flip[shape2-flipud-kwargs0]",
"dask/array/tests/test_routines.py::test_flip[shape2-fliplr-kwargs1]",
"dask/array/tests/test_routines.py::test_flip[shape2-flip-kwargs2]",
"dask/array/tests/test_routines.py::test_flip[shape2-flip-kwargs3]",
"dask/array/tests/test_routines.py::test_flip[shape2-flip-kwargs4]",
"dask/array/tests/test_routines.py::test_flip[shape2-flip-kwargs5]",
"dask/array/tests/test_routines.py::test_flip[shape3-flipud-kwargs0]",
"dask/array/tests/test_routines.py::test_flip[shape3-fliplr-kwargs1]",
"dask/array/tests/test_routines.py::test_flip[shape3-flip-kwargs2]",
"dask/array/tests/test_routines.py::test_flip[shape3-flip-kwargs3]",
"dask/array/tests/test_routines.py::test_flip[shape3-flip-kwargs4]",
"dask/array/tests/test_routines.py::test_flip[shape3-flip-kwargs5]",
"dask/array/tests/test_routines.py::test_flip[shape4-flipud-kwargs0]",
"dask/array/tests/test_routines.py::test_flip[shape4-fliplr-kwargs1]",
"dask/array/tests/test_routines.py::test_flip[shape4-flip-kwargs2]",
"dask/array/tests/test_routines.py::test_flip[shape4-flip-kwargs3]",
"dask/array/tests/test_routines.py::test_flip[shape4-flip-kwargs4]",
"dask/array/tests/test_routines.py::test_flip[shape4-flip-kwargs5]",
"dask/array/tests/test_routines.py::test_matmul[x_shape0-y_shape0]",
"dask/array/tests/test_routines.py::test_matmul[x_shape1-y_shape1]",
"dask/array/tests/test_routines.py::test_matmul[x_shape2-y_shape2]",
"dask/array/tests/test_routines.py::test_matmul[x_shape3-y_shape3]",
"dask/array/tests/test_routines.py::test_matmul[x_shape4-y_shape4]",
"dask/array/tests/test_routines.py::test_matmul[x_shape5-y_shape5]",
"dask/array/tests/test_routines.py::test_matmul[x_shape6-y_shape6]",
"dask/array/tests/test_routines.py::test_matmul[x_shape7-y_shape7]",
"dask/array/tests/test_routines.py::test_matmul[x_shape8-y_shape8]",
"dask/array/tests/test_routines.py::test_matmul[x_shape9-y_shape9]",
"dask/array/tests/test_routines.py::test_matmul[x_shape10-y_shape10]",
"dask/array/tests/test_routines.py::test_matmul[x_shape11-y_shape11]",
"dask/array/tests/test_routines.py::test_matmul[x_shape12-y_shape12]",
"dask/array/tests/test_routines.py::test_matmul[x_shape13-y_shape13]",
"dask/array/tests/test_routines.py::test_matmul[x_shape14-y_shape14]",
"dask/array/tests/test_routines.py::test_matmul[x_shape15-y_shape15]",
"dask/array/tests/test_routines.py::test_matmul[x_shape16-y_shape16]",
"dask/array/tests/test_routines.py::test_matmul[x_shape17-y_shape17]",
"dask/array/tests/test_routines.py::test_matmul[x_shape18-y_shape18]",
"dask/array/tests/test_routines.py::test_matmul[x_shape19-y_shape19]",
"dask/array/tests/test_routines.py::test_matmul[x_shape20-y_shape20]",
"dask/array/tests/test_routines.py::test_matmul[x_shape21-y_shape21]",
"dask/array/tests/test_routines.py::test_matmul[x_shape22-y_shape22]",
"dask/array/tests/test_routines.py::test_matmul[x_shape23-y_shape23]",
"dask/array/tests/test_routines.py::test_matmul[x_shape24-y_shape24]",
"dask/array/tests/test_routines.py::test_tensordot",
"dask/array/tests/test_routines.py::test_tensordot_2[0]",
"dask/array/tests/test_routines.py::test_tensordot_2[1]",
"dask/array/tests/test_routines.py::test_tensordot_2[axes2]",
"dask/array/tests/test_routines.py::test_tensordot_2[axes3]",
"dask/array/tests/test_routines.py::test_tensordot_2[axes4]",
"dask/array/tests/test_routines.py::test_tensordot_2[axes5]",
"dask/array/tests/test_routines.py::test_tensordot_2[axes6]",
"dask/array/tests/test_routines.py::test_dot_method",
"dask/array/tests/test_routines.py::test_vdot[shape0-chunks0]",
"dask/array/tests/test_routines.py::test_vdot[shape1-chunks1]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-ndim-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-sum-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-range2-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-ndim-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-sum-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-range2-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-ndim-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-sum-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-range2-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-ndim-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-sum-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-range2-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape0-axes0-sum0-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape0-axes0-sum1-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape0-axes0-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape1-0-sum0-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape1-0-sum1-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape1-0-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape2-axes2-sum0-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape2-axes2-sum1-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape2-axes2-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape3-axes3-sum0-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape3-axes3-sum1-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape3-axes3-range-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape4-axes4-sum0-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape4-axes4-sum1-<lambda>]",
"dask/array/tests/test_routines.py::test_apply_over_axes[shape4-axes4-range-<lambda>]",
"dask/array/tests/test_routines.py::test_ptp[shape0-None]",
"dask/array/tests/test_routines.py::test_ptp[shape1-0]",
"dask/array/tests/test_routines.py::test_ptp[shape2-1]",
"dask/array/tests/test_routines.py::test_ptp[shape3-2]",
"dask/array/tests/test_routines.py::test_ptp[shape4--1]",
"dask/array/tests/test_routines.py::test_diff[0-shape0-0]",
"dask/array/tests/test_routines.py::test_diff[0-shape1-1]",
"dask/array/tests/test_routines.py::test_diff[0-shape2-2]",
"dask/array/tests/test_routines.py::test_diff[0-shape3--1]",
"dask/array/tests/test_routines.py::test_diff[1-shape0-0]",
"dask/array/tests/test_routines.py::test_diff[1-shape1-1]",
"dask/array/tests/test_routines.py::test_diff[1-shape2-2]",
"dask/array/tests/test_routines.py::test_diff[1-shape3--1]",
"dask/array/tests/test_routines.py::test_diff[2-shape0-0]",
"dask/array/tests/test_routines.py::test_diff[2-shape1-1]",
"dask/array/tests/test_routines.py::test_diff[2-shape2-2]",
"dask/array/tests/test_routines.py::test_diff[2-shape3--1]",
"dask/array/tests/test_routines.py::test_ediff1d[None-None-shape0]",
"dask/array/tests/test_routines.py::test_ediff1d[None-None-shape1]",
"dask/array/tests/test_routines.py::test_ediff1d[0-0-shape0]",
"dask/array/tests/test_routines.py::test_ediff1d[0-0-shape1]",
"dask/array/tests/test_routines.py::test_ediff1d[to_end2-to_begin2-shape0]",
"dask/array/tests/test_routines.py::test_ediff1d[to_end2-to_begin2-shape1]",
"dask/array/tests/test_routines.py::test_topk",
"dask/array/tests/test_routines.py::test_topk_k_bigger_than_chunk",
"dask/array/tests/test_routines.py::test_bincount",
"dask/array/tests/test_routines.py::test_bincount_with_weights",
"dask/array/tests/test_routines.py::test_bincount_raises_informative_error_on_missing_minlength_kwarg",
"dask/array/tests/test_routines.py::test_digitize",
"dask/array/tests/test_routines.py::test_histogram",
"dask/array/tests/test_routines.py::test_histogram_alternative_bins_range",
"dask/array/tests/test_routines.py::test_histogram_return_type",
"dask/array/tests/test_routines.py::test_histogram_extra_args_and_shapes",
"dask/array/tests/test_routines.py::test_cov",
"dask/array/tests/test_routines.py::test_corrcoef",
"dask/array/tests/test_routines.py::test_round",
"dask/array/tests/test_routines.py::test_unique_kwargs[False-False-False]",
"dask/array/tests/test_routines.py::test_unique_kwargs[False-False-True]",
"dask/array/tests/test_routines.py::test_unique_kwargs[False-True-False]",
"dask/array/tests/test_routines.py::test_unique_kwargs[False-True-True]",
"dask/array/tests/test_routines.py::test_unique_kwargs[True-False-False]",
"dask/array/tests/test_routines.py::test_unique_kwargs[True-False-True]",
"dask/array/tests/test_routines.py::test_unique_kwargs[True-True-False]",
"dask/array/tests/test_routines.py::test_unique_kwargs[True-True-True]",
"dask/array/tests/test_routines.py::test_unique_rand[shape0-chunks0-0-10-23]",
"dask/array/tests/test_routines.py::test_unique_rand[shape0-chunks0-0-10-796]",
"dask/array/tests/test_routines.py::test_unique_rand[shape1-chunks1-0-10-23]",
"dask/array/tests/test_routines.py::test_unique_rand[shape1-chunks1-0-10-796]",
"dask/array/tests/test_routines.py::test_unique_rand[shape2-chunks2-0-10-23]",
"dask/array/tests/test_routines.py::test_unique_rand[shape2-chunks2-0-10-796]",
"dask/array/tests/test_routines.py::test_unique_rand[shape3-chunks3-0-10-23]",
"dask/array/tests/test_routines.py::test_unique_rand[shape3-chunks3-0-10-796]",
"dask/array/tests/test_routines.py::test_roll[None-3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[None-3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[None-7-chunks0]",
"dask/array/tests/test_routines.py::test_roll[None-7-chunks1]",
"dask/array/tests/test_routines.py::test_roll[None-9-chunks0]",
"dask/array/tests/test_routines.py::test_roll[None-9-chunks1]",
"dask/array/tests/test_routines.py::test_roll[None-shift3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[None-shift3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[None-shift4-chunks0]",
"dask/array/tests/test_routines.py::test_roll[None-shift4-chunks1]",
"dask/array/tests/test_routines.py::test_roll[0-3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[0-3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[0-7-chunks0]",
"dask/array/tests/test_routines.py::test_roll[0-7-chunks1]",
"dask/array/tests/test_routines.py::test_roll[0-9-chunks0]",
"dask/array/tests/test_routines.py::test_roll[0-9-chunks1]",
"dask/array/tests/test_routines.py::test_roll[0-shift3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[0-shift3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[0-shift4-chunks0]",
"dask/array/tests/test_routines.py::test_roll[0-shift4-chunks1]",
"dask/array/tests/test_routines.py::test_roll[1-3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[1-3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[1-7-chunks0]",
"dask/array/tests/test_routines.py::test_roll[1-7-chunks1]",
"dask/array/tests/test_routines.py::test_roll[1-9-chunks0]",
"dask/array/tests/test_routines.py::test_roll[1-9-chunks1]",
"dask/array/tests/test_routines.py::test_roll[1-shift3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[1-shift3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[1-shift4-chunks0]",
"dask/array/tests/test_routines.py::test_roll[1-shift4-chunks1]",
"dask/array/tests/test_routines.py::test_roll[-1-3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[-1-3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[-1-7-chunks0]",
"dask/array/tests/test_routines.py::test_roll[-1-7-chunks1]",
"dask/array/tests/test_routines.py::test_roll[-1-9-chunks0]",
"dask/array/tests/test_routines.py::test_roll[-1-9-chunks1]",
"dask/array/tests/test_routines.py::test_roll[-1-shift3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[-1-shift3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[-1-shift4-chunks0]",
"dask/array/tests/test_routines.py::test_roll[-1-shift4-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis4-3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis4-3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis4-7-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis4-7-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis4-9-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis4-9-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis4-shift3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis4-shift3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis4-shift4-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis4-shift4-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis5-3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis5-3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis5-7-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis5-7-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis5-9-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis5-9-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis5-shift3-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis5-shift3-chunks1]",
"dask/array/tests/test_routines.py::test_roll[axis5-shift4-chunks0]",
"dask/array/tests/test_routines.py::test_roll[axis5-shift4-chunks1]",
"dask/array/tests/test_routines.py::test_ravel",
"dask/array/tests/test_routines.py::test_squeeze[None-True]",
"dask/array/tests/test_routines.py::test_squeeze[None-False]",
"dask/array/tests/test_routines.py::test_squeeze[0-True]",
"dask/array/tests/test_routines.py::test_squeeze[0-False]",
"dask/array/tests/test_routines.py::test_squeeze[-1-True]",
"dask/array/tests/test_routines.py::test_squeeze[-1-False]",
"dask/array/tests/test_routines.py::test_squeeze[axis3-True]",
"dask/array/tests/test_routines.py::test_squeeze[axis3-False]",
"dask/array/tests/test_routines.py::test_vstack",
"dask/array/tests/test_routines.py::test_hstack",
"dask/array/tests/test_routines.py::test_dstack",
"dask/array/tests/test_routines.py::test_take",
"dask/array/tests/test_routines.py::test_take_dask_from_numpy",
"dask/array/tests/test_routines.py::test_compress",
"dask/array/tests/test_routines.py::test_extract",
"dask/array/tests/test_routines.py::test_isnull",
"dask/array/tests/test_routines.py::test_isclose",
"dask/array/tests/test_routines.py::test_allclose",
"dask/array/tests/test_routines.py::test_choose",
"dask/array/tests/test_routines.py::test_piecewise",
"dask/array/tests/test_routines.py::test_piecewise_otherwise",
"dask/array/tests/test_routines.py::test_argwhere",
"dask/array/tests/test_routines.py::test_argwhere_obj",
"dask/array/tests/test_routines.py::test_argwhere_str",
"dask/array/tests/test_routines.py::test_where",
"dask/array/tests/test_routines.py::test_where_scalar_dtype",
"dask/array/tests/test_routines.py::test_where_bool_optimization",
"dask/array/tests/test_routines.py::test_where_nonzero",
"dask/array/tests/test_routines.py::test_where_incorrect_args",
"dask/array/tests/test_routines.py::test_count_nonzero",
"dask/array/tests/test_routines.py::test_count_nonzero_axis[None]",
"dask/array/tests/test_routines.py::test_count_nonzero_axis[0]",
"dask/array/tests/test_routines.py::test_count_nonzero_axis[axis2]",
"dask/array/tests/test_routines.py::test_count_nonzero_axis[axis3]",
"dask/array/tests/test_routines.py::test_count_nonzero_obj",
"dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[None]",
"dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[0]",
"dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[axis2]",
"dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[axis3]",
"dask/array/tests/test_routines.py::test_count_nonzero_str",
"dask/array/tests/test_routines.py::test_flatnonzero",
"dask/array/tests/test_routines.py::test_nonzero",
"dask/array/tests/test_routines.py::test_nonzero_method",
"dask/array/tests/test_routines.py::test_coarsen",
"dask/array/tests/test_routines.py::test_coarsen_with_excess",
"dask/array/tests/test_routines.py::test_insert",
"dask/array/tests/test_routines.py::test_multi_insert",
"dask/array/tests/test_routines.py::test_result_type",
"dask/dataframe/tests/test_dataframe.py::test_head_tail",
"dask/dataframe/tests/test_dataframe.py::test_head_npartitions",
"dask/dataframe/tests/test_dataframe.py::test_head_npartitions_warn",
"dask/dataframe/tests/test_dataframe.py::test_index_head",
"dask/dataframe/tests/test_dataframe.py::test_Series",
"dask/dataframe/tests/test_dataframe.py::test_Index",
"dask/dataframe/tests/test_dataframe.py::test_Scalar",
"dask/dataframe/tests/test_dataframe.py::test_column_names",
"dask/dataframe/tests/test_dataframe.py::test_index_names",
"dask/dataframe/tests/test_dataframe.py::test_timezone_freq[1]",
"dask/dataframe/tests/test_dataframe.py::test_rename_columns",
"dask/dataframe/tests/test_dataframe.py::test_rename_series",
"dask/dataframe/tests/test_dataframe.py::test_rename_series_method",
"dask/dataframe/tests/test_dataframe.py::test_describe",
"dask/dataframe/tests/test_dataframe.py::test_describe_empty",
"dask/dataframe/tests/test_dataframe.py::test_cumulative",
"dask/dataframe/tests/test_dataframe.py::test_dropna",
"dask/dataframe/tests/test_dataframe.py::test_where_mask",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_multi_argument",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_names",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_column_info",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_method_names",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_keeps_kwargs_readable",
"dask/dataframe/tests/test_dataframe.py::test_metadata_inference_single_partition_aligned_args",
"dask/dataframe/tests/test_dataframe.py::test_drop_duplicates",
"dask/dataframe/tests/test_dataframe.py::test_drop_duplicates_subset",
"dask/dataframe/tests/test_dataframe.py::test_get_partition",
"dask/dataframe/tests/test_dataframe.py::test_ndim",
"dask/dataframe/tests/test_dataframe.py::test_dtype",
"dask/dataframe/tests/test_dataframe.py::test_value_counts",
"dask/dataframe/tests/test_dataframe.py::test_unique",
"dask/dataframe/tests/test_dataframe.py::test_isin",
"dask/dataframe/tests/test_dataframe.py::test_len",
"dask/dataframe/tests/test_dataframe.py::test_size",
"dask/dataframe/tests/test_dataframe.py::test_nbytes",
"dask/dataframe/tests/test_dataframe.py::test_quantile",
"dask/dataframe/tests/test_dataframe.py::test_quantile_missing",
"dask/dataframe/tests/test_dataframe.py::test_empty_quantile",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_quantile",
"dask/dataframe/tests/test_dataframe.py::test_index",
"dask/dataframe/tests/test_dataframe.py::test_assign",
"dask/dataframe/tests/test_dataframe.py::test_map",
"dask/dataframe/tests/test_dataframe.py::test_concat",
"dask/dataframe/tests/test_dataframe.py::test_args",
"dask/dataframe/tests/test_dataframe.py::test_known_divisions",
"dask/dataframe/tests/test_dataframe.py::test_unknown_divisions",
"dask/dataframe/tests/test_dataframe.py::test_align[inner]",
"dask/dataframe/tests/test_dataframe.py::test_align[outer]",
"dask/dataframe/tests/test_dataframe.py::test_align[left]",
"dask/dataframe/tests/test_dataframe.py::test_align[right]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[inner]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[outer]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[left]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[right]",
"dask/dataframe/tests/test_dataframe.py::test_combine",
"dask/dataframe/tests/test_dataframe.py::test_combine_first",
"dask/dataframe/tests/test_dataframe.py::test_random_partitions",
"dask/dataframe/tests/test_dataframe.py::test_series_round",
"dask/dataframe/tests/test_dataframe.py::test_repartition_divisions",
"dask/dataframe/tests/test_dataframe.py::test_repartition_on_pandas_dataframe",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions_same_limits",
"dask/dataframe/tests/test_dataframe.py::test_repartition_object_index",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_errors",
"dask/dataframe/tests/test_dataframe.py::test_embarrassingly_parallel_operations",
"dask/dataframe/tests/test_dataframe.py::test_fillna",
"dask/dataframe/tests/test_dataframe.py::test_fillna_multi_dataframe",
"dask/dataframe/tests/test_dataframe.py::test_ffill_bfill",
"dask/dataframe/tests/test_dataframe.py::test_fillna_series_types",
"dask/dataframe/tests/test_dataframe.py::test_sample",
"dask/dataframe/tests/test_dataframe.py::test_sample_without_replacement",
"dask/dataframe/tests/test_dataframe.py::test_datetime_accessor",
"dask/dataframe/tests/test_dataframe.py::test_str_accessor",
"dask/dataframe/tests/test_dataframe.py::test_empty_max",
"dask/dataframe/tests/test_dataframe.py::test_deterministic_apply_concat_apply_names",
"dask/dataframe/tests/test_dataframe.py::test_aca_meta_infer",
"dask/dataframe/tests/test_dataframe.py::test_aca_split_every",
"dask/dataframe/tests/test_dataframe.py::test_reduction_method",
"dask/dataframe/tests/test_dataframe.py::test_reduction_method_split_every",
"dask/dataframe/tests/test_dataframe.py::test_pipe",
"dask/dataframe/tests/test_dataframe.py::test_gh_517",
"dask/dataframe/tests/test_dataframe.py::test_drop_axis_1",
"dask/dataframe/tests/test_dataframe.py::test_gh580",
"dask/dataframe/tests/test_dataframe.py::test_rename_dict",
"dask/dataframe/tests/test_dataframe.py::test_rename_function",
"dask/dataframe/tests/test_dataframe.py::test_rename_index",
"dask/dataframe/tests/test_dataframe.py::test_to_frame",
"dask/dataframe/tests/test_dataframe.py::test_apply_warns",
"dask/dataframe/tests/test_dataframe.py::test_applymap",
"dask/dataframe/tests/test_dataframe.py::test_abs",
"dask/dataframe/tests/test_dataframe.py::test_round",
"dask/dataframe/tests/test_dataframe.py::test_cov",
"dask/dataframe/tests/test_dataframe.py::test_corr",
"dask/dataframe/tests/test_dataframe.py::test_cov_corr_meta",
"dask/dataframe/tests/test_dataframe.py::test_autocorr",
"dask/dataframe/tests/test_dataframe.py::test_index_time_properties",
"dask/dataframe/tests/test_dataframe.py::test_nlargest_nsmallest",
"dask/dataframe/tests/test_dataframe.py::test_reset_index",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_compute_forward_kwargs",
"dask/dataframe/tests/test_dataframe.py::test_series_iteritems",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_iterrows",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_itertuples",
"dask/dataframe/tests/test_dataframe.py::test_astype",
"dask/dataframe/tests/test_dataframe.py::test_astype_categoricals",
"dask/dataframe/tests/test_dataframe.py::test_astype_categoricals_known",
"dask/dataframe/tests/test_dataframe.py::test_groupby_callable",
"dask/dataframe/tests/test_dataframe.py::test_methods_tokenize_differently",
"dask/dataframe/tests/test_dataframe.py::test_gh_1301",
"dask/dataframe/tests/test_dataframe.py::test_timeseries_sorted",
"dask/dataframe/tests/test_dataframe.py::test_column_assignment",
"dask/dataframe/tests/test_dataframe.py::test_columns_assignment",
"dask/dataframe/tests/test_dataframe.py::test_attribute_assignment",
"dask/dataframe/tests/test_dataframe.py::test_setitem_triggering_realign",
"dask/dataframe/tests/test_dataframe.py::test_inplace_operators",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-False]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-False]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin_empty_partitions",
"dask/dataframe/tests/test_dataframe.py::test_getitem_meta",
"dask/dataframe/tests/test_dataframe.py::test_getitem_multilevel",
"dask/dataframe/tests/test_dataframe.py::test_diff",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[None]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[2]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[None]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[2]",
"dask/dataframe/tests/test_dataframe.py::test_values",
"dask/dataframe/tests/test_dataframe.py::test_copy",
"dask/dataframe/tests/test_dataframe.py::test_del",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-True]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-False]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-True]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-False]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sum]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[mean]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[std]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[var]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[count]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[min]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[max]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmin]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmax]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[prod]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[all]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sem]",
"dask/dataframe/tests/test_dataframe.py::test_to_datetime",
"dask/dataframe/tests/test_dataframe.py::test_to_timedelta",
"dask/dataframe/tests/test_dataframe.py::test_isna[values0]",
"dask/dataframe/tests/test_dataframe.py::test_isna[values1]",
"dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[0]",
"dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_nonmonotonic",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-False-drop0]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-True-drop1]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-False-False-drop2]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-True-False-drop3]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-False-drop4]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-True-drop5]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1.5-None-False-True-drop6]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-False-False-drop7]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-True-False-drop8]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-2.5-False-False-drop9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index0-0-9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index1--1-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index2-None-10]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index3-None-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index4--1-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index5-None-2]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index6--2-3]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index7-None-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index8-left8-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index9-None-right9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index10-left10-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index11-None-right11]",
"dask/dataframe/tests/test_dataframe.py::test_better_errors_object_reductions",
"dask/dataframe/tests/test_dataframe.py::test_sample_empty_partitions",
"dask/dataframe/tests/test_dataframe.py::test_coerce",
"dask/dataframe/tests/test_dataframe.py::test_bool",
"dask/dataframe/tests/test_dataframe.py::test_cumulative_multiple_columns",
"dask/dataframe/tests/test_dataframe.py::test_map_partition_array[asarray]",
"dask/dataframe/tests/test_dataframe.py::test_map_partition_array[func1]",
"dask/dataframe/tests/test_dataframe.py::test_mixed_dask_array_operations",
"dask/dataframe/tests/test_dataframe.py::test_mixed_dask_array_operations_errors",
"dask/dataframe/tests/test_dataframe.py::test_mixed_dask_array_multi_dimensional"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,341 | [
"dask/dataframe/core.py",
"docs/source/changelog.rst",
"dask/delayed.py",
"dask/dataframe/utils.py"
]
| [
"dask/dataframe/core.py",
"docs/source/changelog.rst",
"dask/delayed.py",
"dask/dataframe/utils.py"
]
|
yevhen-m__flake8-fancy-header-6 | 97fa7a93e2c37119f7e78bda4385366e1f8249a1 | 2018-03-29 13:07:17 | 97fa7a93e2c37119f7e78bda4385366e1f8249a1 | diff --git a/flake8_fancy_header/checker.py b/flake8_fancy_header/checker.py
index eaa9baa..94717b9 100644
--- a/flake8_fancy_header/checker.py
+++ b/flake8_fancy_header/checker.py
@@ -60,9 +60,6 @@ class FancyHeaderCheckerBefore37(BaseChecker):
def run(self):
body = self.tree.body
if not body:
- yield (
- 1, 1, self.message_missing, type(self),
- )
return
if not isinstance(body[0], ast.Expr):
@@ -87,6 +84,9 @@ class FancyHeaderChecker(BaseChecker):
def run(self):
docstring = self.tree.docstring
if not docstring:
+ if not self.tree.body:
+ return
+
yield (
1, 1, self.message_missing, type(self),
)
| Header in empty file
I think plugin should allow empty files without header.
It will be great if you change current behavior | yevhen-m/flake8-fancy-header | diff --git a/tests/test_checker.py b/tests/test_checker.py
index b229032..3eb8bfd 100644
--- a/tests/test_checker.py
+++ b/tests/test_checker.py
@@ -23,7 +23,11 @@ class CheckerTestCase(unittest.TestCase):
def test_empty_module(self):
module = ast.parse('')
checker = FancyHeaderChecker(tree=module, filename='spam.py')
- self.assertEqual(len(list(checker.run())), 1)
+ self.assertEqual(len(list(checker.run())), 0)
+
+ module = ast.parse('\n\n\n')
+ checker = FancyHeaderChecker(tree=module, filename='spam.py')
+ self.assertEqual(len(list(checker.run())), 0)
def test_module_with_docstring(self):
module = ast.parse('"Spam module."')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
-e git+https://github.com/yevhen-m/flake8-fancy-header.git@97fa7a93e2c37119f7e78bda4385366e1f8249a1#egg=flake8_fancy_header
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: flake8-fancy-header
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/flake8-fancy-header
| [
"tests/test_checker.py::CheckerTestCase::test_empty_module"
]
| []
| [
"tests/test_checker.py::CheckerTestCase::test_checker_with_absolute_filename",
"tests/test_checker.py::CheckerTestCase::test_checker_with_absolute_filename_and_submodule",
"tests/test_checker.py::CheckerTestCase::test_module_with_docstring",
"tests/test_checker.py::CheckerTestCase::test_module_with_invalid_header",
"tests/test_checker.py::CheckerTestCase::test_module_with_relative_filename",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header_2",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header_and_leading_comment",
"tests/test_checker.py::CheckerTestCase::test_module_with_valid_header_for_init_module",
"tests/test_checker.py::CheckerTestCase::test_module_without_docstring",
"tests/test_checker.py::CheckerTestCase::test_valid_header_in_submodule",
"tests/test_checker.py::CheckerTestCase1::test_checker_not_from_project_root"
]
| []
| MIT License | 2,343 | [
"flake8_fancy_header/checker.py"
]
| [
"flake8_fancy_header/checker.py"
]
|
|
python-trio__outcome-6 | 43ef9ae8870c6a9e1ad2762b3284d417f21e2d75 | 2018-03-29 21:03:40 | 43ef9ae8870c6a9e1ad2762b3284d417f21e2d75 | diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst
index 74942ae..bd61a13 100644
--- a/docs/source/tutorial.rst
+++ b/docs/source/tutorial.rst
@@ -7,22 +7,22 @@ Tutorial
Outcome provides a function for capturing the outcome of a Python
function call, so that it can be passed around. The basic rule is::
- result = outcome.capture(f, *args)
+ result = outcome.capture(f, *args, **kwargs)
x = result.unwrap()
is the same as::
- x = f(*args)
+ x = f(*args, **kwargs)
even if ``f`` raises an error.
On Python 3.5+, there's also :func:`acapture`::
- result = await outcome.acapture(f, *args)
+ result = await outcome.acapture(f, *args, **kwargs)
x = result.unwrap()
which, like before, is the same as::
- x = await f(*args)
+ x = await f(*args, **kwargs)
See the :ref:`api-reference` for the types involved.
diff --git a/src/outcome/_async.py b/src/outcome/_async.py
index dabc726..293e5c0 100644
--- a/src/outcome/_async.py
+++ b/src/outcome/_async.py
@@ -7,8 +7,8 @@ from ._sync import (
__all__ = ['Error', 'Outcome', 'Value', 'acapture', 'capture']
-def capture(sync_fn, *args):
- """Run ``sync_fn(*args)`` and capture the result.
+def capture(sync_fn, *args, **kwargs):
+ """Run ``sync_fn(*args, **kwargs)`` and capture the result.
Returns:
Either a :class:`Value` or :class:`Error` as appropriate.
@@ -16,20 +16,20 @@ def capture(sync_fn, *args):
"""
# _sync.capture references ErrorBase and ValueBase
try:
- return Value(sync_fn(*args))
+ return Value(sync_fn(*args, **kwargs))
except BaseException as exc:
return Error(exc)
-async def acapture(async_fn, *args):
- """Run ``await async_fn(*args)`` and capture the result.
+async def acapture(async_fn, *args, **kwargs):
+ """Run ``await async_fn(*args, **kwargs)`` and capture the result.
Returns:
Either a :class:`Value` or :class:`Error` as appropriate.
"""
try:
- return Value(await async_fn(*args))
+ return Value(await async_fn(*args, **kwargs))
except BaseException as exc:
return Error(exc)
diff --git a/src/outcome/_sync.py b/src/outcome/_sync.py
index 5c06264..a74bbc3 100644
--- a/src/outcome/_sync.py
+++ b/src/outcome/_sync.py
@@ -9,15 +9,15 @@ from ._util import ABC
__all__ = ['Error', 'Outcome', 'Value', 'capture']
-def capture(sync_fn, *args):
- """Run ``sync_fn(*args)`` and capture the result.
+def capture(sync_fn, *args, **kwargs):
+ """Run ``sync_fn(*args, **kwargs)`` and capture the result.
Returns:
Either a :class:`Value` or :class:`Error` as appropriate.
"""
try:
- return Value(sync_fn(*args))
+ return Value(sync_fn(*args, **kwargs))
except BaseException as exc:
return Error(exc)
| Support **kwargs in capture/acapture?
I left this out of the original version of this code because in general in trio this is a complicated issue (https://github.com/python-trio/trio/issues/470), and I figured it was better to be consistent with other trio functions like `trio.run`, where we need to reserve kwargs for controlling the parent function. But now that it's a separate library, that doesn't really apply. And `capture` and `acapture` are obviously complete as they stand; there's no way we'll ever want to add any other arguments to them. | python-trio/outcome | diff --git a/tests/test_async.py b/tests/test_async.py
index 6166507..9dba615 100644
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -11,11 +11,11 @@ pytestmark = pytest.mark.trio
async def test_acapture():
- async def return_arg(x):
+ async def add(x, y):
await trio.hazmat.checkpoint()
- return x
+ return x + y
- v = await outcome.acapture(return_arg, 7)
+ v = await outcome.acapture(add, 3, y=4)
assert v == Value(7)
async def raise_ValueError(x):
diff --git a/tests/test_sync.py b/tests/test_sync.py
index 50c9cc5..73900bb 100644
--- a/tests/test_sync.py
+++ b/tests/test_sync.py
@@ -77,12 +77,12 @@ def test_Value_compare():
def test_capture():
- def return_arg(x):
- return x
+ def add(x, y):
+ return x + y
- v = outcome.capture(return_arg, 2)
+ v = outcome.capture(add, 2, y=3)
assert type(v) == Value
- assert v.unwrap() == 2
+ assert v.unwrap() == 5
def raise_ValueError(x):
raise ValueError(x)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"trio",
"pytest-trio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-generator==1.10
attrs==22.2.0
certifi==2021.5.30
contextvars==2.4
coverage==6.2
idna==3.10
immutables==0.19
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/python-trio/outcome.git@43ef9ae8870c6a9e1ad2762b3284d417f21e2d75#egg=outcome
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-trio==0.7.0
sniffio==1.2.0
sortedcontainers==2.4.0
tomli==1.2.3
trio==0.19.0
typing_extensions==4.1.1
zipp==3.6.0
| name: outcome
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-generator==1.10
- attrs==22.2.0
- contextvars==2.4
- coverage==6.2
- idna==3.10
- immutables==0.19
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-trio==0.7.0
- sniffio==1.2.0
- sortedcontainers==2.4.0
- tomli==1.2.3
- trio==0.19.0
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/outcome
| [
"tests/test_sync.py::test_capture"
]
| [
"tests/test_async.py::test_acapture"
]
| [
"tests/test_async.py::test_asend",
"tests/test_sync.py::test_Outcome",
"tests/test_sync.py::test_Outcome_eq_hash",
"tests/test_sync.py::test_Value_compare"
]
| []
| MIT/Apache-2.0 Dual License | 2,344 | [
"src/outcome/_async.py",
"docs/source/tutorial.rst",
"src/outcome/_sync.py"
]
| [
"src/outcome/_async.py",
"docs/source/tutorial.rst",
"src/outcome/_sync.py"
]
|
|
Unidata__siphon-206 | e99953a89c35afd15c6e24a54fa0ebba4e927946 | 2018-03-29 22:04:23 | f2e5e13fc49bea29492e6dbf64b2d5110ce72c2b | diff --git a/siphon/simplewebservice/iastate.py b/siphon/simplewebservice/iastate.py
index f31d8cfa..cb931967 100644
--- a/siphon/simplewebservice/iastate.py
+++ b/siphon/simplewebservice/iastate.py
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: BSD-3-Clause
"""Read upper air data from the IA State archives."""
+from datetime import datetime
import json
import warnings
@@ -64,9 +65,8 @@ class IAStateUpperAir(HTTPEndPoint):
"""
json_data = self._get_data_raw(time, site_id)
-
data = {}
- for pt in json_data:
+ for pt in json_data['profiles'][0]['profile']:
for field in ('drct', 'dwpc', 'hght', 'pres', 'sknt', 'tmpc'):
data.setdefault(field, []).append(np.nan if pt[field] is None else pt[field])
@@ -90,6 +90,10 @@ class IAStateUpperAir(HTTPEndPoint):
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
+ df['station'] = json_data['profiles'][0]['station']
+ df['time'] = datetime.strptime(json_data['profiles'][0]['valid'],
+ '%Y-%m-%dT%H:%M:%SZ')
+
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
@@ -98,8 +102,9 @@ class IAStateUpperAir(HTTPEndPoint):
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
- 'v_wind': 'knot'}
-
+ 'v_wind': 'knot',
+ 'station': None,
+ 'time': None}
return df
def _get_data_raw(self, time, site_id):
@@ -119,10 +124,10 @@ class IAStateUpperAir(HTTPEndPoint):
"""
path = ('raob.py?ts={time:%Y%m%d%H}00&station={stid}').format(time=time, stid=site_id)
resp = self.get_path(path)
- json_data = json.loads(resp.text)['profiles'][0]['profile']
+ json_data = json.loads(resp.text)
# See if the return is valid, but has no data
- if not json_data:
+ if not json_data['profiles'][0]['profile']:
raise ValueError('No data available for {time:%Y-%m-%d %HZ} '
'for station {stid}.'.format(time=time, stid=site_id))
return json_data
diff --git a/siphon/simplewebservice/wyoming.py b/siphon/simplewebservice/wyoming.py
index e2d9e4b7..48d0907f 100644
--- a/siphon/simplewebservice/wyoming.py
+++ b/siphon/simplewebservice/wyoming.py
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: BSD-3-Clause
"""Read upper air data from the Wyoming archives."""
+from datetime import datetime
from io import StringIO
import warnings
@@ -69,8 +70,10 @@ class WyomingUpperAir(HTTPEndPoint):
"""
raw_data = self._get_data_raw(time, site_id, region)
+ soup = BeautifulSoup(raw_data, 'html.parser')
+ tabular_data = StringIO(soup.find_all('pre')[0].contents[0])
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
- df = pd.read_fwf(raw_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
+ df = pd.read_fwf(tabular_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
@@ -78,6 +81,23 @@ class WyomingUpperAir(HTTPEndPoint):
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
+ # Parse metadata
+ meta_data = soup.find_all('pre')[1].contents[0]
+ lines = meta_data.splitlines()
+ station = lines[1].split(':')[1].strip()
+ station_number = int(lines[2].split(':')[1].strip())
+ sounding_time = datetime.strptime(lines[3].split(':')[1].strip(), '%y%m%d/%H%M')
+ latitude = float(lines[4].split(':')[1].strip())
+ longitude = float(lines[5].split(':')[1].strip())
+ elevation = float(lines[6].split(':')[1].strip())
+
+ df['station'] = station
+ df['station_number'] = station_number
+ df['time'] = sounding_time
+ df['latitude'] = latitude
+ df['longitude'] = longitude
+ df['elevation'] = elevation
+
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
@@ -86,7 +106,13 @@ class WyomingUpperAir(HTTPEndPoint):
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
- 'v_wind': 'knot'}
+ 'v_wind': 'knot',
+ 'station': None,
+ 'station_number': None,
+ 'time': None,
+ 'latitude': 'degrees',
+ 'longitude': 'degrees',
+ 'elevation': 'meter'}
return df
def _get_data_raw(self, time, site_id, region='naconf'):
@@ -103,7 +129,7 @@ class WyomingUpperAir(HTTPEndPoint):
Returns
-------
- a file-like object from which to read the data
+ text of the server response
"""
path = ('?region={region}&TYPE=TEXT%3ALIST'
@@ -118,5 +144,4 @@ class WyomingUpperAir(HTTPEndPoint):
'for station {stid}.'.format(time=time, region=region,
stid=site_id))
- soup = BeautifulSoup(resp.text, 'html.parser')
- return StringIO(soup.find_all('pre')[0].contents[0])
+ return resp.text
| Add time to upper air data
Current the returned data does not have a column for time, since this isn't provided as a column in the output--but it *is* in the text header. It would be nice to parse this and add as a column so that we have a self-contained dataset. It's probably also needed when we try to handle requesting and returning data for multiple times. | Unidata/siphon | diff --git a/siphon/tests/test_iastate.py b/siphon/tests/test_iastate.py
index c5d038f6..ca7448d2 100644
--- a/siphon/tests/test_iastate.py
+++ b/siphon/tests/test_iastate.py
@@ -19,6 +19,9 @@ def test_iastate():
"""Test that we are properly parsing data from the Iowa State archive."""
df = IAStateUpperAir.request_data(datetime(1999, 5, 4, 0), 'OUN')
+ assert(df['time'][0] == datetime(1999, 5, 4, 0))
+ assert(df['station'][0] == 'KOUN')
+
assert_almost_equal(df['pressure'][6], 872.7, 2)
assert_almost_equal(df['height'][6], 1172.0, 2)
assert_almost_equal(df['temperature'][6], 18.2, 2)
@@ -36,6 +39,8 @@ def test_iastate():
assert(df.units['v_wind'] == 'knot')
assert(df.units['speed'] == 'knot')
assert(df.units['direction'] == 'degrees')
+ assert(df.units['station'] is None)
+ assert(df.units['time'] is None)
@recorder.use_cassette('iastate_high_alt_sounding')
@@ -43,6 +48,9 @@ def test_high_alt_iastate():
"""Test Iowa State data that starts at pressure less than 925 hPa."""
df = IAStateUpperAir.request_data(datetime(2010, 12, 9, 12), 'BOI')
+ assert(df['time'][0] == datetime(2010, 12, 9, 12))
+ assert(df['station'][0] == 'KBOI')
+
assert_almost_equal(df['pressure'][0], 919.0, 2)
assert_almost_equal(df['height'][0], 871.0, 2)
assert_almost_equal(df['temperature'][0], -0.1, 2)
@@ -51,3 +59,14 @@ def test_high_alt_iastate():
assert_almost_equal(df['v_wind'][0], 1.500, 2)
assert_almost_equal(df['speed'][0], 3.0, 1)
assert_almost_equal(df['direction'][0], 240.0, 1)
+
+ assert(df.units['pressure'] == 'hPa')
+ assert(df.units['height'] == 'meter')
+ assert(df.units['temperature'] == 'degC')
+ assert(df.units['dewpoint'] == 'degC')
+ assert(df.units['u_wind'] == 'knot')
+ assert(df.units['v_wind'] == 'knot')
+ assert(df.units['speed'] == 'knot')
+ assert(df.units['direction'] == 'degrees')
+ assert(df.units['station'] is None)
+ assert(df.units['time'] is None)
diff --git a/siphon/tests/test_wyoming.py b/siphon/tests/test_wyoming.py
index 9675cd28..01ba59ff 100644
--- a/siphon/tests/test_wyoming.py
+++ b/siphon/tests/test_wyoming.py
@@ -19,6 +19,13 @@ def test_wyoming():
"""Test that we are properly parsing data from the Wyoming archive."""
df = WyomingUpperAir.request_data(datetime(1999, 5, 4, 0), 'OUN')
+ assert(df['time'][0] == datetime(1999, 5, 4, 0))
+ assert(df['station'][0] == 'OUN')
+ assert(df['station_number'][0] == 72357)
+ assert(df['latitude'][0] == 35.18)
+ assert(df['longitude'][0] == -97.44)
+ assert(df['elevation'][0] == 345.0)
+
assert_almost_equal(df['pressure'][5], 867.9, 2)
assert_almost_equal(df['height'][5], 1219., 2)
assert_almost_equal(df['temperature'][5], 17.4, 2)
@@ -36,6 +43,12 @@ def test_wyoming():
assert(df.units['v_wind'] == 'knot')
assert(df.units['speed'] == 'knot')
assert(df.units['direction'] == 'degrees')
+ assert(df.units['latitude'] == 'degrees')
+ assert(df.units['longitude'] == 'degrees')
+ assert(df.units['elevation'] == 'meter')
+ assert(df.units['station'] is None)
+ assert(df.units['station_number'] is None)
+ assert(df.units['time'] is None)
@recorder.use_cassette('wyoming_high_alt_sounding')
@@ -43,6 +56,13 @@ def test_high_alt_wyoming():
"""Test Wyoming data that starts at pressure less than 925 hPa."""
df = WyomingUpperAir.request_data(datetime(2010, 12, 9, 12), 'BOI')
+ assert(df['time'][0] == datetime(2010, 12, 9, 12))
+ assert(df['station'][0] == 'BOI')
+ assert(df['station_number'][0] == 72681)
+ assert(df['latitude'][0] == 43.56)
+ assert(df['longitude'][0] == -116.21)
+ assert(df['elevation'][0] == 874.0)
+
assert_almost_equal(df['pressure'][2], 890.0, 2)
assert_almost_equal(df['height'][2], 1133., 2)
assert_almost_equal(df['temperature'][2], 5.4, 2)
@@ -51,3 +71,18 @@ def test_high_alt_wyoming():
assert_almost_equal(df['v_wind'][2], 5.99, 2)
assert_almost_equal(df['speed'][2], 6.0, 1)
assert_almost_equal(df['direction'][2], 176.0, 1)
+
+ assert(df.units['pressure'] == 'hPa')
+ assert(df.units['height'] == 'meter')
+ assert(df.units['temperature'] == 'degC')
+ assert(df.units['dewpoint'] == 'degC')
+ assert(df.units['u_wind'] == 'knot')
+ assert(df.units['v_wind'] == 'knot')
+ assert(df.units['speed'] == 'knot')
+ assert(df.units['direction'] == 'degrees')
+ assert(df.units['latitude'] == 'degrees')
+ assert(df.units['longitude'] == 'degrees')
+ assert(df.units['elevation'] == 'meter')
+ assert(df.units['station'] is None)
+ assert(df.units['station_number'] is None)
+ assert(df.units['time'] is None)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": [
"environment.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libhdf5-serial-dev libnetcdf-dev"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1673645646525/work
argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1633990451307/work
async_generator @ file:///home/conda/feedstock_root/build_artifacts/async_generator_1722652753231/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1671632566681/work
Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1667688356751/work
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work
beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1705564648255/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work
brotlipy==0.7.0
Cartopy @ file:///home/conda/feedstock_root/build_artifacts/cartopy_1630680837223/work
certifi==2021.5.30
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1631636256886/work
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1632539733990/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1661170624537/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1655412516417/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1633450575846/work
cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography_1634230300355/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work
doc8 @ file:///home/conda/feedstock_root/build_artifacts/doc8_1652824562281/work
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1618676244774/work
entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1659645013175/work
flake8-builtins @ file:///home/conda/feedstock_root/build_artifacts/flake8-builtins_1589815207697/work
flake8-comprehensions @ file:///home/conda/feedstock_root/build_artifacts/flake8-comprehensions_1641851052064/work
flake8-copyright @ file:///home/conda/feedstock_root/build_artifacts/flake8-copyright_1676003148518/work
flake8-docstrings @ file:///home/conda/feedstock_root/build_artifacts/flake8-docstrings_1616176909510/work
flake8-import-order @ file:///home/conda/feedstock_root/build_artifacts/flake8-import-order_1669670271290/work
flake8-mutable==1.2.0
flake8-pep3101==1.3.0
flake8-polyfill==1.0.2
flake8-print @ file:///home/conda/feedstock_root/build_artifacts/flake8-print_1606721773021/work
flake8-quotes @ file:///home/conda/feedstock_root/build_artifacts/flake8-quotes_1707605925191/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1630267465156/work
importlib-resources==5.4.0
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1603384189793/work
ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1620912934572/work/dist/ipykernel-5.5.5-py3-none-any.whl
ipyparallel==8.2.1
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1609697613279/work
ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work
ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1679421482533/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1605054537831/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1636510082894/work
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1634752161479/work
jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1696255489086/work
jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1642858610849/work
jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work
jupyter-core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1631852698933/work
jupyterlab-pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1601375948261/work
jupyterlab-widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1655961217661/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1610099771815/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1621455668064/work
matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1611858699142/work
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1643049622439/work
mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1673904152039/work
more-itertools @ file:///home/conda/feedstock_root/build_artifacts/more-itertools_1690211628840/work
multidict @ file:///home/conda/feedstock_root/build_artifacts/multidict_1633329770033/work
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1637327213451/work
nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert_1605401832871/work
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1617383142101/work
nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1633096406418/work
nose==1.3.7
notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1616419146127/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1626681920064/work
olefile @ file:///home/conda/feedstock_root/build_artifacts/olefile_1602866521163/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1637239678211/work
pandas==1.1.5
pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1595548966091/work
pbr @ file:///home/conda/feedstock_root/build_artifacts/pbr_1724777609752/work
pep8-naming @ file:///home/conda/feedstock_root/build_artifacts/pep8-naming_1628397497711/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1630696616009/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1631522669284/work
prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work
prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1670414775770/work
protobuf==3.18.0
psutil==7.0.0
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
py @ file:///home/conda/feedstock_root/build_artifacts/py_1636301881863/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1659638152915/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
pydocstyle @ file:///home/conda/feedstock_root/build_artifacts/pydocstyle_1672787369895/work
pyflakes @ file:///home/conda/feedstock_root/build_artifacts/pyflakes_1659210156976/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1672682006896/work
pyOpenSSL @ file:///home/conda/feedstock_root/build_artifacts/pyopenssl_1663846997386/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work
PyQt5==5.12.3
PyQt5_sip==4.19.18
PyQtChart==5.12
PyQtWebEngine==5.12.1
pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1610146795286/work
pyshp @ file:///home/conda/feedstock_root/build_artifacts/pyshp_1659002966020/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1610291458349/work
pytest==6.2.5
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1664412836798/work
pytest-flake8 @ file:///home/conda/feedstock_root/build_artifacts/pytest-flake8_1646767752166/work
pytest-runner @ file:///home/conda/feedstock_root/build_artifacts/pytest-runner_1646127837850/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1693930252784/work
PyYAML==5.4.1
pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1631793305981/work
qtconsole @ file:///home/conda/feedstock_root/build_artifacts/qtconsole-base_1640876679830/work
QtPy @ file:///home/conda/feedstock_root/build_artifacts/qtpy_1643828301492/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1656534056640/work
restructuredtext-lint @ file:///home/conda/feedstock_root/build_artifacts/restructuredtext_lint_1645724685739/work
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1629411471490/work
Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1682601222253/work
Shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1628205367507/work
-e git+https://github.com/Unidata/siphon.git@e99953a89c35afd15c6e24a54fa0ebba4e927946#egg=siphon
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1658207591808/work
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1658872348413/work
sphinx-gallery @ file:///home/conda/feedstock_root/build_artifacts/sphinx-gallery_1700542355088/work
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1674487779667/work
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1675256494457/work
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1691604704163/work
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1649380998999/work
stevedore @ file:///home/conda/feedstock_root/build_artifacts/stevedore_1629395095970/work
terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1631128154882/work
testpath @ file:///home/conda/feedstock_root/build_artifacts/testpath_1645693042223/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1635181214134/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1610094701020/work
tqdm==4.64.1
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1631041982274/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1644850595256/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1678635778344/work
vcrpy @ file:///home/conda/feedstock_root/build_artifacts/vcrpy_1602284745577/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work
webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work
widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1655939017940/work
wrapt @ file:///home/conda/feedstock_root/build_artifacts/wrapt_1633440474617/work
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1621474818012/work
yarl @ file:///home/conda/feedstock_root/build_artifacts/yarl_1625232870338/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1633302054558/work
| name: siphon
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- alabaster=0.7.13=pyhd8ed1ab_0
- alsa-lib=1.2.7.2=h166bdaf_0
- argon2-cffi=21.1.0=py36h8f6f2f9_0
- async_generator=1.10=pyhd8ed1ab_1
- attrs=22.2.0=pyh71513ae_0
- babel=2.11.0=pyhd8ed1ab_0
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=pyhd8ed1ab_4
- backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0
- beautifulsoup4=4.12.3=pyha770c72_0
- bleach=6.1.0=pyhd8ed1ab_0
- brotlipy=0.7.0=py36h8f6f2f9_1001
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cartopy=0.19.0.post1=py36hbcbf2fa_1
- certifi=2021.5.30=py36h5fab9bb_0
- cffi=1.14.6=py36hd8eec40_1
- cftime=1.5.1=py36he33b4a0_0
- charset-normalizer=2.1.1=pyhd8ed1ab_0
- colorama=0.4.5=pyhd8ed1ab_0
- coverage=6.0=py36h8f6f2f9_1
- cryptography=35.0.0=py36hb60f036_0
- curl=7.87.0=h6312ad2_0
- cycler=0.11.0=pyhd8ed1ab_0
- dbus=1.13.6=h5008d03_3
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- doc8=0.11.2=pyhd8ed1ab_0
- docutils=0.17.1=py36h5fab9bb_0
- entrypoints=0.4=pyhd8ed1ab_0
- expat=2.6.4=h5888daf_0
- flake8=5.0.4=pyhd8ed1ab_0
- flake8-builtins=1.5.3=pyh9f0ad1d_0
- flake8-comprehensions=3.8.0=pyhd8ed1ab_0
- flake8-copyright=0.2.4=pyhd8ed1ab_0
- flake8-docstrings=1.6.0=pyhd8ed1ab_0
- flake8-import-order=0.18.2=pyhd8ed1ab_0
- flake8-mutable=1.2.0=py_1
- flake8-pep3101=1.3.0=py_0
- flake8-polyfill=1.0.2=py_0
- flake8-print=4.0.0=pyhd8ed1ab_0
- flake8-quotes=3.4.0=pyhd8ed1ab_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h267a509_2
- geos=3.9.1=h9c3ff4c_2
- gettext=0.23.1=h5888daf_0
- gettext-tools=0.23.1=h5888daf_0
- glib=2.80.2=hf974151_0
- glib-tools=2.80.2=hb6ce0ca_0
- gst-plugins-base=1.20.3=h57caac4_2
- gstreamer=1.20.3=hd4edc92_2
- hdf4=4.2.15=h9772cbc_5
- hdf5=1.12.1=nompi_h2386368_104
- icu=69.1=h9c3ff4c_0
- idna=3.10=pyhd8ed1ab_0
- imagesize=1.4.1=pyhd8ed1ab_0
- importlib-metadata=4.8.1=py36h5fab9bb_0
- importlib_metadata=4.8.1=hd8ed1ab_1
- iniconfig=1.1.1=pyh9f0ad1d_0
- ipykernel=5.5.5=py36hcb3619a_0
- ipython=7.16.1=py36he448a4c_2
- ipython_genutils=0.2.0=pyhd8ed1ab_1
- ipywidgets=7.7.4=pyhd8ed1ab_0
- jedi=0.17.2=py36h5fab9bb_1
- jinja2=3.0.3=pyhd8ed1ab_0
- jpeg=9e=h0b41bf4_3
- jsonschema=4.1.2=pyhd8ed1ab_0
- jupyter=1.0.0=pyhd8ed1ab_10
- jupyter_client=7.1.2=pyhd8ed1ab_0
- jupyter_console=6.5.1=pyhd8ed1ab_0
- jupyter_core=4.8.1=py36h5fab9bb_0
- jupyterlab_pygments=0.1.2=pyh9f0ad1d_0
- jupyterlab_widgets=1.1.1=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.3.1=py36h605e78d_1
- krb5=1.20.1=hf9c8cef_0
- lcms2=2.12=hddcbb42_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=3.0=h9c3ff4c_0
- libasprintf=0.23.1=h8e693c7_0
- libasprintf-devel=0.23.1=h8e693c7_0
- libblas=3.9.0=20_linux64_openblas
- libcblas=3.9.0=20_linux64_openblas
- libclang=13.0.1=default_hb5137d0_10
- libcurl=7.87.0=h6312ad2_0
- libdeflate=1.10=h7f98852_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libevent=2.1.10=h9b69904_4
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgettextpo=0.23.1=h5888daf_0
- libgettextpo-devel=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.80.2=hf974151_0
- libgomp=14.2.0=h767d61c_2
- libiconv=1.18=h4ce23a2_1
- liblapack=3.9.0=20_linux64_openblas
- libllvm13=13.0.1=hf817b99_2
- liblzma=5.6.4=hb9d3cd8_0
- liblzma-devel=5.6.4=hb9d3cd8_0
- libnetcdf=4.8.1=nompi_h329d8a1_102
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.1=hd590300_0
- libogg=1.3.5=h4ab18f5_0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libopus=1.3.1=h7f98852_1
- libpng=1.6.43=h2797004_0
- libpq=14.5=h2baec63_5
- libprotobuf=3.18.0=h780b84a_1
- libsodium=1.0.18=h36c2ea0_1
- libsqlite=3.46.0=hde9e2c9_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtiff=4.3.0=h0fcbabc_4
- libuuid=2.38.1=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=haae042b_4
- libzip=1.9.2=hc869a4a_1
- libzlib=1.2.13=h4ab18f5_6
- markupsafe=2.0.1=py36h8f6f2f9_0
- matplotlib=3.3.4=py36h5fab9bb_0
- matplotlib-base=3.3.4=py36hd391965_0
- mccabe=0.7.0=pyhd8ed1ab_0
- mistune=0.8.4=pyh1a96a4e_1006
- more-itertools=10.0.0=pyhd8ed1ab_0
- multidict=5.2.0=py36h8f6f2f9_0
- mysql-common=8.0.32=h14678bc_0
- mysql-libs=8.0.32=h54cf53e_0
- nbclient=0.5.9=pyhd8ed1ab_0
- nbconvert=6.0.7=py36h5fab9bb_3
- nbformat=5.1.3=pyhd8ed1ab_0
- ncurses=6.5=h2d0b736_3
- nest-asyncio=1.6.0=pyhd8ed1ab_0
- netcdf4=1.5.7=nompi_py36h775750b_103
- notebook=6.3.0=py36h5fab9bb_0
- nspr=4.36=h5888daf_0
- nss=3.100=hca3bf56_0
- numpy=1.19.5=py36hfc0c790_2
- olefile=0.46=pyh9f0ad1d_1
- openjpeg=2.5.0=h7d73246_0
- openssl=1.1.1w=hd590300_0
- packaging=21.3=pyhd8ed1ab_0
- pandas=1.1.5=py36h284efc9_0
- pandoc=2.19.2=h32600fe_2
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.7.1=pyh9f0ad1d_0
- pbr=6.1.0=pyhd8ed1ab_0
- pcre2=10.43=hcad00b1_0
- pep8-naming=0.12.1=pyhd8ed1ab_0
- pexpect=4.8.0=pyh1a96a4e_2
- pickleshare=0.7.5=py_1003
- pillow=8.3.2=py36h676a545_0
- pip=21.3.1=pyhd8ed1ab_0
- pluggy=1.0.0=py36h5fab9bb_1
- proj=7.2.0=h277dcde_2
- prometheus_client=0.17.1=pyhd8ed1ab_0
- prompt-toolkit=3.0.36=pyha770c72_0
- prompt_toolkit=3.0.36=hd8ed1ab_0
- protobuf=3.18.0=py36hc4f0c31_0
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd3deb0d_0
- py=1.11.0=pyh6c4a22f_0
- pycodestyle=2.9.1=pyhd8ed1ab_0
- pycparser=2.21=pyhd8ed1ab_0
- pydocstyle=6.2.0=pyhd8ed1ab_0
- pyflakes=2.5.0=pyhd8ed1ab_0
- pygments=2.14.0=pyhd8ed1ab_0
- pyopenssl=22.0.0=pyhd8ed1ab_1
- pyparsing=3.1.4=pyhd8ed1ab_0
- pyqt=5.12.3=py36h5fab9bb_7
- pyqt-impl=5.12.3=py36h7ec31b9_7
- pyqt5-sip=4.19.18=py36hc4f0c31_7
- pyqtchart=5.12=py36h7ec31b9_7
- pyqtwebengine=5.12.1=py36h7ec31b9_7
- pyrsistent=0.17.3=py36h8f6f2f9_2
- pyshp=2.3.1=pyhd8ed1ab_0
- pysocks=1.7.1=py36h5fab9bb_3
- pytest=6.2.5=py36h5fab9bb_0
- pytest-cov=4.0.0=pyhd8ed1ab_0
- pytest-flake8=1.1.0=pyhd8ed1ab_0
- pytest-runner=5.3.2=pyhd8ed1ab_0
- python=3.6.15=hb7a2778_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- pyzmq=22.3.0=py36h7068817_0
- qt=5.12.9=h1304e3e_6
- qtconsole-base=5.2.2=pyhd8ed1ab_1
- qtpy=2.0.1=pyhd8ed1ab_0
- readline=8.2=h8c095d6_2
- requests=2.28.1=pyhd8ed1ab_0
- restructuredtext_lint=1.4.0=pyhd8ed1ab_0
- scipy=1.5.3=py36h81d768a_1
- send2trash=1.8.2=pyh41d4057_0
- setuptools=58.0.4=py36h5fab9bb_2
- shapely=1.7.1=py36hff28ebb_5
- six=1.16.0=pyh6c4a22f_0
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- soupsieve=2.3.2.post1=pyhd8ed1ab_0
- sphinx=5.1.1=pyh6c4a22f_0
- sphinx-gallery=0.15.0=pyhd8ed1ab_0
- sphinxcontrib-applehelp=1.0.4=pyhd8ed1ab_0
- sphinxcontrib-devhelp=1.0.2=py_0
- sphinxcontrib-htmlhelp=2.0.1=pyhd8ed1ab_0
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_0
- sphinxcontrib-qthelp=1.0.3=py_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd8ed1ab_2
- sqlite=3.46.0=h6d4b2fc_0
- stevedore=3.4.0=py36h5fab9bb_0
- terminado=0.12.1=py36h5fab9bb_0
- testpath=0.6.0=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_0
- tomli=1.2.2=pyhd8ed1ab_0
- tornado=6.1=py36h8f6f2f9_1
- traitlets=4.3.3=pyhd8ed1ab_2
- typing-extensions=4.1.1=hd8ed1ab_0
- typing_extensions=4.1.1=pyha770c72_0
- urllib3=1.26.15=pyhd8ed1ab_0
- vcrpy=4.1.1=py_0
- wcwidth=0.2.10=pyhd8ed1ab_0
- webencodings=0.5.1=pyhd8ed1ab_2
- wheel=0.37.1=pyhd8ed1ab_0
- widgetsnbextension=3.6.1=pyha770c72_0
- wrapt=1.13.1=py36h8f6f2f9_0
- xarray=0.18.2=pyhd8ed1ab_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xz=5.6.4=hbcc6ac9_0
- xz-gpl-tools=5.6.4=hbcc6ac9_0
- xz-tools=5.6.4=hb9d3cd8_0
- yaml=0.2.5=h7f98852_2
- yarl=1.6.3=py36h8f6f2f9_2
- zeromq=4.3.5=h59595ed_1
- zipp=3.6.0=pyhd8ed1ab_0
- zlib=1.2.13=h4ab18f5_6
- zstd=1.5.6=ha6fb4c9_0
- pip:
- importlib-resources==5.4.0
- ipyparallel==8.2.1
- nose==1.3.7
- psutil==7.0.0
- tqdm==4.64.1
prefix: /opt/conda/envs/siphon
| [
"siphon/tests/test_iastate.py::test_iastate",
"siphon/tests/test_iastate.py::test_high_alt_iastate",
"siphon/tests/test_wyoming.py::test_wyoming",
"siphon/tests/test_wyoming.py::test_high_alt_wyoming"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 2,345 | [
"siphon/simplewebservice/iastate.py",
"siphon/simplewebservice/wyoming.py"
]
| [
"siphon/simplewebservice/iastate.py",
"siphon/simplewebservice/wyoming.py"
]
|
|
Agizin__Algorithm-Visualization-28 | 2dd360e7692621c71abec0afa34c0b9a3136f609 | 2018-03-30 15:55:51 | 2dd360e7692621c71abec0afa34c0b9a3136f609 | diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index c1489e4..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: python
-python:
- - "3.5"
- - "3.6"
-
-# command to install dependencies
-install:
- - pip install pipenv
- - pipenv install
-
-# command to run tests and lint
-script:
- - python -m unittest discover
- - pipenv check --style ./algviz
diff --git a/Pipfile b/Pipfile
deleted file mode 100644
index f7b79a0..0000000
--- a/Pipfile
+++ /dev/null
@@ -1,14 +0,0 @@
-[[source]]
-
-url = "https://pypi.python.org/simple"
-verify_ssl = true
-name = "pypi"
-
-
-[dev-packages]
-
-
-
-[packages]
-
-svgwrite = "*"
diff --git a/Pipfile.lock b/Pipfile.lock
deleted file mode 100644
index 509ea8c..0000000
--- a/Pipfile.lock
+++ /dev/null
@@ -1,51 +0,0 @@
-{
- "_meta": {
- "hash": {
- "sha256": "5e8fc6baf1666ec9b09e547e42962949b4ec5f9b38dfc5945d8c5d961d6482a3"
- },
- "host-environment-markers": {
- "implementation_name": "cpython",
- "implementation_version": "3.5.2",
- "os_name": "nt",
- "platform_machine": "AMD64",
- "platform_python_implementation": "CPython",
- "platform_release": "10",
- "platform_system": "Windows",
- "platform_version": "10.0.16299",
- "python_full_version": "3.5.2",
- "python_version": "3.5",
- "sys_platform": "win32"
- },
- "pipfile-spec": 6,
- "requires": {},
- "sources": [
- {
- "name": "pypi",
- "url": "https://pypi.python.org/simple",
- "verify_ssl": true
- }
- ]
- },
- "default": {
- "pyparsing": {
- "hashes": [
- "sha256:fee43f17a9c4087e7ed1605bd6df994c6173c1e977d7ade7b651292fab2bd010",
- "sha256:0832bcf47acd283788593e7a0f542407bd9550a55a8a8435214a1960e04bcb04",
- "sha256:9e8143a3e15c13713506886badd96ca4b579a87fbdf49e550dbfc057d6cb218e",
- "sha256:281683241b25fe9b80ec9d66017485f6deff1af5cde372469134b56ca8447a07",
- "sha256:b8b3117ed9bdf45e14dcc89345ce638ec7e0e29b2b579fa1ecf32ce45ebac8a5",
- "sha256:8f1e18d3fd36c6795bb7e02a39fd05c611ffc2596c1e0d995d34d67630426c18",
- "sha256:e4d45427c6e20a59bf4f88c639dcc03ce30d193112047f94012102f235853a58"
- ],
- "version": "==2.2.0"
- },
- "svgwrite": {
- "hashes": [
- "sha256:679507bb71c4eefb0d6c15643dbb8489ed0e3088330f46df30d7dc2abd897a82",
- "sha256:451c7f16220d654be0cfdbd13cc6f23aca69e6fd3ca19254e80b5f6d9ca6af5a"
- ],
- "version": "==1.1.11"
- }
- },
- "develop": {}
-}
diff --git a/algviz/interface/visitors.py b/algviz/interface/visitors.py
index 2b8dca9..b3653d7 100644
--- a/algviz/interface/visitors.py
+++ b/algviz/interface/visitors.py
@@ -1,6 +1,7 @@
import abc
from algviz.parser.json_objects import Tokens
+from algviz.parser import structures
class Visitor(metaclass=abc.ABCMeta):
@@ -190,3 +191,60 @@ class WidgetVisitor(Visitor):
def visit(self, *args, **kwargs):
return super().visit(*args, **kwargs)
+
+@default_for_type(type(None))
+class NullVisitor(Visitor):
+ type_ = Tokens.NULL_T
+
+ def uid(self, item):
+ return structures.Null.uid
+
+ def visit(self, *args, **kwargs):
+ return super().visit(*args, **kwargs)
+
+class TreeVisitor(Visitor):
+ """A visitor for trees of all shapes and sizes"""
+ type_ = Tokens.TREE_NODE_T
+
+ @abc.abstractmethod
+ def is_placeholder(self, tree):
+ """Test if the given tree is a placeholder for a non-existent node in
+ rigidly-structured trees.
+
+ For example, in the binary search tree:
+
+ ...........
+ ....2......
+ ..1...3....
+ ........4..
+ ...........
+
+ The first child of `3` should be a placeholder.
+
+ By default, tests if the tree is None.
+ """
+ return tree is None
+
+ @abc.abstractmethod
+ def iter_children(self, tree):
+ """Obviously, return an iterable/generator with the subtrees"""
+ yield from []
+
+ @abc.abstractmethod
+ def get_data(self, tree):
+ return None
+
+ def traverse(self, tree, **kwargs):
+ if self.is_placeholder(tree):
+ NullVisitor(self.output_mngr).traverse(tree, **kwargs)
+ else:
+ super().traverse(tree, **kwargs)
+
+ def visit(self, tree, **kwargs):
+ super().visit(tree, **kwargs)
+ self.output_mngr.next_key(Tokens.DATA)
+ self.data_visitor.traverse(self.get_data(tree))
+ self.output_mngr.next_key(Tokens.CHILDREN)
+ with self.output_mngr.push(mapping=False):
+ for child in self.iter_children(tree):
+ self.traverse(child)
diff --git a/algviz/interface/weird_visitors.py b/algviz/interface/weird_visitors.py
index 45ed59c..4e8f9f8 100644
--- a/algviz/interface/weird_visitors.py
+++ b/algviz/interface/weird_visitors.py
@@ -1,6 +1,8 @@
-from . import visitors
+import collections
import math
+from . import visitors
+
class BitmapArrayVisitor(visitors.ArrayVisitor):
"""Interpret an `int` as an array of 0s and 1s"""
@@ -15,3 +17,24 @@ class BitmapArrayVisitor(visitors.ArrayVisitor):
def get_item(self, x, i):
# Return the i'th bit of x
return int(bool(x & (2**i)))
+
+class ListTreeVisitor(visitors.TreeVisitor):
+ """Interpret a list as a binary tree"""
+ _Node = collections.namedtuple("_Node", ("list_", "index"))
+ def _wrap(self, tree):
+ if isinstance(tree, self._Node):
+ return tree
+ return self._Node(tree, 0)
+
+ def is_placeholder(self, tree):
+ tree = self._wrap(tree)
+ return tree.index >= len(tree.list_)
+
+ def get_data(self, tree):
+ tree = self._wrap(tree)
+ return tree.list_[tree.index]
+
+ def iter_children(self, tree):
+ tree = self._wrap(tree)
+ for i in (1, 2):
+ yield self._Node(tree.list_, 2 * tree.index + i)
diff --git a/algviz/parser/json_objects.py b/algviz/parser/json_objects.py
index ace9048..49e1516 100644
--- a/algviz/parser/json_objects.py
+++ b/algviz/parser/json_objects.py
@@ -116,7 +116,7 @@ class SnapshotDecoder(metaclass=Dispatcher):
@Dispatcher.dispatch(Tokens.TREE_NODE_T)
def tree_node_decode(self, tree_node, **kwargs):
- return structures.TreeNode(data=tree_node.get(Tokens.DATA, structures.Null),
+ return structures.Tree(data=tree_node.get(Tokens.DATA, structures.Null),
children=tree_node.get(Tokens.CHILDREN),
**kwargs)
diff --git a/algviz/parser/structures.py b/algviz/parser/structures.py
index 5efe694..65513ba 100644
--- a/algviz/parser/structures.py
+++ b/algviz/parser/structures.py
@@ -162,7 +162,7 @@ class Node(DataStructure):
# This is a minimal node that isn't responsible for its own edges. This
# allows for a more flexible graph implementation (i.e. allowing subgraphs
# over the same nodes). If you want to store edges within your node, use
- # TreeNode or a subclass instead of this.
+ # Tree or a subclass instead of this.
def __init__(self, data, **kwargs):
super().__init__(**kwargs)
self.data = data
@@ -209,7 +209,7 @@ class Widget(DataStructure):
def untablify(self, obj_table):
pass
-class TreeNode(DataStructure):
+class Tree(DataStructure):
"""A node with some number of children in a fixed order. Edges are implicit."""
# A common superclass could be used for linked-list nodes, since linked
# lists are just skinny trees
@@ -224,7 +224,7 @@ class TreeNode(DataStructure):
def __eq__(self, other):
return (super().__eq__(other) and
- isinstance(other, TreeNode) and
+ isinstance(other, Tree) and
self.data == other.data and
self.children == other.children)
| Visitor for Trees in Python
Depends on #13 | Agizin/Algorithm-Visualization | diff --git a/algviz/interface/test_visitors.py b/algviz/interface/test_visitors.py
index 164c96f..0c31d9b 100644
--- a/algviz/interface/test_visitors.py
+++ b/algviz/interface/test_visitors.py
@@ -20,7 +20,7 @@ class VisitorTestCaseMixin(TempFileMixin):
text = self.read_tempfile()
return text, json_objects.decode_json(text)
- def to_hell_and_back_full_result(self, instance, **kwargs):
+ def to_json_and_back_full_result(self, instance, **kwargs):
"""Convenience for test cases where you only need to encode and decode
one instance. Returns (json_text, decoded_object)
"""
@@ -28,9 +28,9 @@ class VisitorTestCaseMixin(TempFileMixin):
self.visitor.traverse(instance, **kwargs)
return self.read_result()
- def to_hell_and_back(self, instance, **kwargs):
+ def to_json_and_back(self, instance, **kwargs):
"""Visit the object, print it out, decode it, and return the resulting object"""
- _, snapshots = self.to_hell_and_back_full_result(instance, **kwargs)
+ _, snapshots = self.to_json_and_back_full_result(instance, **kwargs)
return snapshots[-1].obj_table.getuid(self.visitor.uid(instance))
def test_metadata(self):
@@ -42,7 +42,7 @@ class VisitorTestCaseMixin(TempFileMixin):
msg="""This test doesn't work. We want different
instances of identical dictionaries, or else the test
can be passed by calling `metadata.clear()`.""")
- result = self.to_hell_and_back(self.sample_instance(),
+ result = self.to_json_and_back(self.sample_instance(),
metadata=mk_metadata())
self.assertEqual(mk_metadata(), result.metadata)
@@ -90,6 +90,6 @@ class ArrayVisitorTestCase(VisitorTestCaseMixin, unittest.TestCase):
return [1, 2, 3]
def test_array_export_and_import(self):
- arr = self.to_hell_and_back([1, 2, 3])
+ arr = self.to_json_and_back([1, 2, 3])
self.assertIsInstance(arr, structures.Array)
self.assertEqual(list(arr), [1, 2, 3])
diff --git a/algviz/interface/test_weird_visitors.py b/algviz/interface/test_weird_visitors.py
index 11e9f57..b8e8a4e 100644
--- a/algviz/interface/test_weird_visitors.py
+++ b/algviz/interface/test_weird_visitors.py
@@ -1,7 +1,7 @@
import unittest
import tempfile
-from algviz.parser import json_objects
+from algviz.parser import json_objects, structures
from . import weird_visitors
from . import output
@@ -26,3 +26,17 @@ class BitmapVisitorTestCase(VisitorTestCaseMixin, unittest.TestCase):
# (True == 1 but id(True) != id(1))
self._next_sample_bool ^= True
return True if self._next_sample_bool else 1
+
+
+class ListTreeVisitorTestCase(VisitorTestCaseMixin, unittest.TestCase):
+ visitor_cls = weird_visitors.ListTreeVisitor
+
+ def test_tree_visit(self):
+ tree = self.to_json_and_back([1, 2])
+ self.assertEqual(tree.data, 1)
+ self.assertEqual(tree.children[1], structures.Null)
+ self.assertEqual(tree.children[0].data, 2)
+ self.assertEqual(tree.children[0].children, [structures.Null] * 2)
+
+ def sample_instance(self):
+ return [4, 2, 6, 1, 3, 5, 7]
diff --git a/algviz/parser/test_json_objects.py b/algviz/parser/test_json_objects.py
index 1f0eac0..74f2f83 100644
--- a/algviz/parser/test_json_objects.py
+++ b/algviz/parser/test_json_objects.py
@@ -147,8 +147,8 @@ class ArrayDecodingTestCase(GenericDecodingTestCase):
self.unexpected_object = self.factory([1, 2, 3, 4, structures.String("goodbye")])
self.same_uid_object = self.factory([1, 2, 3])
-class TreeNodeDecodingTestCase(GenericDecodingTestCase):
- cls_under_test = structures.TreeNode
+class TreeDecodingTestCase(GenericDecodingTestCase):
+ cls_under_test = structures.Tree
def set_up_expectations(self):
self.snapshot_input = [
{"T": "treenode", "uid": "L", "children": ["LL", "#null"], "data": 1},
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pygraphviz",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/Agizin/Algorithm-Visualization.git@2dd360e7692621c71abec0afa34c0b9a3136f609#egg=algviz
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pygraphviz @ file:///croot/pygraphviz_1671045577740/work
pytest==8.3.5
tomli==2.2.1
| name: Algorithm-Visualization
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- atk-1.0=2.36.0=ha1a6a79_0
- boost-cpp=1.82.0=hdb19cb5_2
- bzip2=1.0.8=h5eee18b_6
- c-ares=1.19.1=h5eee18b_0
- ca-certificates=2025.2.25=h06a4308_0
- cairo=1.16.0=hb05425b_5
- expat=2.6.4=h6a678d5_0
- font-ttf-dejavu-sans-mono=2.37=hd3eb1b0_0
- font-ttf-inconsolata=2.001=hcb22688_0
- font-ttf-source-code-pro=2.030=hd3eb1b0_0
- font-ttf-ubuntu=0.83=h8b1ccd4_0
- fontconfig=2.14.1=h55d465d_3
- fonts-anaconda=1=h8fa9717_0
- fonts-conda-ecosystem=1=hd3eb1b0_0
- freetype=2.12.1=h4a9f257_0
- fribidi=1.0.10=h7b6447c_0
- gdk-pixbuf=2.42.10=h5eee18b_1
- giflib=5.2.2=h5eee18b_0
- glib=2.78.4=h6a678d5_0
- glib-tools=2.78.4=h6a678d5_0
- gobject-introspection=1.78.1=py39h42194e9_2
- graphite2=1.3.14=h295c915_1
- graphviz=2.50.0=h78213b7_2
- gtk2=2.24.33=h27e1c3a_3
- gts=0.7.6=hb67d8dd_3
- harfbuzz=10.2.0=hf296adc_0
- icu=73.1=h6a678d5_0
- jpeg=9e=h5eee18b_3
- krb5=1.20.1=h143b758_1
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libboost=1.82.0=h109eef0_2
- libcurl=8.12.1=hc9e6f67_0
- libdeflate=1.22=h5eee18b_0
- libedit=3.1.20230828=h5eee18b_0
- libev=4.33=h7f8727e_1
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgd=2.3.3=h6a678d5_3
- libglib=2.78.4=hdc74915_0
- libgomp=11.2.0=h1234567_1
- libiconv=1.16=h5eee18b_3
- libnghttp2=1.57.0=h2d74bed_0
- libpng=1.6.39=h5eee18b_0
- librsvg=2.56.3=hf6914bd_1
- libssh2=1.11.1=h251f7ec_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libtool=2.4.7=h6a678d5_0
- libuuid=1.41.5=h5eee18b_0
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxml2=2.13.5=hfdd30dd_0
- lz4-c=1.9.4=h6a678d5_1
- ncurses=6.4=h6a678d5_0
- ninja=1.12.1=h06a4308_0
- ninja-base=1.12.1=hdb19cb5_0
- nspr=4.35=h6a678d5_0
- nss=3.89.1=h6a678d5_0
- openjpeg=2.5.2=he7f1fd0_0
- openssl=3.0.16=h5eee18b_0
- pango=1.50.7=h0fee60c_1
- pcre2=10.42=hebb0a14_1
- pip=25.0=py39h06a4308_0
- pixman=0.40.0=h7f8727e_1
- poppler=24.09.0=hcf11d46_1
- poppler-data=0.4.11=h06a4308_1
- pygraphviz=1.9=py39h5eee18b_1
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/Algorithm-Visualization
| [
"algviz/interface/test_visitors.py::WidgetVisitorTestCase::test_metadata",
"algviz/interface/test_visitors.py::WidgetVisitorTestCase::test_varnames",
"algviz/interface/test_visitors.py::WidgetVisitorTestCase::test_widget_export_and_import",
"algviz/interface/test_visitors.py::ArrayVisitorTestCase::test_array_export_and_import",
"algviz/interface/test_visitors.py::ArrayVisitorTestCase::test_metadata",
"algviz/interface/test_visitors.py::ArrayVisitorTestCase::test_varnames",
"algviz/interface/test_weird_visitors.py::BitmapVisitorTestCase::test_bitmap_visit",
"algviz/interface/test_weird_visitors.py::BitmapVisitorTestCase::test_metadata",
"algviz/interface/test_weird_visitors.py::BitmapVisitorTestCase::test_varnames",
"algviz/interface/test_weird_visitors.py::ListTreeVisitorTestCase::test_metadata",
"algviz/interface/test_weird_visitors.py::ListTreeVisitorTestCase::test_tree_visit",
"algviz/interface/test_weird_visitors.py::ListTreeVisitorTestCase::test_varnames",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_aliases_are_not_already_tokens",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_can_handle_missing_outermost_close_bracket",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_fix_aliases",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_literal_decoding",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_var_key_shows_up_in_namespace",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::TreeDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_same_object_method_works_based_on_uid"
]
| []
| []
| []
| null | 2,346 | [
"algviz/parser/structures.py",
"algviz/interface/visitors.py",
"algviz/interface/weird_visitors.py",
"Pipfile.lock",
".travis.yml",
"Pipfile",
"algviz/parser/json_objects.py"
]
| [
"algviz/parser/structures.py",
"algviz/interface/visitors.py",
"algviz/interface/weird_visitors.py",
"Pipfile.lock",
".travis.yml",
"Pipfile",
"algviz/parser/json_objects.py"
]
|
|
oasis-open__cti-python-stix2-150 | e92db2417ab493ea1010e82ea8be4105229ee75d | 2018-03-30 16:09:04 | 2d689815d743611a8f3ccd48ce5e2d1ec70695e5 | diff --git a/.isort.cfg b/.isort.cfg
index 0fadb83..cca9d19 100644
--- a/.isort.cfg
+++ b/.isort.cfg
@@ -1,5 +1,6 @@
[settings]
not_skip = __init__.py
+skip = workbench.py
known_third_party =
dateutil,
ordereddict,
diff --git a/docs/api/stix2.workbench.rst b/docs/api/stix2.workbench.rst
new file mode 100644
index 0000000..19345f0
--- /dev/null
+++ b/docs/api/stix2.workbench.rst
@@ -0,0 +1,5 @@
+workbench
+===============
+
+.. automodule:: stix2.workbench
+ :members:
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 49416a0..0764454 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -115,5 +115,16 @@ class STIXPropertyDocumenter(ClassDocumenter):
self.add_line('', '<stixattr>')
+def autodoc_skipper(app, what, name, obj, skip, options):
+ """Customize Sphinx to skip some member we don't want documented.
+
+ Skips anything containing ':autodoc-skip:' in its docstring.
+ """
+ if obj.__doc__ and ':autodoc-skip:' in obj.__doc__:
+ return skip or True
+ return skip
+
+
def setup(app):
app.add_autodocumenter(STIXPropertyDocumenter)
+ app.connect('autodoc-skip-member', autodoc_skipper)
diff --git a/docs/guide/workbench.ipynb b/docs/guide/workbench.ipynb
new file mode 100644
index 0000000..9cb099a
--- /dev/null
+++ b/docs/guide/workbench.ipynb
@@ -0,0 +1,485 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "# Delete this cell to re-enable tracebacks\n",
+ "import sys\n",
+ "ipython = get_ipython()\n",
+ "\n",
+ "def hide_traceback(exc_tuple=None, filename=None, tb_offset=None,\n",
+ " exception_only=False, running_compiled_code=False):\n",
+ " etype, value, tb = sys.exc_info()\n",
+ " return ipython._showtraceback(etype, value, ipython.InteractiveTB.get_exception_only(etype, value))\n",
+ "\n",
+ "ipython.showtraceback = hide_traceback"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "# JSON output syntax highlighting\n",
+ "from __future__ import print_function\n",
+ "from pygments import highlight\n",
+ "from pygments.lexers import JsonLexer\n",
+ "from pygments.formatters import HtmlFormatter\n",
+ "from six.moves import builtins\n",
+ "from IPython.display import display, HTML\n",
+ "\n",
+ "def json_print(inpt):\n",
+ " string = str(inpt)\n",
+ " if string[0] == '{':\n",
+ " formatter = HtmlFormatter()\n",
+ " display(HTML('<style type=\"text/css\">{}</style>{}'.format(\n",
+ " formatter.get_style_defs('.highlight'),\n",
+ " highlight(string, JsonLexer(), formatter))))\n",
+ " else:\n",
+ " builtins.print(inpt)\n",
+ "\n",
+ "globals()['print'] = json_print"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Using The Workbench"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The [Workbench API](../api/stix2.workbench.rst) hides most of the complexity of the rest of the library to make it easy to interact with STIX data. To use it, just import everything from ``stix2.workbench``:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from stix2.workbench import *"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Retrieving STIX Data\n",
+ "\n",
+ "To get some STIX data to work with, let's set up a DataSource and add it to our workbench."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "from taxii2client import Collection\n",
+ "\n",
+ "collection = Collection(\"http://127.0.0.1:5000/trustgroup1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/\", user=\"admin\", password=\"Password0\")\n",
+ "tc_source = TAXIICollectionSource(collection)\n",
+ "add_data_source(tc_source)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "Now we can get all of the indicators from the data source."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = indicators()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Similar functions are available for the other STIX Object types. See the full list [here](../api/stix2.workbench.rst#stix2.workbench.attack_patterns).\n",
+ "\n",
+ "If you want to only retrieve *some* indicators, you can pass in one or more [Filters](../api/datastore/stix2.datastore.filters.rst). This example finds all the indicators created by a specific identity:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = indicators(filters=Filter('created_by_ref', '=', 'identity--adede3e8-bf44-4e6f-b3c9-1958cbc3b188'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The objects returned let you easily traverse their relationships. Get all Relationship objects involving that object with ``.relationships()``, all other objects related to this object with ``.related()``, and the Identity object for the creator of the object (if one exists) with ``.created_by()``. For full details on these methods and their arguments, see the [Workbench API](../api/stix2.workbench.rst) documentation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "indicator--a932fcc6-e032-176c-126f-cb970a5a1ade\n",
+ "indicates\n",
+ "malware--fdd60b30-b67c-11e3-b0b9-f01faf20d111\n"
+ ]
+ }
+ ],
+ "source": [
+ "for i in indicators():\n",
+ " for rel in i.relationships():\n",
+ " print(rel.source_ref)\n",
+ " print(rel.relationship_type)\n",
+ " print(rel.target_ref)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">.highlight .hll { background-color: #ffffcc }\n",
+ ".highlight { background: #f8f8f8; }\n",
+ ".highlight .c { color: #408080; font-style: italic } /* Comment */\n",
+ ".highlight .err { border: 1px solid #FF0000 } /* Error */\n",
+ ".highlight .k { color: #008000; font-weight: bold } /* Keyword */\n",
+ ".highlight .o { color: #666666 } /* Operator */\n",
+ ".highlight .ch { color: #408080; font-style: italic } /* Comment.Hashbang */\n",
+ ".highlight .cm { color: #408080; font-style: italic } /* Comment.Multiline */\n",
+ ".highlight .cp { color: #BC7A00 } /* Comment.Preproc */\n",
+ ".highlight .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */\n",
+ ".highlight .c1 { color: #408080; font-style: italic } /* Comment.Single */\n",
+ ".highlight .cs { color: #408080; font-style: italic } /* Comment.Special */\n",
+ ".highlight .gd { color: #A00000 } /* Generic.Deleted */\n",
+ ".highlight .ge { font-style: italic } /* Generic.Emph */\n",
+ ".highlight .gr { color: #FF0000 } /* Generic.Error */\n",
+ ".highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */\n",
+ ".highlight .gi { color: #00A000 } /* Generic.Inserted */\n",
+ ".highlight .go { color: #888888 } /* Generic.Output */\n",
+ ".highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */\n",
+ ".highlight .gs { font-weight: bold } /* Generic.Strong */\n",
+ ".highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */\n",
+ ".highlight .gt { color: #0044DD } /* Generic.Traceback */\n",
+ ".highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */\n",
+ ".highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */\n",
+ ".highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */\n",
+ ".highlight .kp { color: #008000 } /* Keyword.Pseudo */\n",
+ ".highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */\n",
+ ".highlight .kt { color: #B00040 } /* Keyword.Type */\n",
+ ".highlight .m { color: #666666 } /* Literal.Number */\n",
+ ".highlight .s { color: #BA2121 } /* Literal.String */\n",
+ ".highlight .na { color: #7D9029 } /* Name.Attribute */\n",
+ ".highlight .nb { color: #008000 } /* Name.Builtin */\n",
+ ".highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */\n",
+ ".highlight .no { color: #880000 } /* Name.Constant */\n",
+ ".highlight .nd { color: #AA22FF } /* Name.Decorator */\n",
+ ".highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */\n",
+ ".highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */\n",
+ ".highlight .nf { color: #0000FF } /* Name.Function */\n",
+ ".highlight .nl { color: #A0A000 } /* Name.Label */\n",
+ ".highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */\n",
+ ".highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */\n",
+ ".highlight .nv { color: #19177C } /* Name.Variable */\n",
+ ".highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */\n",
+ ".highlight .w { color: #bbbbbb } /* Text.Whitespace */\n",
+ ".highlight .mb { color: #666666 } /* Literal.Number.Bin */\n",
+ ".highlight .mf { color: #666666 } /* Literal.Number.Float */\n",
+ ".highlight .mh { color: #666666 } /* Literal.Number.Hex */\n",
+ ".highlight .mi { color: #666666 } /* Literal.Number.Integer */\n",
+ ".highlight .mo { color: #666666 } /* Literal.Number.Oct */\n",
+ ".highlight .sa { color: #BA2121 } /* Literal.String.Affix */\n",
+ ".highlight .sb { color: #BA2121 } /* Literal.String.Backtick */\n",
+ ".highlight .sc { color: #BA2121 } /* Literal.String.Char */\n",
+ ".highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */\n",
+ ".highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */\n",
+ ".highlight .s2 { color: #BA2121 } /* Literal.String.Double */\n",
+ ".highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */\n",
+ ".highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */\n",
+ ".highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */\n",
+ ".highlight .sx { color: #008000 } /* Literal.String.Other */\n",
+ ".highlight .sr { color: #BB6688 } /* Literal.String.Regex */\n",
+ ".highlight .s1 { color: #BA2121 } /* Literal.String.Single */\n",
+ ".highlight .ss { color: #19177C } /* Literal.String.Symbol */\n",
+ ".highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */\n",
+ ".highlight .fm { color: #0000FF } /* Name.Function.Magic */\n",
+ ".highlight .vc { color: #19177C } /* Name.Variable.Class */\n",
+ ".highlight .vg { color: #19177C } /* Name.Variable.Global */\n",
+ ".highlight .vi { color: #19177C } /* Name.Variable.Instance */\n",
+ ".highlight .vm { color: #19177C } /* Name.Variable.Magic */\n",
+ ".highlight .il { color: #666666 } /* Literal.Number.Integer.Long */</style><div class=\"highlight\"><pre><span></span><span class=\"p\">{</span>\n",
+ " <span class=\"nt\">"type"</span><span class=\"p\">:</span> <span class=\"s2\">"malware"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"id"</span><span class=\"p\">:</span> <span class=\"s2\">"malware--fdd60b30-b67c-11e3-b0b9-f01faf20d111"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"created"</span><span class=\"p\">:</span> <span class=\"s2\">"2017-01-27T13:49:53.997Z"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"modified"</span><span class=\"p\">:</span> <span class=\"s2\">"2017-01-27T13:49:53.997Z"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"name"</span><span class=\"p\">:</span> <span class=\"s2\">"Poison Ivy"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"description"</span><span class=\"p\">:</span> <span class=\"s2\">"Poison Ivy"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"labels"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
+ " <span class=\"s2\">"remote-access-trojan"</span>\n",
+ " <span class=\"p\">]</span>\n",
+ "<span class=\"p\">}</span>\n",
+ "</pre></div>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "for i in indicators():\n",
+ " for obj in i.related():\n",
+ " print(obj)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If there are a lot of related objects, you can narrow it down by passing in one or more [Filters](../api/datastore/stix2.datastore.filters.rst) just as before. For example, if we want to get only the indicators related to a specific piece of malware (and not any entities that use it or are targeted by it):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<style type=\"text/css\">.highlight .hll { background-color: #ffffcc }\n",
+ ".highlight { background: #f8f8f8; }\n",
+ ".highlight .c { color: #408080; font-style: italic } /* Comment */\n",
+ ".highlight .err { border: 1px solid #FF0000 } /* Error */\n",
+ ".highlight .k { color: #008000; font-weight: bold } /* Keyword */\n",
+ ".highlight .o { color: #666666 } /* Operator */\n",
+ ".highlight .ch { color: #408080; font-style: italic } /* Comment.Hashbang */\n",
+ ".highlight .cm { color: #408080; font-style: italic } /* Comment.Multiline */\n",
+ ".highlight .cp { color: #BC7A00 } /* Comment.Preproc */\n",
+ ".highlight .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */\n",
+ ".highlight .c1 { color: #408080; font-style: italic } /* Comment.Single */\n",
+ ".highlight .cs { color: #408080; font-style: italic } /* Comment.Special */\n",
+ ".highlight .gd { color: #A00000 } /* Generic.Deleted */\n",
+ ".highlight .ge { font-style: italic } /* Generic.Emph */\n",
+ ".highlight .gr { color: #FF0000 } /* Generic.Error */\n",
+ ".highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */\n",
+ ".highlight .gi { color: #00A000 } /* Generic.Inserted */\n",
+ ".highlight .go { color: #888888 } /* Generic.Output */\n",
+ ".highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */\n",
+ ".highlight .gs { font-weight: bold } /* Generic.Strong */\n",
+ ".highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */\n",
+ ".highlight .gt { color: #0044DD } /* Generic.Traceback */\n",
+ ".highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */\n",
+ ".highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */\n",
+ ".highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */\n",
+ ".highlight .kp { color: #008000 } /* Keyword.Pseudo */\n",
+ ".highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */\n",
+ ".highlight .kt { color: #B00040 } /* Keyword.Type */\n",
+ ".highlight .m { color: #666666 } /* Literal.Number */\n",
+ ".highlight .s { color: #BA2121 } /* Literal.String */\n",
+ ".highlight .na { color: #7D9029 } /* Name.Attribute */\n",
+ ".highlight .nb { color: #008000 } /* Name.Builtin */\n",
+ ".highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */\n",
+ ".highlight .no { color: #880000 } /* Name.Constant */\n",
+ ".highlight .nd { color: #AA22FF } /* Name.Decorator */\n",
+ ".highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */\n",
+ ".highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */\n",
+ ".highlight .nf { color: #0000FF } /* Name.Function */\n",
+ ".highlight .nl { color: #A0A000 } /* Name.Label */\n",
+ ".highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */\n",
+ ".highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */\n",
+ ".highlight .nv { color: #19177C } /* Name.Variable */\n",
+ ".highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */\n",
+ ".highlight .w { color: #bbbbbb } /* Text.Whitespace */\n",
+ ".highlight .mb { color: #666666 } /* Literal.Number.Bin */\n",
+ ".highlight .mf { color: #666666 } /* Literal.Number.Float */\n",
+ ".highlight .mh { color: #666666 } /* Literal.Number.Hex */\n",
+ ".highlight .mi { color: #666666 } /* Literal.Number.Integer */\n",
+ ".highlight .mo { color: #666666 } /* Literal.Number.Oct */\n",
+ ".highlight .sa { color: #BA2121 } /* Literal.String.Affix */\n",
+ ".highlight .sb { color: #BA2121 } /* Literal.String.Backtick */\n",
+ ".highlight .sc { color: #BA2121 } /* Literal.String.Char */\n",
+ ".highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */\n",
+ ".highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */\n",
+ ".highlight .s2 { color: #BA2121 } /* Literal.String.Double */\n",
+ ".highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */\n",
+ ".highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */\n",
+ ".highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */\n",
+ ".highlight .sx { color: #008000 } /* Literal.String.Other */\n",
+ ".highlight .sr { color: #BB6688 } /* Literal.String.Regex */\n",
+ ".highlight .s1 { color: #BA2121 } /* Literal.String.Single */\n",
+ ".highlight .ss { color: #19177C } /* Literal.String.Symbol */\n",
+ ".highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */\n",
+ ".highlight .fm { color: #0000FF } /* Name.Function.Magic */\n",
+ ".highlight .vc { color: #19177C } /* Name.Variable.Class */\n",
+ ".highlight .vg { color: #19177C } /* Name.Variable.Global */\n",
+ ".highlight .vi { color: #19177C } /* Name.Variable.Instance */\n",
+ ".highlight .vm { color: #19177C } /* Name.Variable.Magic */\n",
+ ".highlight .il { color: #666666 } /* Literal.Number.Integer.Long */</style><div class=\"highlight\"><pre><span></span><span class=\"p\">{</span>\n",
+ " <span class=\"nt\">"type"</span><span class=\"p\">:</span> <span class=\"s2\">"indicator"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"id"</span><span class=\"p\">:</span> <span class=\"s2\">"indicator--a932fcc6-e032-176c-126f-cb970a5a1ade"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"created"</span><span class=\"p\">:</span> <span class=\"s2\">"2014-05-08T09:00:00.000Z"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"modified"</span><span class=\"p\">:</span> <span class=\"s2\">"2014-05-08T09:00:00.000Z"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"name"</span><span class=\"p\">:</span> <span class=\"s2\">"File hash for Poison Ivy variant"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"pattern"</span><span class=\"p\">:</span> <span class=\"s2\">"[file:hashes.'SHA-256' = 'ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c']"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"valid_from"</span><span class=\"p\">:</span> <span class=\"s2\">"2014-05-08T09:00:00Z"</span><span class=\"p\">,</span>\n",
+ " <span class=\"nt\">"labels"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
+ " <span class=\"s2\">"file-hash-watchlist"</span>\n",
+ " <span class=\"p\">]</span>\n",
+ "<span class=\"p\">}</span>\n",
+ "</pre></div>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "malware = get('malware--fdd60b30-b67c-11e3-b0b9-f01faf20d111')\n",
+ "indicator = malware.related(filters=Filter('type', '=', 'indicator'))\n",
+ "print(indicator[0])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Creating STIX Data\n",
+ "\n",
+ "To create a STIX object, just use that object's class constructor. Once it's created, add it to the workbench with [save()](../api/datastore/stix2.workbench.rst#stix2.workbench.save)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "identity = Identity(name=\"ACME Threat Intel Co.\", identity_class=\"organization\")\n",
+ "save(identity)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can also set defaults for certain properties when creating objects. For example, let's set the default creator to be the identity object we just created:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "set_default_creator(identity)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now when we create an indicator (or any other STIX Domain Object), it will automatically have the right ``create_by_ref`` value."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ACME Threat Intel Co.\n"
+ ]
+ }
+ ],
+ "source": [
+ "indicator = Indicator(labels=[\"malicious-activity\"], pattern=\"[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']\")\n",
+ "save(indicator)\n",
+ "\n",
+ "indicator_creator = get(indicator.created_by_ref)\n",
+ "print(indicator_creator.name)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Defaults can also be set for the [created timestamp](../api/datastore/stix2.workbench.rst#stix2.workbench.set_default_created), [external references](../api/datastore/stix2.workbench.rst#stix2.workbench.set_default_external_refs) and [object marking references](../api/datastore/stix2.workbench.rst#stix2.workbench.set_default_object_marking_refs)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<div class=\"alert alert-warning\">\n",
+ "\n",
+ "**Warning:**\n",
+ "\n",
+ "The workbench layer replaces STIX Object classes with special versions of them that use \"wrappers\" to provide extra functionality. Because of this, we recommend that you **either use the workbench layer or the rest of the library, but not both**. In other words, don't import from both ``stix2.workbench`` and any other submodules of ``stix2``.\n",
+ "\n",
+ "</div>"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/stix2/__init__.py b/stix2/__init__.py
index 401d44b..89043ec 100644
--- a/stix2/__init__.py
+++ b/stix2/__init__.py
@@ -11,6 +11,7 @@
patterns
properties
utils
+ workbench
v20.common
v20.observables
v20.sdo
diff --git a/stix2/datastore/__init__.py b/stix2/datastore/__init__.py
index 78f7555..e0de6fe 100644
--- a/stix2/datastore/__init__.py
+++ b/stix2/datastore/__init__.py
@@ -16,7 +16,7 @@ import uuid
from six import with_metaclass
-from stix2.datastore.filters import Filter
+from stix2.datastore.filters import Filter, _assemble_filters
from stix2.utils import deduplicate
@@ -73,7 +73,7 @@ class DataStoreMixin(object):
stix_id (str): the id of the STIX object to retrieve.
Returns:
- stix_objs (list): a list of STIX objects
+ list: All versions of the specified STIX object.
"""
try:
@@ -91,7 +91,7 @@ class DataStoreMixin(object):
to conduct search on.
Returns:
- stix_objs (list): a list of STIX objects
+ list: The STIX objects matching the query.
"""
try:
@@ -99,6 +99,25 @@ class DataStoreMixin(object):
except AttributeError:
raise AttributeError('%s has no data source to query' % self.__class__.__name__)
+ def query_by_type(self, *args, **kwargs):
+ """Retrieve all objects of the given STIX object type.
+
+ Translate query_by_type() call to the appropriate DataSource call.
+
+ Args:
+ obj_type (str): The STIX object type to retrieve.
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ Returns:
+ list: The STIX objects that matched the query.
+
+ """
+ try:
+ return self.source.query_by_type(*args, **kwargs)
+ except AttributeError:
+ raise AttributeError('%s has no data source to query' % self.__class__.__name__)
+
def creator_of(self, *args, **kwargs):
"""Retrieve the Identity refered to by the object's `created_by_ref`.
@@ -136,7 +155,7 @@ class DataStoreMixin(object):
object is the target_ref. Default: False.
Returns:
- (list): List of Relationship objects involving the given STIX object.
+ list: The Relationship objects involving the given STIX object.
"""
try:
@@ -162,9 +181,11 @@ class DataStoreMixin(object):
object is the source_ref. Default: False.
target_only (bool): Only examine Relationships for which this
object is the target_ref. Default: False.
+ filters (list): list of additional filters the related objects must
+ match.
Returns:
- (list): List of STIX objects related to the given STIX object.
+ list: The STIX objects related to the given STIX object.
"""
try:
@@ -175,8 +196,8 @@ class DataStoreMixin(object):
def add(self, *args, **kwargs):
"""Method for storing STIX objects.
- Define custom behavior before storing STIX objects using the associated
- DataSink. Translates add() to the appropriate DataSink call.
+ Defines custom behavior before storing STIX objects using the
+ appropriate method call on the associated DataSink.
Args:
stix_objs (list): a list of STIX objects
@@ -240,7 +261,7 @@ class DataSource(with_metaclass(ABCMeta)):
specified by the "id".
Returns:
- stix_obj: the STIX object
+ stix_obj: The STIX object.
"""
@@ -258,7 +279,7 @@ class DataSource(with_metaclass(ABCMeta)):
specified by the "id".
Returns:
- stix_objs (list): a list of STIX objects
+ list: All versions of the specified STIX object.
"""
@@ -273,7 +294,7 @@ class DataSource(with_metaclass(ABCMeta)):
to conduct search on.
Returns:
- stix_objs (list): a list of STIX objects
+ list: The STIX objects that matched the query.
"""
@@ -311,7 +332,7 @@ class DataSource(with_metaclass(ABCMeta)):
object is the target_ref. Default: False.
Returns:
- (list): List of Relationship objects involving the given STIX object.
+ list: The Relationship objects involving the given STIX object.
"""
results = []
@@ -338,7 +359,7 @@ class DataSource(with_metaclass(ABCMeta)):
return results
- def related_to(self, obj, relationship_type=None, source_only=False, target_only=False):
+ def related_to(self, obj, relationship_type=None, source_only=False, target_only=False, filters=None):
"""Retrieve STIX Objects that have a Relationship involving the given
STIX object.
@@ -354,9 +375,11 @@ class DataSource(with_metaclass(ABCMeta)):
object is the source_ref. Default: False.
target_only (bool): Only examine Relationships for which this
object is the target_ref. Default: False.
+ filters (list): list of additional filters the related objects must
+ match.
Returns:
- (list): List of STIX objects related to the given STIX object.
+ list: The STIX objects related to the given STIX object.
"""
results = []
@@ -372,10 +395,13 @@ class DataSource(with_metaclass(ABCMeta)):
ids = set()
for r in rels:
ids.update((r.source_ref, r.target_ref))
- ids.remove(obj_id)
+ ids.discard(obj_id)
+
+ # Assemble filters
+ filter_list = _assemble_filters(filters)
for i in ids:
- results.append(self.get(i))
+ results.extend(self.query(filter_list + [Filter('id', '=', i)]))
return results
@@ -425,7 +451,7 @@ class CompositeDataSource(DataSource):
to another parent CompositeDataSource), not user supplied.
Returns:
- stix_obj: the STIX object to be returned.
+ stix_obj: The STIX object to be returned.
"""
if not self.has_data_sources():
@@ -471,7 +497,7 @@ class CompositeDataSource(DataSource):
attached to a parent CompositeDataSource), not user supplied.
Returns:
- all_data (list): list of STIX objects that have the specified id
+ list: The STIX objects that have the specified id.
"""
if not self.has_data_sources():
@@ -510,7 +536,7 @@ class CompositeDataSource(DataSource):
attached to a parent CompositeDataSource), not user supplied.
Returns:
- all_data (list): list of STIX objects to be returned
+ list: The STIX objects to be returned.
"""
if not self.has_data_sources():
@@ -542,6 +568,35 @@ class CompositeDataSource(DataSource):
return all_data
+ def query_by_type(self, *args, **kwargs):
+ """Retrieve all objects of the given STIX object type.
+
+ Federate the query to all DataSources attached to the
+ Composite Data Source.
+
+ Args:
+ obj_type (str): The STIX object type to retrieve.
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ Returns:
+ list: The STIX objects that matched the query.
+
+ """
+ if not self.has_data_sources():
+ raise AttributeError('CompositeDataSource has no data sources')
+
+ results = []
+ for ds in self.data_sources:
+ results.extend(ds.query_by_type(*args, **kwargs))
+
+ # remove exact duplicates (where duplicates are STIX 2.0
+ # objects with the same 'id' and 'modified' values)
+ if len(results) > 0:
+ results = deduplicate(results)
+
+ return results
+
def relationships(self, *args, **kwargs):
"""Retrieve Relationships involving the given STIX object.
@@ -561,7 +616,7 @@ class CompositeDataSource(DataSource):
object is the target_ref. Default: False.
Returns:
- (list): List of Relationship objects involving the given STIX object.
+ list: The Relationship objects involving the given STIX object.
"""
if not self.has_data_sources():
@@ -597,9 +652,11 @@ class CompositeDataSource(DataSource):
object is the source_ref. Default: False.
target_only (bool): Only examine Relationships for which this
object is the target_ref. Default: False.
+ filters (list): list of additional filters the related objects must
+ match.
Returns:
- (list): List of STIX objects related to the given STIX object.
+ list: The STIX objects related to the given STIX object.
"""
if not self.has_data_sources():
diff --git a/stix2/datastore/filters.py b/stix2/datastore/filters.py
index 9065b61..10bbeee 100644
--- a/stix2/datastore/filters.py
+++ b/stix2/datastore/filters.py
@@ -44,6 +44,37 @@ def _check_filter_components(prop, op, value):
return True
+def _assemble_filters(filters1=None, filters2=None):
+ """Assemble a list of filters.
+
+ This can be used to allow certain functions to work correctly no matter if
+ the user provides a single filter or a list of them.
+
+ Args:
+ filters1 (Filter or list, optional): The single Filter or list of Filters to
+ coerce into a list of Filters.
+ filters2 (Filter or list, optional): The single Filter or list of Filters to
+ append to the list of Filters.
+
+ Returns:
+ List of Filters.
+
+ """
+ if filters1 is None:
+ filter_list = []
+ elif not isinstance(filters1, list):
+ filter_list = [filters1]
+ else:
+ filter_list = filters1
+
+ if isinstance(filters2, list):
+ filter_list.extend(filters2)
+ elif filters2 is not None:
+ filter_list.append(filters2)
+
+ return filter_list
+
+
class Filter(collections.namedtuple("Filter", ['property', 'op', 'value'])):
"""STIX 2 filters that support the querying functionality of STIX 2
DataStores and DataSources.
diff --git a/stix2/environment.py b/stix2/environment.py
index eb5583e..e40e991 100644
--- a/stix2/environment.py
+++ b/stix2/environment.py
@@ -30,19 +30,43 @@ class ObjectFactory(object):
self._defaults = {}
if created_by_ref:
- self._defaults['created_by_ref'] = created_by_ref
+ self.set_default_creator(created_by_ref)
if created:
- self._defaults['created'] = created
- # If the user provides a default "created" time, we also want to use
- # that as the modified time.
- self._defaults['modified'] = created
+ self.set_default_created(created)
if external_references:
- self._defaults['external_references'] = external_references
+ self.set_default_external_refs(external_references)
if object_marking_refs:
- self._defaults['object_marking_refs'] = object_marking_refs
+ self.set_default_object_marking_refs(object_marking_refs)
self._list_append = list_append
self._list_properties = ['external_references', 'object_marking_refs']
+ def set_default_creator(self, creator=None):
+ """Set default value for the `created_by_ref` property.
+
+ """
+ self._defaults['created_by_ref'] = creator
+
+ def set_default_created(self, created=None):
+ """Set default value for the `created` property.
+
+ """
+ self._defaults['created'] = created
+ # If the user provides a default "created" time, we also want to use
+ # that as the modified time.
+ self._defaults['modified'] = created
+
+ def set_default_external_refs(self, external_references=None):
+ """Set default external references.
+
+ """
+ self._defaults['external_references'] = external_references
+
+ def set_default_object_marking_refs(self, object_marking_refs=None):
+ """Set default object markings.
+
+ """
+ self._defaults['object_marking_refs'] = object_marking_refs
+
def create(self, cls, **kwargs):
"""Create a STIX object using object factory defaults.
@@ -94,6 +118,7 @@ class Environment(DataStoreMixin):
.. automethod:: relationships
.. automethod:: related_to
.. automethod:: add
+
"""
def __init__(self, factory=ObjectFactory(), store=None, source=None, sink=None):
@@ -113,17 +138,27 @@ class Environment(DataStoreMixin):
return self.factory.create(*args, **kwargs)
create.__doc__ = ObjectFactory.create.__doc__
+ def set_default_creator(self, *args, **kwargs):
+ return self.factory.set_default_creator(*args, **kwargs)
+ set_default_creator.__doc__ = ObjectFactory.set_default_creator.__doc__
+
+ def set_default_created(self, *args, **kwargs):
+ return self.factory.set_default_created(*args, **kwargs)
+ set_default_created.__doc__ = ObjectFactory.set_default_created.__doc__
+
+ def set_default_external_refs(self, *args, **kwargs):
+ return self.factory.set_default_external_refs(*args, **kwargs)
+ set_default_external_refs.__doc__ = ObjectFactory.set_default_external_refs.__doc__
+
+ def set_default_object_marking_refs(self, *args, **kwargs):
+ return self.factory.set_default_object_marking_refs(*args, **kwargs)
+ set_default_object_marking_refs.__doc__ = ObjectFactory.set_default_object_marking_refs.__doc__
+
def add_filters(self, *args, **kwargs):
- try:
- return self.source.filters.update(*args, **kwargs)
- except AttributeError:
- raise AttributeError('Environment has no data source')
+ return self.source.filters.update(*args, **kwargs)
def add_filter(self, *args, **kwargs):
- try:
- return self.source.filters.add(*args, **kwargs)
- except AttributeError:
- raise AttributeError('Environment has no data source')
+ return self.source.filters.add(*args, **kwargs)
def parse(self, *args, **kwargs):
return _parse(*args, **kwargs)
diff --git a/stix2/workbench.py b/stix2/workbench.py
new file mode 100644
index 0000000..9e31b50
--- /dev/null
+++ b/stix2/workbench.py
@@ -0,0 +1,292 @@
+"""Functions and class wrappers for interacting with STIX data at a high level.
+
+.. autofunction:: create
+.. autofunction:: set_default_creator
+.. autofunction:: set_default_created
+.. autofunction:: set_default_external_refs
+.. autofunction:: set_default_object_marking_refs
+.. autofunction:: get
+.. autofunction:: all_versions
+.. autofunction:: query
+.. autofunction:: creator_of
+.. autofunction:: relationships
+.. autofunction:: related_to
+.. autofunction:: save
+.. autofunction:: add_filters
+.. autofunction:: add_filter
+.. autofunction:: parse
+.. autofunction:: add_data_source
+.. autofunction:: add_data_sources
+
+"""
+
+import stix2
+from . import AttackPattern as _AttackPattern
+from . import Campaign as _Campaign
+from . import CourseOfAction as _CourseOfAction
+from . import Identity as _Identity
+from . import Indicator as _Indicator
+from . import IntrusionSet as _IntrusionSet
+from . import Malware as _Malware
+from . import ObservedData as _ObservedData
+from . import Report as _Report
+from . import ThreatActor as _ThreatActor
+from . import Tool as _Tool
+from . import Vulnerability as _Vulnerability
+from . import (AlternateDataStream, ArchiveExt, Artifact, AutonomousSystem, # noqa: F401
+ Bundle, CustomExtension, CustomMarking, CustomObservable,
+ Directory, DomainName, EmailAddress, EmailMessage,
+ EmailMIMEComponent, Environment, ExtensionsProperty,
+ ExternalReference, File, FileSystemSource, Filter,
+ GranularMarking, HTTPRequestExt, ICMPExt, IPv4Address,
+ IPv6Address, KillChainPhase, MACAddress, MarkingDefinition,
+ MemoryStore, Mutex, NetworkTraffic, NTFSExt, parse_observable,
+ PDFExt, Process, RasterImageExt, Relationship, Sighting,
+ SocketExt, Software, StatementMarking, TAXIICollectionSource,
+ TCPExt, TLP_AMBER, TLP_GREEN, TLP_RED, TLP_WHITE, TLPMarking,
+ UNIXAccountExt, URL, UserAccount, WindowsPEBinaryExt,
+ WindowsPEOptionalHeaderType, WindowsPESection,
+ WindowsProcessExt, WindowsRegistryKey, WindowsRegistryValueType,
+ WindowsServiceExt, X509Certificate, X509V3ExtenstionsType)
+from .datastore.filters import _assemble_filters
+
+# Use an implicit MemoryStore
+_environ = Environment(store=MemoryStore())
+
+create = _environ.create
+set_default_creator = _environ.set_default_creator
+set_default_created = _environ.set_default_created
+set_default_external_refs = _environ.set_default_external_refs
+set_default_object_marking_refs = _environ.set_default_object_marking_refs
+get = _environ.get
+all_versions = _environ.all_versions
+query = _environ.query
+creator_of = _environ.creator_of
+relationships = _environ.relationships
+related_to = _environ.related_to
+save = _environ.add
+add_filters = _environ.add_filters
+add_filter = _environ.add_filter
+parse = _environ.parse
+add_data_source = _environ.source.add_data_source
+add_data_sources = _environ.source.add_data_sources
+
+
+# Wrap SDOs with helper functions
+
+
+STIX_OBJS = [_AttackPattern, _Campaign, _CourseOfAction, _Identity,
+ _Indicator, _IntrusionSet, _Malware, _ObservedData, _Report,
+ _ThreatActor, _Tool, _Vulnerability]
+
+STIX_OBJ_DOCS = """
+
+.. method:: created_by(*args, **kwargs)
+
+ {}
+
+.. method:: relationships(*args, **kwargs)
+
+ {}
+
+.. method:: related(*args, **kwargs)
+
+ {}
+
+""".format(_environ.creator_of.__doc__,
+ _environ.relationships.__doc__,
+ _environ.related_to.__doc__)
+
+
+def _created_by_wrapper(self, *args, **kwargs):
+ return _environ.creator_of(self, *args, **kwargs)
+
+
+def _relationships_wrapper(self, *args, **kwargs):
+ return _environ.relationships(self, *args, **kwargs)
+
+
+def _related_wrapper(self, *args, **kwargs):
+ return _environ.related_to(self, *args, **kwargs)
+
+
+def _constructor_wrapper(obj_type):
+ # Use an intermediate wrapper class so the implicit environment will create objects that have our wrapper functions
+ wrapped_type = type(obj_type.__name__, obj_type.__bases__, dict(
+ created_by=_created_by_wrapper,
+ relationships=_relationships_wrapper,
+ related=_related_wrapper,
+ **obj_type.__dict__
+ ))
+
+ @staticmethod
+ def new_constructor(cls, *args, **kwargs):
+ x = _environ.create(wrapped_type, *args, **kwargs)
+ return x
+ return new_constructor
+
+
+def _setup_workbench():
+ # Create wrapper classes whose constructors call the implicit environment's create()
+ for obj_type in STIX_OBJS:
+ new_class_dict = {
+ '__new__': _constructor_wrapper(obj_type),
+ '__doc__': 'Workbench wrapper around the `{0} <stix2.v20.sdo.html#stix2.v20.sdo.{0}>`__ object. {1}'.format(obj_type.__name__, STIX_OBJ_DOCS)
+ }
+ new_class = type(obj_type.__name__, (), new_class_dict)
+
+ # Add our new class to this module's globals and to the library-wide mapping.
+ # This allows parse() to use the wrapped classes.
+ globals()[obj_type.__name__] = new_class
+ stix2.OBJ_MAP[obj_type._type] = new_class
+ new_class = None
+
+
+_setup_workbench()
+
+
+# Functions to get all objects of a specific type
+
+
+def attack_patterns(filters=None):
+ """Retrieve all Attack Pattern objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'attack-pattern')])
+ return query(filter_list)
+
+
+def campaigns(filters=None):
+ """Retrieve all Campaign objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'campaign')])
+ return query(filter_list)
+
+
+def courses_of_action(filters=None):
+ """Retrieve all Course of Action objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'course-of-action')])
+ return query(filter_list)
+
+
+def identities(filters=None):
+ """Retrieve all Identity objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'identity')])
+ return query(filter_list)
+
+
+def indicators(filters=None):
+ """Retrieve all Indicator objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'indicator')])
+ return query(filter_list)
+
+
+def intrusion_sets(filters=None):
+ """Retrieve all Intrusion Set objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'intrusion-set')])
+ return query(filter_list)
+
+
+def malware(filters=None):
+ """Retrieve all Malware objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'malware')])
+ return query(filter_list)
+
+
+def observed_data(filters=None):
+ """Retrieve all Observed Data objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'observed-data')])
+ return query(filter_list)
+
+
+def reports(filters=None):
+ """Retrieve all Report objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'report')])
+ return query(filter_list)
+
+
+def threat_actors(filters=None):
+ """Retrieve all Threat Actor objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'threat-actor')])
+ return query(filter_list)
+
+
+def tools(filters=None):
+ """Retrieve all Tool objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'tool')])
+ return query(filter_list)
+
+
+def vulnerabilities(filters=None):
+ """Retrieve all Vulnerability objects.
+
+ Args:
+ filters (list, optional): A list of additional filters to apply to
+ the query.
+
+ """
+ filter_list = _assemble_filters(filters, [Filter('type', '=', 'vulnerability')])
+ return query(filter_list)
diff --git a/tox.ini b/tox.ini
index bfc8c1b..46d88c1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,7 +9,8 @@ deps =
pytest-cov
coverage
commands =
- py.test --cov=stix2 stix2/test/ --cov-report term-missing
+ py.test --ignore=stix2/test/test_workbench.py --cov=stix2 stix2/test/ --cov-report term-missing
+ py.test stix2/test/test_workbench.py --cov=stix2 --cov-report term-missing --cov-append
passenv = CI TRAVIS TRAVIS_*
| Create "Workbench API"
The Workbench API is designed to be a high-level interface to querying, creating, and interacting with STIX data. It builds on the explicit "Environment" concept by exposing a set of module-level functions that operate on an implicit, module-level Environment, without requiring users to be aware of the underlying Environment. (This encapsulation allows applications that need to support multiple Environments to do so, but does not require individual users to manage their own Environment).
| oasis-open/cti-python-stix2 | diff --git a/stix2/test/constants.py b/stix2/test/constants.py
index 3db39d6..ab7fcf3 100644
--- a/stix2/test/constants.py
+++ b/stix2/test/constants.py
@@ -34,14 +34,18 @@ RELATIONSHIP_IDS = [
'relationship--a0cbb21c-8daf-4a7f-96aa-7155a4ef8f70'
]
-# All required args for a Campaign instance
+# *_KWARGS contains all required arguments to create an instance of that STIX object
+# *_MORE_KWARGS contains all the required arguments, plus some optional ones
+
+ATTACK_PATTERN_KWARGS = dict(
+ name="Phishing",
+)
+
CAMPAIGN_KWARGS = dict(
name="Green Group Attacks Against Finance",
description="Campaign by Green Group against a series of targets in the financial services sector.",
)
-
-# All required args for a Campaign instance, plus some optional args
CAMPAIGN_MORE_KWARGS = dict(
type='campaign',
id=CAMPAIGN_ID,
@@ -52,25 +56,29 @@ CAMPAIGN_MORE_KWARGS = dict(
description="Campaign by Green Group against a series of targets in the financial services sector.",
)
-# Minimum required args for an Identity instance
+COURSE_OF_ACTION_KWARGS = dict(
+ name="Block",
+)
+
IDENTITY_KWARGS = dict(
name="John Smith",
identity_class="individual",
)
-# Minimum required args for an Indicator instance
INDICATOR_KWARGS = dict(
labels=['malicious-activity'],
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
)
-# Minimum required args for a Malware instance
+INTRUSION_SET_KWARGS = dict(
+ name="Bobcat Breakin",
+)
+
MALWARE_KWARGS = dict(
labels=['ransomware'],
name="Cryptolocker",
)
-# All required args for a Malware instance, plus some optional args
MALWARE_MORE_KWARGS = dict(
type='malware',
id=MALWARE_ID,
@@ -81,14 +89,45 @@ MALWARE_MORE_KWARGS = dict(
description="A ransomware related to ..."
)
-# Minimum required args for a Relationship instance
+OBSERVED_DATA_KWARGS = dict(
+ first_observed=FAKE_TIME,
+ last_observed=FAKE_TIME,
+ number_observed=1,
+ objects={
+ "0": {
+ "type": "windows-registry-key",
+ "key": "HKEY_LOCAL_MACHINE\\System\\Foo\\Bar",
+ }
+ }
+)
+
+REPORT_KWARGS = dict(
+ labels=["campaign"],
+ name="Bad Cybercrime",
+ published=FAKE_TIME,
+ object_refs=[INDICATOR_ID],
+)
+
RELATIONSHIP_KWARGS = dict(
relationship_type="indicates",
source_ref=INDICATOR_ID,
target_ref=MALWARE_ID,
)
-# Minimum required args for a Sighting instance
SIGHTING_KWARGS = dict(
sighting_of_ref=INDICATOR_ID,
)
+
+THREAT_ACTOR_KWARGS = dict(
+ labels=["crime-syndicate"],
+ name="Evil Org",
+)
+
+TOOL_KWARGS = dict(
+ labels=["remote-access"],
+ name="VNC",
+)
+
+VULNERABILITY_KWARGS = dict(
+ name="Heartbleed",
+)
diff --git a/stix2/test/test_datastore.py b/stix2/test/test_datastore.py
index e80e8d8..8f40401 100644
--- a/stix2/test/test_datastore.py
+++ b/stix2/test/test_datastore.py
@@ -4,7 +4,7 @@ from taxii2client import Collection
from stix2 import Filter, MemorySink, MemorySource
from stix2.datastore import (CompositeDataSource, DataSink, DataSource,
make_id, taxii)
-from stix2.datastore.filters import apply_common_filters
+from stix2.datastore.filters import _assemble_filters, apply_common_filters
from stix2.utils import deduplicate
COLLECTION_URL = 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/'
@@ -473,6 +473,15 @@ def test_filters7():
assert len(resp) == 1
+def test_assemble_filters():
+ filter1 = Filter("name", "=", "Malicious site hosting downloader")
+ filter2 = Filter("modified", ">", "2017-01-28T13:49:53.935Z")
+ result = _assemble_filters(filter1, filter2)
+ assert len(result) == 2
+ assert result[0].property == 'name'
+ assert result[1].property == 'modified'
+
+
def test_deduplicate():
unique = deduplicate(STIX_OBJS1)
diff --git a/stix2/test/test_environment.py b/stix2/test/test_environment.py
index 84ca803..176d3f0 100644
--- a/stix2/test/test_environment.py
+++ b/stix2/test/test_environment.py
@@ -47,7 +47,7 @@ def test_object_factory_created():
assert ind.modified == FAKE_TIME
-def test_object_factory_external_resource():
+def test_object_factory_external_reference():
ext_ref = stix2.ExternalReference(source_name="ACME Threat Intel",
description="Threat report")
factory = stix2.ObjectFactory(external_references=ext_ref)
diff --git a/stix2/test/test_tool.py b/stix2/test/test_tool.py
index 21ece24..ce99fb8 100644
--- a/stix2/test/test_tool.py
+++ b/stix2/test/test_tool.py
@@ -58,4 +58,10 @@ def test_parse_tool(data):
assert tool.labels == ["remote-access"]
assert tool.name == "VNC"
+
+def test_tool_no_workbench_wrappers():
+ tool = stix2.Tool(name='VNC', labels=['remote-access'])
+ with pytest.raises(AttributeError):
+ tool.created_by()
+
# TODO: Add other examples
diff --git a/stix2/test/test_workbench.py b/stix2/test/test_workbench.py
new file mode 100644
index 0000000..7857eb2
--- /dev/null
+++ b/stix2/test/test_workbench.py
@@ -0,0 +1,262 @@
+import os
+
+import stix2
+from stix2.workbench import (AttackPattern, Campaign, CourseOfAction,
+ ExternalReference, FileSystemSource, Filter,
+ Identity, Indicator, IntrusionSet, Malware,
+ MarkingDefinition, ObservedData, Relationship,
+ Report, StatementMarking, ThreatActor, Tool,
+ Vulnerability, add_data_source, all_versions,
+ attack_patterns, campaigns, courses_of_action,
+ create, get, identities, indicators,
+ intrusion_sets, malware, observed_data, query,
+ reports, save, set_default_created,
+ set_default_creator, set_default_external_refs,
+ set_default_object_marking_refs, threat_actors,
+ tools, vulnerabilities)
+
+from .constants import (ATTACK_PATTERN_ID, ATTACK_PATTERN_KWARGS, CAMPAIGN_ID,
+ CAMPAIGN_KWARGS, COURSE_OF_ACTION_ID,
+ COURSE_OF_ACTION_KWARGS, IDENTITY_ID, IDENTITY_KWARGS,
+ INDICATOR_ID, INDICATOR_KWARGS, INTRUSION_SET_ID,
+ INTRUSION_SET_KWARGS, MALWARE_ID, MALWARE_KWARGS,
+ OBSERVED_DATA_ID, OBSERVED_DATA_KWARGS, REPORT_ID,
+ REPORT_KWARGS, THREAT_ACTOR_ID, THREAT_ACTOR_KWARGS,
+ TOOL_ID, TOOL_KWARGS, VULNERABILITY_ID,
+ VULNERABILITY_KWARGS)
+
+
+def test_workbench_environment():
+
+ # Create a STIX object
+ ind = create(Indicator, id=INDICATOR_ID, **INDICATOR_KWARGS)
+ save(ind)
+
+ resp = get(INDICATOR_ID)
+ assert resp['labels'][0] == 'malicious-activity'
+
+ resp = all_versions(INDICATOR_ID)
+ assert len(resp) == 1
+
+ # Search on something other than id
+ q = [Filter('type', '=', 'vulnerability')]
+ resp = query(q)
+ assert len(resp) == 0
+
+
+def test_workbench_get_all_attack_patterns():
+ mal = AttackPattern(id=ATTACK_PATTERN_ID, **ATTACK_PATTERN_KWARGS)
+ save(mal)
+
+ resp = attack_patterns()
+ assert len(resp) == 1
+ assert resp[0].id == ATTACK_PATTERN_ID
+
+
+def test_workbench_get_all_campaigns():
+ cam = Campaign(id=CAMPAIGN_ID, **CAMPAIGN_KWARGS)
+ save(cam)
+
+ resp = campaigns()
+ assert len(resp) == 1
+ assert resp[0].id == CAMPAIGN_ID
+
+
+def test_workbench_get_all_courses_of_action():
+ coa = CourseOfAction(id=COURSE_OF_ACTION_ID, **COURSE_OF_ACTION_KWARGS)
+ save(coa)
+
+ resp = courses_of_action()
+ assert len(resp) == 1
+ assert resp[0].id == COURSE_OF_ACTION_ID
+
+
+def test_workbench_get_all_identities():
+ idty = Identity(id=IDENTITY_ID, **IDENTITY_KWARGS)
+ save(idty)
+
+ resp = identities()
+ assert len(resp) == 1
+ assert resp[0].id == IDENTITY_ID
+
+
+def test_workbench_get_all_indicators():
+ resp = indicators()
+ assert len(resp) == 1
+ assert resp[0].id == INDICATOR_ID
+
+
+def test_workbench_get_all_intrusion_sets():
+ ins = IntrusionSet(id=INTRUSION_SET_ID, **INTRUSION_SET_KWARGS)
+ save(ins)
+
+ resp = intrusion_sets()
+ assert len(resp) == 1
+ assert resp[0].id == INTRUSION_SET_ID
+
+
+def test_workbench_get_all_malware():
+ mal = Malware(id=MALWARE_ID, **MALWARE_KWARGS)
+ save(mal)
+
+ resp = malware()
+ assert len(resp) == 1
+ assert resp[0].id == MALWARE_ID
+
+
+def test_workbench_get_all_observed_data():
+ od = ObservedData(id=OBSERVED_DATA_ID, **OBSERVED_DATA_KWARGS)
+ save(od)
+
+ resp = observed_data()
+ assert len(resp) == 1
+ assert resp[0].id == OBSERVED_DATA_ID
+
+
+def test_workbench_get_all_reports():
+ rep = Report(id=REPORT_ID, **REPORT_KWARGS)
+ save(rep)
+
+ resp = reports()
+ assert len(resp) == 1
+ assert resp[0].id == REPORT_ID
+
+
+def test_workbench_get_all_threat_actors():
+ thr = ThreatActor(id=THREAT_ACTOR_ID, **THREAT_ACTOR_KWARGS)
+ save(thr)
+
+ resp = threat_actors()
+ assert len(resp) == 1
+ assert resp[0].id == THREAT_ACTOR_ID
+
+
+def test_workbench_get_all_tools():
+ tool = Tool(id=TOOL_ID, **TOOL_KWARGS)
+ save(tool)
+
+ resp = tools()
+ assert len(resp) == 1
+ assert resp[0].id == TOOL_ID
+
+
+def test_workbench_get_all_vulnerabilities():
+ vuln = Vulnerability(id=VULNERABILITY_ID, **VULNERABILITY_KWARGS)
+ save(vuln)
+
+ resp = vulnerabilities()
+ assert len(resp) == 1
+ assert resp[0].id == VULNERABILITY_ID
+
+
+def test_workbench_relationships():
+ rel = Relationship(INDICATOR_ID, 'indicates', MALWARE_ID)
+ save(rel)
+
+ ind = get(INDICATOR_ID)
+ resp = ind.relationships()
+ assert len(resp) == 1
+ assert resp[0].relationship_type == 'indicates'
+ assert resp[0].source_ref == INDICATOR_ID
+ assert resp[0].target_ref == MALWARE_ID
+
+
+def test_workbench_created_by():
+ intset = IntrusionSet(name="Breach 123", created_by_ref=IDENTITY_ID)
+ save(intset)
+ creator = intset.created_by()
+ assert creator.id == IDENTITY_ID
+
+
+def test_workbench_related():
+ rel1 = Relationship(MALWARE_ID, 'targets', IDENTITY_ID)
+ rel2 = Relationship(CAMPAIGN_ID, 'uses', MALWARE_ID)
+ save([rel1, rel2])
+
+ resp = get(MALWARE_ID).related()
+ assert len(resp) == 3
+ assert any(x['id'] == CAMPAIGN_ID for x in resp)
+ assert any(x['id'] == INDICATOR_ID for x in resp)
+ assert any(x['id'] == IDENTITY_ID for x in resp)
+
+ resp = get(MALWARE_ID).related(relationship_type='indicates')
+ assert len(resp) == 1
+
+
+def test_workbench_related_with_filters():
+ malware = Malware(labels=["ransomware"], name="CryptorBit", created_by_ref=IDENTITY_ID)
+ rel = Relationship(malware.id, 'variant-of', MALWARE_ID)
+ save([malware, rel])
+
+ filters = [Filter('created_by_ref', '=', IDENTITY_ID)]
+ resp = get(MALWARE_ID).related(filters=filters)
+
+ assert len(resp) == 1
+ assert resp[0].name == malware.name
+ assert resp[0].created_by_ref == IDENTITY_ID
+
+ # filters arg can also be single filter
+ resp = get(MALWARE_ID).related(filters=filters[0])
+ assert len(resp) == 1
+
+
+def test_add_data_source():
+ fs_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "stix2_data")
+ fs = FileSystemSource(fs_path)
+ add_data_source(fs)
+
+ resp = tools()
+ assert len(resp) == 3
+ resp_ids = [tool.id for tool in resp]
+ assert TOOL_ID in resp_ids
+ assert 'tool--03342581-f790-4f03-ba41-e82e67392e23' in resp_ids
+ assert 'tool--242f3da3-4425-4d11-8f5c-b842886da966' in resp_ids
+
+
+def test_additional_filter():
+ resp = tools(Filter('created_by_ref', '=', 'identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5'))
+ assert len(resp) == 2
+
+
+def test_additional_filters_list():
+ resp = tools([Filter('created_by_ref', '=', 'identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5'),
+ Filter('name', '=', 'Windows Credential Editor')])
+ assert len(resp) == 1
+
+
+def test_default_creator():
+ set_default_creator(IDENTITY_ID)
+ campaign = Campaign(**CAMPAIGN_KWARGS)
+
+ assert 'created_by_ref' not in CAMPAIGN_KWARGS
+ assert campaign.created_by_ref == IDENTITY_ID
+
+
+def test_default_created_timestamp():
+ timestamp = "2018-03-19T01:02:03.000Z"
+ set_default_created(timestamp)
+ campaign = Campaign(**CAMPAIGN_KWARGS)
+
+ assert 'created' not in CAMPAIGN_KWARGS
+ assert stix2.utils.format_datetime(campaign.created) == timestamp
+ assert stix2.utils.format_datetime(campaign.modified) == timestamp
+
+
+def test_default_external_refs():
+ ext_ref = ExternalReference(source_name="ACME Threat Intel",
+ description="Threat report")
+ set_default_external_refs(ext_ref)
+ campaign = Campaign(**CAMPAIGN_KWARGS)
+
+ assert campaign.external_references[0].source_name == "ACME Threat Intel"
+ assert campaign.external_references[0].description == "Threat report"
+
+
+def test_default_object_marking_refs():
+ stmt_marking = StatementMarking("Copyright 2016, Example Corp")
+ mark_def = MarkingDefinition(definition_type="statement",
+ definition=stmt_marking)
+ set_default_object_marking_refs(mark_def)
+ campaign = Campaign(**CAMPAIGN_KWARGS)
+
+ assert campaign.object_marking_refs[0] == mark_def.id
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 7
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
entrypoints==0.4
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
nodeenv==1.6.0
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/oasis-open/cti-python-stix2.git@e92db2417ab493ea1010e82ea8be4105229ee75d#egg=stix2
stix2-patterns==2.0.0
taxii2-client==2.3.0
testpath==0.6.0
toml==0.10.2
tomli==1.2.3
tornado==6.1
tox==3.28.0
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- entrypoints==0.4
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- nodeenv==1.6.0
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stix2-patterns==2.0.0
- taxii2-client==2.3.0
- testpath==0.6.0
- toml==0.10.2
- tomli==1.2.3
- tornado==6.1
- tox==3.28.0
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_datastore.py::test_ds_abstract_class_smoke",
"stix2/test/test_datastore.py::test_ds_taxii",
"stix2/test/test_datastore.py::test_ds_taxii_name",
"stix2/test/test_datastore.py::test_parse_taxii_filters",
"stix2/test/test_datastore.py::test_add_get_remove_filter",
"stix2/test/test_datastore.py::test_filter_ops_check",
"stix2/test/test_datastore.py::test_filter_value_type_check",
"stix2/test/test_datastore.py::test_filter_type_underscore_check",
"stix2/test/test_datastore.py::test_apply_common_filters",
"stix2/test/test_datastore.py::test_filters0",
"stix2/test/test_datastore.py::test_filters1",
"stix2/test/test_datastore.py::test_filters2",
"stix2/test/test_datastore.py::test_filters3",
"stix2/test/test_datastore.py::test_filters4",
"stix2/test/test_datastore.py::test_filters5",
"stix2/test/test_datastore.py::test_filters6",
"stix2/test/test_datastore.py::test_filters7",
"stix2/test/test_datastore.py::test_assemble_filters",
"stix2/test/test_datastore.py::test_deduplicate",
"stix2/test/test_datastore.py::test_add_remove_composite_datasource",
"stix2/test/test_datastore.py::test_composite_datasource_operations",
"stix2/test/test_datastore.py::test_composite_datastore_no_datasource",
"stix2/test/test_environment.py::test_object_factory_created_by_ref_str",
"stix2/test/test_environment.py::test_object_factory_created_by_ref_obj",
"stix2/test/test_environment.py::test_object_factory_override_default",
"stix2/test/test_environment.py::test_object_factory_created",
"stix2/test/test_environment.py::test_object_factory_external_reference",
"stix2/test/test_environment.py::test_object_factory_obj_markings",
"stix2/test/test_environment.py::test_object_factory_list_append",
"stix2/test/test_environment.py::test_object_factory_list_replace",
"stix2/test/test_environment.py::test_environment_functions",
"stix2/test/test_environment.py::test_environment_source_and_sink",
"stix2/test/test_environment.py::test_environment_datastore_and_sink",
"stix2/test/test_environment.py::test_environment_no_datastore",
"stix2/test/test_environment.py::test_environment_add_filters",
"stix2/test/test_environment.py::test_environment_datastore_and_no_object_factory",
"stix2/test/test_environment.py::test_parse_malware",
"stix2/test/test_environment.py::test_creator_of",
"stix2/test/test_environment.py::test_creator_of_no_datasource",
"stix2/test/test_environment.py::test_creator_of_not_found",
"stix2/test/test_environment.py::test_creator_of_no_created_by_ref",
"stix2/test/test_environment.py::test_relationships",
"stix2/test/test_environment.py::test_relationships_no_id",
"stix2/test/test_environment.py::test_relationships_by_type",
"stix2/test/test_environment.py::test_relationships_by_source",
"stix2/test/test_environment.py::test_relationships_by_target",
"stix2/test/test_environment.py::test_relationships_by_target_and_type",
"stix2/test/test_environment.py::test_relationships_by_target_and_source",
"stix2/test/test_environment.py::test_related_to",
"stix2/test/test_environment.py::test_related_to_no_id",
"stix2/test/test_environment.py::test_related_to_by_source",
"stix2/test/test_environment.py::test_related_to_by_target",
"stix2/test/test_tool.py::test_tool_example",
"stix2/test/test_tool.py::test_parse_tool[{\\n",
"stix2/test/test_tool.py::test_parse_tool[data1]",
"stix2/test/test_tool.py::test_tool_no_workbench_wrappers",
"stix2/test/test_workbench.py::test_workbench_environment",
"stix2/test/test_workbench.py::test_workbench_get_all_attack_patterns",
"stix2/test/test_workbench.py::test_workbench_get_all_campaigns",
"stix2/test/test_workbench.py::test_workbench_get_all_courses_of_action",
"stix2/test/test_workbench.py::test_workbench_get_all_identities",
"stix2/test/test_workbench.py::test_workbench_get_all_indicators",
"stix2/test/test_workbench.py::test_workbench_get_all_intrusion_sets",
"stix2/test/test_workbench.py::test_workbench_get_all_malware",
"stix2/test/test_workbench.py::test_workbench_get_all_observed_data",
"stix2/test/test_workbench.py::test_workbench_get_all_reports",
"stix2/test/test_workbench.py::test_workbench_get_all_threat_actors",
"stix2/test/test_workbench.py::test_workbench_get_all_tools",
"stix2/test/test_workbench.py::test_workbench_get_all_vulnerabilities",
"stix2/test/test_workbench.py::test_workbench_relationships",
"stix2/test/test_workbench.py::test_workbench_created_by",
"stix2/test/test_workbench.py::test_workbench_related",
"stix2/test/test_workbench.py::test_workbench_related_with_filters",
"stix2/test/test_workbench.py::test_add_data_source",
"stix2/test/test_workbench.py::test_additional_filter",
"stix2/test/test_workbench.py::test_additional_filters_list",
"stix2/test/test_workbench.py::test_default_creator",
"stix2/test/test_workbench.py::test_default_created_timestamp",
"stix2/test/test_workbench.py::test_default_external_refs",
"stix2/test/test_workbench.py::test_default_object_marking_refs"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 2,347 | [
"docs/conf.py",
"stix2/datastore/__init__.py",
"stix2/environment.py",
".isort.cfg",
"docs/api/stix2.workbench.rst",
"tox.ini",
"stix2/__init__.py",
"stix2/datastore/filters.py",
"docs/guide/workbench.ipynb",
"stix2/workbench.py"
]
| [
"docs/conf.py",
"stix2/datastore/__init__.py",
"stix2/environment.py",
".isort.cfg",
"docs/api/stix2.workbench.rst",
"tox.ini",
"stix2/__init__.py",
"stix2/datastore/filters.py",
"docs/guide/workbench.ipynb",
"stix2/workbench.py"
]
|
|
weecology__retriever-1125 | e55d4d3637c62df8a14f349208759cbe133986eb | 2018-03-31 10:23:02 | 9da7b5b54e6391fcff62401fc015711f76de5722 | henrykironde: I feel like this is a hacky way to solve this issue, but it is one of those edge cases.
As mention in the issue #1117, if users follow the format of the required opts for the table-name provided in the engine, we do not get this issue.
_For example_
**Mysql and Postgres:** `{db}.{table}`, `{db}.prefix{table}`
**Sqlite:** `{db}_{table}`, `{db}_prefix{table}`
When a user provides something like `retriever install csv portal -t march`, the engine will fail to maintain the format for the table name.
To avoid this, we are getting the default required opt for that engine `{db}_{table}.csv` and replacing the `{table}` value with `prefix{table}`.
The final format will look like `{db}_prefix{table}.csv`
henrykironde: I will add some multi table tests as I add the fetch function. #1069 | diff --git a/retriever/engines/__init__.py b/retriever/engines/__init__.py
index 0f4de6b..4156f7b 100644
--- a/retriever/engines/__init__.py
+++ b/retriever/engines/__init__.py
@@ -1,4 +1,6 @@
"""Contains DBMS-specific Engine implementations."""
+import os
+
from retriever.lib.engine import Engine
engines = [
@@ -47,6 +49,11 @@ def choose_engine(opts, choice=True):
if (enginename == thisengine.name.lower() or thisengine.abbreviation and
enginename == thisengine.abbreviation):
engine = thisengine
+ if 'table_name' in opts:
+ if opts['table_name'] and "{table}" not in opts['table_name'] or "{db}" not in opts['table_name']:
+ for opt in engine.required_opts:
+ if opt[0] == 'table_name':
+ raise Exception('Accepted Table format {fom}'.format(fom=opt[2]))
engine.opts = opts
return engine
diff --git a/retriever/engines/csvengine.py b/retriever/engines/csvengine.py
index 18e3bf2..871de5e 100644
--- a/retriever/engines/csvengine.py
+++ b/retriever/engines/csvengine.py
@@ -88,8 +88,7 @@ class engine(Engine):
def to_csv(self):
"""Export sorted version of CSV file"""
for keys in self.script.tables:
- table_name = self.opts['table_name'].format(db=self.db_name, table=keys)
- sort_csv(table_name)
+ sort_csv(self.table_name())
def get_connection(self):
"""Gets the db connection."""
diff --git a/retriever/engines/jsonengine.py b/retriever/engines/jsonengine.py
index 4b2a108..aee229d 100644
--- a/retriever/engines/jsonengine.py
+++ b/retriever/engines/jsonengine.py
@@ -114,7 +114,7 @@ class engine(Engine):
def to_csv(self):
"""Export table from json engine to CSV file"""
for keys in list(self.script.tables):
- table_name = self.opts['table_name'].format(db=self.db_name, table=keys)
+ table_name = self.table_name()
header = self.script.tables[keys].get_insert_columns(join=False, create=True)
csv_outfile = json2csv(table_name, header_values=header)
sort_csv(csv_outfile)
diff --git a/retriever/engines/xmlengine.py b/retriever/engines/xmlengine.py
index e03e43c..cc5816f 100644
--- a/retriever/engines/xmlengine.py
+++ b/retriever/engines/xmlengine.py
@@ -109,7 +109,7 @@ class engine(Engine):
def to_csv(self):
"""Export table from xml engine to CSV file."""
for keys in list(self.script.tables):
- table_name = self.opts['table_name'].format(db=self.db_name, table=keys)
+ table_name = self.table_name()
header = self.script.tables[keys].get_insert_columns(join=False, create=True)
csv_outfile = xml2csv(table_name, header_values=header)
sort_csv(csv_outfile)
| Error on custom table name.
The current state of the retriever uses the required_opts `"table_name"` to customize the installation of dataset into dataset. This works fine for single table datasets however, it over rides the tables when the dataset contains multi tables. The best way to use this option is by using this custom `table_name` as a prefix to the table name.
For example:
"{db}.{prefix_name}_{table}")
| weecology/retriever | diff --git a/test/test_regression.py b/test/test_regression.py
index a551828..15081ec 100644
--- a/test/test_regression.py
+++ b/test/test_regression.py
@@ -64,7 +64,7 @@ def setup_module():
def teardown_module():
"""Cleanup temporary output files and return to root directory."""
os.chdir(retriever_root_dir)
- os.system("rm -r output*")
+ os.system("rm -r *output*")
shutil.rmtree(os.path.join(retriever_root_dir, "raw_data"))
os.system("rm testdb.sqlite")
@@ -141,8 +141,8 @@ def test_xmlengine_regression(dataset, expected, tmpdir):
"""Check for xmlenginee regression."""
xml_engine.opts = {
'engine': 'xml',
- 'table_name': 'output_file_{table}.xml'}
- interface_opts = {'table_name': 'output_file_{table}.xml'}
+ 'table_name': '{db}_output_{table}.xml'}
+ interface_opts = {'table_name': '{db}_output_{table}.xml'}
assert get_csv_md5(dataset, xml_engine, tmpdir, install_xml, interface_opts) == expected
@@ -151,8 +151,8 @@ def test_jsonengine_regression(dataset, expected, tmpdir):
"""Check for jsonenginee regression."""
json_engine.opts = {
'engine': 'json',
- 'table_name': 'output_file_{table}.json'}
- interface_opts = {'table_name': 'output_file_{table}.json'}
+ 'table_name': '{db}_output_{table}.json'}
+ interface_opts = {'table_name': '{db}_output_{table}.json'}
assert get_csv_md5(dataset, json_engine, tmpdir, install_json, interface_opts) == expected
@@ -161,8 +161,8 @@ def test_csv_regression(dataset, expected, tmpdir):
"""Check csv regression."""
csv_engine.opts = {
'engine': 'csv',
- 'table_name': 'output_file_{table}.csv'}
- interface_opts = {'table_name': 'output_file_{table}.csv'}
+ 'table_name': '{db}_output_{table}.csv'}
+ interface_opts = {'table_name': '{db}_output_{table}.csv'}
assert get_csv_md5(dataset, csv_engine, tmpdir, install_csv, interface_opts) == expected
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
argcomplete==3.1.2
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
execnet==1.9.0
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
numpydoc==1.1.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
psycopg2==2.7.7
py==1.11.0
Pygments==2.14.0
PyMySQL==1.0.2
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
pytz==2025.2
requests==2.27.1
-e git+https://github.com/weecology/retriever.git@e55d4d3637c62df8a14f349208759cbe133986eb#egg=retriever
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-py3doc-enhanced-theme==2.4.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
tqdm==4.64.1
typing_extensions==4.1.1
urllib3==1.26.20
xlrd==2.0.1
zipp==3.6.0
| name: retriever
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argcomplete==3.1.2
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- execnet==1.9.0
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- numpydoc==1.1.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- psycopg2==2.7.7
- py==1.11.0
- pygments==2.14.0
- pymysql==1.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-py3doc-enhanced-theme==2.4.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tqdm==4.64.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- xlrd==2.0.1
- zipp==3.6.0
prefix: /opt/conda/envs/retriever
| [
"test/test_regression.py::test_xmlengine_regression[bird_size-98dcfdca19d729c90ee1c6db5221b775]",
"test/test_regression.py::test_xmlengine_regression[mammal_masses-6fec0fc63007a4040d9bbc5cfcd9953e]",
"test/test_regression.py::test_jsonengine_regression[bird_size-98dcfdca19d729c90ee1c6db5221b775]",
"test/test_regression.py::test_jsonengine_regression[mammal_masses-6fec0fc63007a4040d9bbc5cfcd9953e]",
"test/test_regression.py::test_csv_regression[bird_size-98dcfdca19d729c90ee1c6db5221b775]",
"test/test_regression.py::test_csv_regression[mammal_masses-6fec0fc63007a4040d9bbc5cfcd9953e]"
]
| [
"test/test_regression.py::test_postgres_regression[bird_size-98dcfdca19d729c90ee1c6db5221b775]",
"test/test_regression.py::test_postgres_regression[mammal_masses-6fec0fc63007a4040d9bbc5cfcd9953e]",
"test/test_regression.py::test_mysql_regression[bird_size-98dcfdca19d729c90ee1c6db5221b775]",
"test/test_regression.py::test_mysql_regression[mammal_masses-6fec0fc63007a4040d9bbc5cfcd9953e]"
]
| [
"test/test_regression.py::test_sqlite_regression[bird_size-98dcfdca19d729c90ee1c6db5221b775]",
"test/test_regression.py::test_sqlite_regression[mammal_masses-6fec0fc63007a4040d9bbc5cfcd9953e]",
"test/test_regression.py::test_download_regression[mt-st-helens-veg-d5782e07241cb3fe9f5b2e1bb804a794]",
"test/test_regression.py::test_download_regression[bird-size-45c7507ae945868c71b5179f7682ea9c]",
"test/test_regression.py::test_download_regression[mammal-masses-b54b80d0d1959bdea0bb8a59b70fa871]"
]
| []
| MIT License | 2,348 | [
"retriever/engines/__init__.py",
"retriever/engines/jsonengine.py",
"retriever/engines/xmlengine.py",
"retriever/engines/csvengine.py"
]
| [
"retriever/engines/__init__.py",
"retriever/engines/jsonengine.py",
"retriever/engines/xmlengine.py",
"retriever/engines/csvengine.py"
]
|
tornadoweb__tornado-2338 | 35a538f50e704e348926e1b113bc03328a1da9f2 | 2018-03-31 22:11:44 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | diff --git a/tornado/ioloop.py b/tornado/ioloop.py
index f6ec177b..48700139 100644
--- a/tornado/ioloop.py
+++ b/tornado/ioloop.py
@@ -1213,11 +1213,31 @@ class PeriodicCallback(object):
def _schedule_next(self):
if self._running:
- current_time = self.io_loop.time()
-
- if self._next_timeout <= current_time:
- callback_time_sec = self.callback_time / 1000.0
- self._next_timeout += (math.floor((current_time - self._next_timeout) /
- callback_time_sec) + 1) * callback_time_sec
-
+ self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _update_next(self, current_time):
+ callback_time_sec = self.callback_time / 1000.0
+ if self._next_timeout <= current_time:
+ # The period should be measured from the start of one call
+ # to the start of the next. If one call takes too long,
+ # skip cycles to get back to a multiple of the original
+ # schedule.
+ self._next_timeout += (math.floor((current_time - self._next_timeout) /
+ callback_time_sec) + 1) * callback_time_sec
+ else:
+ # If the clock moved backwards, ensure we advance the next
+ # timeout instead of recomputing the same value again.
+ # This may result in long gaps between callbacks if the
+ # clock jumps backwards by a lot, but the far more common
+ # scenario is a small NTP adjustment that should just be
+ # ignored.
+ #
+ # Note that on some systems if time.time() runs slower
+ # than time.monotonic() (most common on windows), we
+ # effectively experience a small backwards time jump on
+ # every iteration because PeriodicCallback uses
+ # time.time() while asyncio schedules callbacks using
+ # time.monotonic().
+ # https://github.com/tornadoweb/tornado/issues/2333
+ self._next_timeout += callback_time_sec
| ioloop: PeriodicCallback executes too often on windows
## Here is the code:
import math
import logging
from crontab import CronTab
from tornado.ioloop import PeriodicCallback, IOLoop
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class CronTabCallback(PeriodicCallback):
def __init__(self, callback, schedule):
self._callback = callback
self._crontab = CronTab(schedule)
super(CronTabCallback, self).__init__(self.run, self._calc_callbacktime())
def _calc_callbacktime(self, now=None):
return math.ceil(self._crontab.next(now)) * 1000.0
def run(self):
return self._callback()
def _schedule_next(self):
self.callback_time = self._calc_callbacktime()
logging.info('calc ---------------------')
logging.info('delay %s' % self.callback_time)
logging.info('last execute %s' % self._next_timeout)
last = self._next_timeout
super(CronTabCallback, self)._schedule_next()
if last == self._next_timeout:
logging.error('error !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
logging.info('current: %s' % self.io_loop.time())
logging.info('calc next: %s' % self._next_timeout)
logging.info('--------------------------\n')
def crontab(schedule):
def decorator(func):
CronTabCallback(func, schedule).start()
return func
return decorator
@crontab('*/1 * * * *')
def run():
logging.info('execute ... \n')
if __name__ == '__main__':
IOLoop.current().start()
## Here is the console log
2018-03-30 11:33:00,311 - asyncio - DEBUG - Using selector: SelectSelector
2018-03-30 11:33:00,316 - root - INFO - calc ---------------------
2018-03-30 11:33:00,316 - root - INFO - delay 60000.0
2018-03-30 11:33:00,316 - root - INFO - last execute 1522380780.3169544
2018-03-30 11:33:00,316 - root - INFO - current: 1522380780.3169544
2018-03-30 11:33:00,316 - root - INFO - **calc next: 1522380840.3169544**
2018-03-30 11:33:00,316 - root - INFO - --------------------------
**2018-03-30 11:34:00,313** - root - INFO - execute ...
2018-03-30 11:34:00,313 - root - INFO - calc ---------------------
2018-03-30 11:34:00,313 - root - INFO - delay 60000.0
2018-03-30 11:34:00,313 - root - INFO - last execute 1522380840.3169544
2018-03-30 11:34:00,313 - root - ERROR - error !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2018-03-30 11:34:00,313 - root - INFO - current: 1522380840.3139544
2018-03-30 11:34:00,313 - root - INFO - calc next: 1522380840.3169544
2018-03-30 11:34:00,313 - root - INFO - --------------------------
2018-03-30 11:34:00,318 - root - INFO - execute ...
2018-03-30 11:34:00,318 - root - INFO - calc ---------------------
2018-03-30 11:34:00,318 - root - INFO - delay 60000.0
2018-03-30 11:34:00,318 - root - INFO - last execute 1522380840.3169544
2018-03-30 11:34:00,318 - root - INFO - current: 1522380840.3189545
2018-03-30 11:34:00,318 - root - INFO - calc next: 1522380900.3169544
2018-03-30 11:34:00,318 - root - INFO - --------------------------
## Environment:
OS: windows 7
Python: python 3.6
Dependent library: crontab 0.22.0
Tornado 4.5.1 python2 (ok) python3(ok)
Tornado 5.0.1 python2 (ok) python3 (linux ok, window has issue) | tornadoweb/tornado | diff --git a/tornado/test/ioloop_test.py b/tornado/test/ioloop_test.py
index 09f71c5d..9f7c1847 100644
--- a/tornado/test/ioloop_test.py
+++ b/tornado/test/ioloop_test.py
@@ -789,6 +789,62 @@ class TestPeriodicCallback(unittest.TestCase):
io_loop.close()
+class TestPeriodicCallbackMath(unittest.TestCase):
+ def simulate_calls(self, pc, durations):
+ """Simulate a series of calls to the PeriodicCallback.
+
+ Pass a list of call durations in seconds (negative values
+ work to simulate clock adjustments during the call, or more or
+ less equivalently, between calls). This method returns the
+ times at which each call would be made.
+ """
+ calls = []
+ now = 1000
+ pc._next_timeout = now
+ for d in durations:
+ pc._update_next(now)
+ calls.append(pc._next_timeout)
+ now = pc._next_timeout + d
+ return calls
+
+ def test_basic(self):
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, [0] * 5),
+ [1010, 1020, 1030, 1040, 1050])
+
+ def test_overrun(self):
+ # If a call runs for too long, we skip entire cycles to get
+ # back on schedule.
+ call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
+ expected = [
+ 1010, 1020, 1030, # first 3 calls on schedule
+ 1050, 1070, # next 2 delayed one cycle
+ 1100, 1130, # next 2 delayed 2 cycles
+ 1170, 1210, # next 2 delayed 3 cycles
+ 1220, 1230, # then back on schedule.
+ ]
+
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, call_durations),
+ expected)
+
+ def test_clock_backwards(self):
+ pc = PeriodicCallback(None, 10000)
+ # Backwards jumps are ignored, potentially resulting in a
+ # slightly slow schedule (although we assume that when
+ # time.time() and time.monotonic() are different, time.time()
+ # is getting adjusted by NTP and is therefore more accurate)
+ self.assertEqual(self.simulate_calls(pc, [-2, -1, -3, -2, 0]),
+ [1010, 1020, 1030, 1040, 1050])
+
+ # For big jumps, we should perhaps alter the schedule, but we
+ # don't currently. This trace shows that we run callbacks
+ # every 10s of time.time(), but the first and second calls are
+ # 110s of real time apart because the backwards jump is
+ # ignored.
+ self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]),
+ [1010, 1020, 1030])
+
class TestIOLoopConfiguration(unittest.TestCase):
def run_python(self, *statements):
statements = [
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 5.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@35a538f50e704e348926e1b113bc03328a1da9f2#egg=tornado
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_basic",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_clock_backwards",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_overrun"
]
| []
| [
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_from_signal",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_from_signal_other_thread",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_return_sequence",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_wakeup",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_wakeup_other_thread",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_while_closing",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_timeout_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_timeout_timedelta",
"tornado/test/ioloop_test.py::TestIOLoop::test_call_at_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_call_later_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_close_file_object",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging_future",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging_native_coro",
"tornado/test/ioloop_test.py::TestIOLoop::test_handle_callback_exception",
"tornado/test/ioloop_test.py::TestIOLoop::test_handler_callback_file_object",
"tornado/test/ioloop_test.py::TestIOLoop::test_mixed_fd_fileobj",
"tornado/test/ioloop_test.py::TestIOLoop::test_multiple_add",
"tornado/test/ioloop_test.py::TestIOLoop::test_read_while_writeable",
"tornado/test/ioloop_test.py::TestIOLoop::test_reentrant",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_handler_from_handler",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_after_fire",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_cleanup",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_from_timeout",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_without_add",
"tornado/test/ioloop_test.py::TestIOLoop::test_spawn_callback",
"tornado/test/ioloop_test.py::TestIOLoop::test_timeout_with_arguments",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_default_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_force_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_non_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrentAsync::test_clear_without_current",
"tornado/test/ioloop_test.py::TestIOLoopAddCallback::test_pre_wrap",
"tornado/test/ioloop_test.py::TestIOLoopAddCallback::test_pre_wrap_with_args",
"tornado/test/ioloop_test.py::TestIOLoopAddCallbackFromSignal::test_pre_wrap",
"tornado/test/ioloop_test.py::TestIOLoopAddCallbackFromSignal::test_pre_wrap_with_args",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_add_future_stack_context",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_add_future_threads",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_run_in_executor_gen",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_run_in_executor_native",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_set_default_executor",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_async_exception",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_async_result",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_current",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_native_coroutine",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_sync_exception",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_sync_result",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_timeout",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_asyncio",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_asyncio_main",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_default"
]
| []
| Apache License 2.0 | 2,349 | [
"tornado/ioloop.py"
]
| [
"tornado/ioloop.py"
]
|
|
EVEprosper__ProsperCommon-23 | 9cb24f7eefe6a43d043fc541ad89d6e434aef0a4 | 2018-03-31 23:03:40 | 9cb24f7eefe6a43d043fc541ad89d6e434aef0a4 | diff --git a/.coveragerc b/.coveragerc
index 4fe80c4..262aea2 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,5 +1,8 @@
[report]
- omit = prosper/common/_version.py
+ omit =
+ prosper/common/_version.py
+ prosper/common/exceptions.py
+ prosper/common/__init__.py
[paths]
source = prosper/common
\ No newline at end of file
diff --git a/docs/flask_utils.rst b/docs/flask_utils.rst
new file mode 100644
index 0000000..e69de29
diff --git a/docs/prosper_cli.rst b/docs/prosper_cli.rst
index c29feee..adee102 100644
--- a/docs/prosper_cli.rst
+++ b/docs/prosper_cli.rst
@@ -7,6 +7,8 @@ Help create uniform templates and reduce boilerplate, PropsperCLI gives a common
Using prosper_cli
=================
+**Generic CLI**
+
.. code-block:: python
"""my_app.py"""
@@ -31,6 +33,46 @@ Using prosper_cli
if __name__ == '__main__':
MyApplication.run()
+-----
+
+**Flask Launcher**
+
+.. code-block:: python
+
+ """my_app.py"""
+ from os import path
+ import prosper.common.prosper_cli as p_cli
+ from _version import __version__
+ from endpoints import APP
+
+ class MyFlaskApplication(p_cli.FlaskLauncher):
+ PROGNAME = 'my_app_name' # REQUIRED
+ VERSION = __version__
+
+ config_path = path.join(
+ path.abspath(path.dirname(__file__)),
+ 'my_config_file.cfg'
+ )
+
+ def main(self):
+ """actual logic goes here"""
+ self.notify_launch()
+
+ APP.run(
+ host=self.get_host(),
+ port=self.port,
+ debug=self.debug,
+ threaded=self.threaded,
+ process=self.workers,
+ )
+
+ if __name__ == '__main__':
+ MyFlaskApplication.run()
+
+Meant to be used with `ProsperCookiecutters`_ for debug launching Flask apps.
+
+-----
+
By using the Prosper framework, the following is handled automatically:
- Help/version info handled by `Plumbum`_
@@ -44,4 +86,5 @@ By using the Prosper framework, the following is handled automatically:
- Standardized log formatting
- Platform and version information for webhook loggers
-.. _Plumbum: http://plumbum.readthedocs.io/en/latest/cli.html
\ No newline at end of file
+.. _Plumbum: http://plumbum.readthedocs.io/en/latest/cli.html
+.. _ProsperCookiecutters: https://github.com/EVEprosper/ProsperCookiecutters
diff --git a/prosper/common/flask_utils.py b/prosper/common/flask_utils.py
new file mode 100644
index 0000000..0fe68ce
--- /dev/null
+++ b/prosper/common/flask_utils.py
@@ -0,0 +1,34 @@
+"""flask_utils: general purpose Flask helpers for Docker/Debug"""
+from os import path, environ
+
+def make_gunicorn_config(
+ _gunicorn_config_path='',
+):
+ """makes gunicorn.conf file for launching in docker
+
+ Notes:
+ https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
+ renders gunicorn.config (python) file in running dir
+ looks for GUNICORN_{option} in environment vars
+ Args:
+ _gunicorn_config_path (str): TEST HOOK, path to dump file
+
+ """
+ gunicorn_py = '''"""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config
+Based off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/
+"""
+from os import environ
+
+for key, value in environ.items():
+ if key.startswith('GUNICORN_'):
+ gunicorn_key = key.split('_', 1)[1].lower()
+ locals()[gunicorn_key] = value
+
+'''
+
+ gunicorn_file = 'gunicorn.conf'
+ if _gunicorn_config_path:
+ gunicorn_file = _gunicorn_config_path
+
+ with open(gunicorn_file, 'w') as gunicorn_cfg:
+ gunicorn_cfg.write(gunicorn_py)
diff --git a/prosper/common/prosper_cli.py b/prosper/common/prosper_cli.py
index 701558b..8781f3f 100644
--- a/prosper/common/prosper_cli.py
+++ b/prosper/common/prosper_cli.py
@@ -1,4 +1,6 @@
"""Plumbum CLI wrapper for easier/common application writing"""
+import logging
+import os
import platform
from plumbum import cli
@@ -32,14 +34,14 @@ class ProsperApplication(cli.Application):
['--config'],
str,
help='Override default config')
- def override_config(self, config_path):
+ def override_config(self, config_path): # pragma: no cover
"""override config object with local version"""
self.config_path = config_path
@cli.switch(
['--dump-config'],
help='Dump default config to stdout')
- def dump_config(self):
+ def dump_config(self): # pragma: no cover
"""dumps configfile to stdout so users can edit/implement their own"""
with open(self.config_path, 'r') as cfg_fh:
base_config = cfg_fh.read()
@@ -67,15 +69,15 @@ class ProsperApplication(cli.Application):
platform=platform.node(),
version=self.VERSION
)
- if self.config.get('LOGGING', 'discord_webhook'):
+ if self.config.get_option('LOGGING', 'discord_webhook'):
log_builder.configure_discord_logger(
custom_args=id_string
)
- if self.config.get('LOGGING', 'slack_webhook'):
+ if self.config.get_option('LOGGING', 'slack_webhook'):
log_builder.configure_slack_logger(
custom_args=id_string
)
- if self.config.get('LOGGING', 'hipchat_webhook'):
+ if self.config.get_option('LOGGING', 'hipchat_webhook'):
log_builder.configure_hipchat_logger(
custom_args=id_string
)
@@ -93,19 +95,78 @@ class ProsperApplication(cli.Application):
self._config = p_config.ProsperConfig(self.config_path)
return self._config
-class ProsperTESTApplication(ProsperApplication):
+
+OPTION_ARGS = (
+ 'debug', 'port', 'threaded', 'workers'
+)
+class FlaskLauncher(ProsperApplication):
+ """wrapper for launching (DEBUG) Flask apps"""
+
+ port = cli.SwitchAttr(
+ ['p', '--port'],
+ int,
+ help='port to launch Flask app on',
+ default=int(os.environ.get('PROSPER_FLASK__port', 8000)),
+ )
+ threaded = cli.SwitchAttr(
+ ['t', '--threaded'],
+ bool,
+ help='Launch Werkzeug in threaded mode',
+ default=os.environ.get('PROSPER_FLASK__threadded', False),
+ )
+ workers = cli.SwitchAttr(
+ ['w', '--workers'],
+ int,
+ help='Launch Werkzeug with multiple worker threads',
+ default=int(os.environ.get('PROSPER_FLASK__workers', 1)),
+ )
+
+
+ def get_host(self):
+ """returns appropriate host configuration
+
+ Returns:
+ str: host IP (127.0.0.1 or 0.0.0.0)
+
+ """
+ if self.debug:
+ return '127.0.0.1'
+ else:
+ return '0.0.0.0'
+
+ def notify_launch(self, log_level='ERROR'):
+ """logs launcher message before startup
+
+ Args:
+ log_level (str): level to notify at
+
+ """
+ if not self.debug:
+ self.logger.log(
+ logging.getLevelName(log_level),
+ 'LAUNCHING %s -- %s', self.PROGNAME, platform.node()
+ )
+ flask_options = {
+ key: getattr(self, key) for key in OPTION_ARGS
+ }
+ flask_options['host'] = self.get_host()
+
+ self.logger.info('OPTIONS: %s', flask_options)
+
+
+class ProsperTESTApplication(ProsperApplication): # pragma: no cover
"""test wrapper for CLI tests"""
- from os import path
PROGNAME = 'CLITEST'
VERSION = '0.0.0'
- HERE = path.abspath(path.dirname(__file__))
+ HERE = os.path.abspath(os.path.dirname(__file__))
- config_path = path.join(HERE, 'common_config.cfg')
+ config_path = os.path.join(HERE, 'common_config.cfg')
def main(self):
"""do stuff"""
self.logger.info('HELLO WORLD')
-if __name__ == '__main__':
+
+if __name__ == '__main__': # pragma: no cover
ProsperTESTApplication.run() # test hook
diff --git a/prosper/common/prosper_config.py b/prosper/common/prosper_config.py
index efae88a..ef8810f 100644
--- a/prosper/common/prosper_config.py
+++ b/prosper/common/prosper_config.py
@@ -10,10 +10,6 @@ from configparser import ExtendedInterpolation
import warnings
import logging
-DEFAULT_LOGGER = logging.getLogger('NULL')
-DEFAULT_LOGGER.addHandler(logging.NullHandler())
-
-HERE = path.abspath(path.dirname(__file__))
class ProsperConfig(object):
"""configuration handler for all prosper projects
@@ -26,19 +22,22 @@ class ProsperConfig(object):
4. environment varabile
5. args_default -- function default w/o global config
+ Args:
+ config_filename (str): path to config file
+ local_filepath_override (str): path modifier for private config file
+
Attributes:
global_config (:obj:`configparser.ConfigParser`)
local_config (:obj:`configparser.ConfigParser`)
config_filename (str): filename of global/tracked/default .cfg file
local_config_filename (str): filename for local/custom .cfg file
+
"""
- _debug_mode = False
+ logger = logging.getLogger('ProsperCommon')
def __init__(
self,
config_filename,
- local_filepath_override=None,
- logger=DEFAULT_LOGGER,
- debug_mode=_debug_mode
+ local_filepath_override='',
):
"""get the config filename for initializing data structures
@@ -49,7 +48,6 @@ class ProsperConfig(object):
debug_mode (bool, optional): enable debug modes for config helper
"""
- self.logger = logger
self.config_filename = config_filename
self.local_config_filename = get_local_config_filepath(config_filename)
if local_filepath_override:
@@ -57,13 +55,13 @@ class ProsperConfig(object):
#TODO: force filepaths to abspaths?
self.global_config, self.local_config = get_configs(
config_filename,
- self.local_config_filename
+ self.local_config_filename,
)
def get(
self,
section_name,
- key_name
+ key_name,
):
"""Replicate configparser.get() functionality
@@ -80,13 +78,13 @@ class ProsperConfig(object):
value = self.local_config.get(section_name, key_name)
except Exception as error_msg:
self.logger.warning(
- '{0}.{1} not found in local config'.format(section_name, key_name)
+ '%s.%s not found in local config', section_name, key_name
)
try:
value = self.global_config.get(section_name, key_name)
except Exception as error_msg:
self.logger.error(
- '{0}.{1} not found in global config'.format(section_name, key_name)
+ '%s.%s not found in global config', section_name, key_name
)
raise KeyError('Could not find option in local/global config')
@@ -97,7 +95,7 @@ class ProsperConfig(object):
section_name,
key_name,
args_option=None,
- args_default=None
+ args_default=None,
):
"""evaluates the requested option and returns the correct value
@@ -119,7 +117,6 @@ class ProsperConfig(object):
(str) appropriate response as per priority order
"""
- self.logger.debug('picking config')
if args_option != args_default and\
args_option is not None:
self.logger.debug('-- using function args')
@@ -127,28 +124,28 @@ class ProsperConfig(object):
section_info = section_name + '.' + key_name
- local_option = None
+ option = None
try:
- local_option = self.local_config[section_name][key_name]
- except KeyError:
- self.logger.debug(section_info + 'not found in local config')
- if local_option:
+ option = self.local_config[section_name][key_name]
self.logger.debug('-- using local config')
- return local_option
+ if option:
+ return option
+ except (KeyError, configparser.NoOptionError, configparser.NoSectionError):
+ self.logger.debug('`%s` not found in local config', section_info)
- global_option = None
try:
- global_option = self.global_config[section_name][key_name]
- except KeyError:# as error_msg:
- self.logger.warning(section_info + 'not found in global config')
- if global_option:
+ option = self.global_config[section_name][key_name]
self.logger.debug('-- using global config')
- return global_option
+ if option:
+ return option
+ except (KeyError, configparser.NoOptionError, configparser.NoSectionError):
+ self.logger.warning('`%s` not found in global config', section_info)
env_option = get_value_from_environment(section_name, key_name, logger=self.logger)
if env_option:
self.logger.debug('-- using environment value')
return env_option
+
self.logger.debug('-- using default argument')
return args_default #If all esle fails return the given default
@@ -161,18 +158,18 @@ def get_value_from_environment(
section_name,
key_name,
envname_pad=ENVNAME_PAD,
- logger=DEFAULT_LOGGER
+ logger=logging.getLogger('ProsperCommon'),
):
"""check environment for key/value pair
Args:
section_name (str): section name
key_name (str): key to look up
- envname_pad (str, optional): namespace padding
- logger (:obj:`logging.logger`, optional): logging handle
+ envname_pad (str): namespace padding
+ logger (:obj:`logging.logger`): logging handle
Returns:
- (str) value in environment
+ str: value in environment
"""
var_name = '{pad}_{section}__{key}'.format(
@@ -181,26 +178,25 @@ def get_value_from_environment(
key=key_name
)
- logger.debug('var_name={0}'.format(var_name))
+ logger.debug('var_name=%s', var_name)
value = getenv(var_name)
- logger.debug('env value={0}'.format(value))
+ logger.debug('env value=%s', value)
return value
def get_configs(
config_filepath,
- local_filepath_override=None,
- debug_mode=False
+ local_filepath_override='',
):
"""go and fetch the global/local configs from file and load them with configparser
Args:
- config_filename (str): path to config
- debug_mode (bool, optional): enable debug modes for config helper
+ config_filepath (str): path to config
+ local_filepath_override (str): secondary place to locate config file
Returns:
- (:obj:`configparser.ConfigParser`) global_config
- (:obj:`configparser.ConfigParser`) local_config
+ ConfigParser: global_config
+ ConfigParser: local_config
"""
global_config = read_config(config_filepath)
@@ -214,13 +210,16 @@ def get_configs(
def read_config(
config_filepath,
- logger=DEFAULT_LOGGER
+ logger=logging.getLogger('ProsperCommon'),
):
"""fetch and parse config file
Args:
config_filepath (str): path to config file. abspath > relpath
- logger (:obj:`logging.Logger`, optional): logger to catch error msgs
+ logger (:obj:`logging.Logger`): logger to catch error msgs
+
+ Raises:
+ FileNotFound: file access issues
"""
config_parser = configparser.ConfigParser(
@@ -229,57 +228,30 @@ def read_config(
delimiters=('='),
inline_comment_prefixes=('#')
)
- logger.debug('config_filepath={0}'.format(config_filepath))
+ logger.debug('config_filepath=%s', config_filepath)
try:
with open(config_filepath, 'r') as filehandle:
config_parser.read_file(filehandle)
except Exception as error_msg:
logger.error(
- 'EXCEPTION - Unable to parse config file' +
- '\r\texception={0}'.format(error_msg) +
- '\r\tconfig_filepath{0}'.format(config_filepath)
+ 'Unable to parse config file: %s', config_filepath, exc_info=True
)
raise error_msg
return config_parser
-def get_config(
- config_filepath,
- local_override=False
-):
- """DEPRECATED: classic v1 config parser. Obsolete by v0.3.0"""
- warnings.warn(
- __name__ + 'replaced with ProsperConfig',
- DeprecationWarning
- )
- config = configparser.ConfigParser(
- interpolation=ExtendedInterpolation(),
- allow_no_value=True,
- delimiters=('='),
- inline_comment_prefixes=('#')
- )
-
- real_config_filepath = get_local_config_filepath(config_filepath)
-
- if local_override: #force lookup tracked config
- real_config_filepath = config_filepath
-
- with open(real_config_filepath, 'r') as filehandle:
- config.read_file(filehandle)
- return config
-
def get_local_config_filepath(
config_filepath,
- force_local=False
+ force_local=False,
):
"""helper for finding local filepath for config
Args:
config_filepath (str): path to local config abspath > relpath
- force_local (bool, optional): force return of _local.cfg version
+ force_local (bool): force return of _local.cfg version
Returns:
- (str): Path to local config, or global if path DNE
+ str: Path to local config, or global if path DNE
"""
local_config_filepath = config_filepath.replace('.cfg', '_local.cfg')
diff --git a/prosper/common/prosper_logging.py b/prosper/common/prosper_logging.py
index 17d6f40..a1bf657 100644
--- a/prosper/common/prosper_logging.py
+++ b/prosper/common/prosper_logging.py
@@ -31,7 +31,6 @@ import re
import requests
import prosper.common.prosper_config as p_config
-import prosper.common.prosper_utilities as p_utils
import prosper.common.exceptions as exceptions
HERE = path.abspath(path.dirname(__file__))
@@ -284,21 +283,18 @@ class ProsperLogger(object):
# vv TODO vv: Test review #
if discord_obj.can_query:
- try:
- discord_handler = HackyDiscordHandler(
- discord_obj,
- discord_recipient
- )
- self._configure_common(
- 'discord_',
- log_level,
- log_format,
- 'Discord',
- discord_handler,
- custom_args=custom_args
- )
- except Exception as error_msg: # FIXME: remove this, if we're just re-throwing?
- raise error_msg
+ discord_handler = HackyDiscordHandler(
+ discord_obj,
+ discord_recipient
+ )
+ self._configure_common(
+ 'discord_',
+ log_level,
+ log_format,
+ 'Discord',
+ discord_handler,
+ custom_args=custom_args
+ )
else:
warnings.warn(
'Unable to execute webhook',
@@ -338,20 +334,17 @@ class ProsperLogger(object):
# Actually build slack logging handler #
# vv TODO vv: Test review #
- try:
- slack_handler = HackySlackHandler(
- slack_webhook
- )
- self._configure_common(
- 'slack_',
- log_level,
- log_format,
- 'Slack',
- slack_handler,
- custom_args=custom_args
- )
- except Exception as error_msg:
- raise error_msg
+ slack_handler = HackySlackHandler(
+ slack_webhook
+ )
+ self._configure_common(
+ 'slack_',
+ log_level,
+ log_format,
+ 'Slack',
+ slack_handler,
+ custom_args=custom_args
+ )
# ^^ TODO ^^ #
def configure_hipchat_logger(
@@ -659,7 +652,7 @@ class HackySlackHandler(logging.Handler):
log_msg (str): actual log message
"""
- if SILENCE_OVERRIDE:
+ if SILENCE_OVERRIDE: # pragma: no cover
return
payload = {
diff --git a/prosper/common/prosper_version.py b/prosper/common/prosper_version.py
index 5382d9e..22edc04 100644
--- a/prosper/common/prosper_version.py
+++ b/prosper/common/prosper_version.py
@@ -10,21 +10,20 @@ import warnings
import semantic_version
-import prosper.common.exceptions as exceptions
+from . import exceptions
DEFAULT_VERSION = '0.0.0'
-DEFAULT_BRANCH = 'master'
TEST_MODE = False
def get_version(
here_path,
- default_version=DEFAULT_VERSION
+ default_version=DEFAULT_VERSION,
):
"""tries to resolve version number
Args:
here_path (str): path to project local dir
- default_version (str, optional): what version to return if all else fails
+ default_version (str): what version to return if all else fails
Returns:
str: semantic_version information for library
@@ -59,7 +58,7 @@ def get_version(
def _read_git_tags(
default_version=DEFAULT_VERSION,
- git_command=['git', 'tag']
+ git_command=('git', 'tag'),
):
"""tries to find current git tag
@@ -67,19 +66,19 @@ def _read_git_tags(
git_command exposed for testing null case
Args:
- default_version (str, optional): what version to make
- git_command (:obj:`list`, optional): subprocess command
+ default_version (str): what version to make
+ git_command (:obj:`list`): subprocess command
Retruns:
str: latest version found, or default
- Raises:
+ Warns:
exceptions.ProsperDefaultVersionWarning: git version not found
"""
- try: # pragma: no cover
+ try:
current_tags = check_output(git_command).splitlines()
- except Exception:
+ except Exception: # pragma: no cover
raise
if not current_tags[0]:
@@ -103,13 +102,13 @@ def _read_git_tags(
def _version_from_file(
path_to_version,
- default_version=DEFAULT_VERSION
+ default_version=DEFAULT_VERSION,
):
"""for PyPI installed versions, just get data from file
Args:
path_to_version (str): abspath to dir where version.txt exists
- default_version (str, optional): fallback version in case of error
+ default_version (str): fallback version in case of error
Returns:
str: current working version
diff --git a/setup.py b/setup.py
index f135241..1b4bddc 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@ def hack_find_packages(include_str):
setuptools.find_packages(path='') doesn't work as intended
Returns:
- (:obj:`list` :obj:`str`) append <include_str>. onto every element of setuptools.find_pacakges() call
+ list: append <include_str>. onto every element of setuptools.find_pacakges() call
"""
new_list = [include_str]
@@ -49,7 +49,7 @@ def include_all_subfiles(*args):
Not recursive, only includes flat files
Returns:
- (:obj:`list` :obj:`str`) list of all non-directories in a file
+ list: list of all non-directories in a file
"""
file_list = []
@@ -70,7 +70,7 @@ class PyTest(TestCommand):
http://doc.pytest.org/en/latest/goodpractices.html#manual-integration
"""
- user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
+ user_options = [('pytest-args=', 'a', 'Arguments to pass to pytest')]
def initialize_options(self):
TestCommand.initialize_options(self)
@@ -81,35 +81,34 @@ class PyTest(TestCommand):
'no:logging',
'--cov=prosper/' + __library_name__,
'--cov-report=term-missing',
- '--cov-config=.coveragerc'
- ] #load defaults here
+ '--cov-config=.coveragerc',
+ ]
def run_tests(self):
import shlex
- #import here, cause outside the eggs aren't loaded
import pytest
pytest_commands = []
- try: #read commandline
+ try:
pytest_commands = shlex.split(self.pytest_args)
- except AttributeError: #use defaults
+ except AttributeError:
pytest_commands = self.pytest_args
errno = pytest.main(pytest_commands)
exit(errno)
class QuietTest(PyTest):
"""overrides to prevent webhook spam while developing"""
- user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
-
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = [
'tests',
'-rx',
+ '-p',
+ 'no:logging',
'-m',
'not loud',
'--cov=prosper/' + __library_name__,
'--cov-report=term-missing',
- '--cov-config=.coveragerc'
+ '--cov-config=.coveragerc',
]
with open('README.rst', 'r', 'utf-8') as f:
@@ -125,34 +124,46 @@ setup(
url='https://github.com/EVEprosper/' + __package_name__,
license='MIT',
classifiers=[
- 'Programming Language :: Python :: 3.5'
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
],
keywords='prosper eve-online webhooks logging configuration-management',
packages=hack_find_packages('prosper'),
include_package_data=True,
package_data={
- '': ['LICENSE', 'README.rst']
+ '': ['LICENSE', 'README.rst'],
+ },
+ entry_points={
+ 'console_scripts': [
+ 'make_gunicorn_config=prosper.common.flask_utils:make_gunicorn_config',
+ ],
},
install_requires=[
'requests',
'semantic_version',
- 'plumbum'
+ 'plumbum',
],
tests_require=[
'pytest>=3.3.0',
'testfixtures',
'pytest_cov',
'mock',
- 'yolk3k'
+ 'yolk3k',
+ 'coverage',
+ 'docker',
],
extras_require={
'dev':[
'sphinx',
'sphinxcontrib-napoleon',
- ]
+ ],
+ 'test':[
+ 'plumbum',
+ 'docker',
+ ],
},
cmdclass={
'test':PyTest,
- 'quiettest': QuietTest
- }
+ 'quiet': QuietTest,
+ },
)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..e69de29
diff --git a/prosper/common/prosper_utilities.py b/tests/helpers.py
similarity index 66%
rename from prosper/common/prosper_utilities.py
rename to tests/helpers.py
index be00090..c993169 100644
--- a/prosper/common/prosper_utilities.py
+++ b/tests/helpers.py
@@ -1,13 +1,35 @@
-'''utilities.py: worker functions for CREST calls'''
-
+"""TEMPORARY: ported config helpers to ProsperTestHelpers"""
+import configparser
from os import path
-import logging
from datetime import datetime
+import warnings
+
+import prosper.common.prosper_config as p_config
+
+def get_config(
+ config_filepath,
+ local_override=False
+):
+ """DEPRECATED: classic v1 config parser. Obsolete by v0.3.0"""
+ warnings.warn(
+ __name__ + 'replaced with ProsperConfig',
+ DeprecationWarning
+ )
+ config = configparser.ConfigParser(
+ interpolation=configparser.ExtendedInterpolation(),
+ allow_no_value=True,
+ delimiters=('='),
+ inline_comment_prefixes=('#')
+ )
+
+ real_config_filepath = p_config.get_local_config_filepath(config_filepath)
-from prosper.common.prosper_config import get_config, get_local_config_filepath
+ if local_override: #force lookup tracked config
+ real_config_filepath = config_filepath
-DEFAULT_LOGGER = logging.getLogger('NULL')
-DEFAULT_LOGGER.addHandler(logging.NullHandler())
+ with open(real_config_filepath, 'r') as filehandle:
+ config.read_file(filehandle)
+ return config
def compare_config_files(config_filepath):
"""compares prod config file vs local version
@@ -16,7 +38,7 @@ def compare_config_files(config_filepath):
config_filepath (str): path to config file
Returns:
- (:obj:`dict`) description of unique keys between both configs
+ dict: description of unique keys between both configs
"""
tracked_config = get_config(config_filepath, True)
@@ -24,7 +46,7 @@ def compare_config_files(config_filepath):
unique_values = {}
- if not path.isfile(get_local_config_filepath(config_filepath)): #pragma: no cover
+ if not path.isfile(p_config.get_local_config_filepath(config_filepath)): #pragma: no cover
#pytest.skip('no local .cfg found, skipping')
return None
@@ -65,8 +87,8 @@ def find_unique_keys(base_config, comp_config, base_name):
base_name (str): name to tag mismatches with
Returns:
- (:obj:`list`): unique sections from ConfigParser
- (:obj:`list`): unique keys from ConfigParser sections
+ list: unique sections from ConfigParser
+ list: unique keys from ConfigParser sections
"""
unique_keys = []
| Add make_gunicorn_config entry_point for Flask apps in Docker
Make https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ easier to access by adding `make_gunicorn_config` to `entry_points` | EVEprosper/ProsperCommon | diff --git a/tests/test_flask_utils.py b/tests/test_flask_utils.py
new file mode 100644
index 0000000..748a6f7
--- /dev/null
+++ b/tests/test_flask_utils.py
@@ -0,0 +1,64 @@
+"""test_flask_utils: validates expected behavior for prosper.common.flask_utils"""
+import atexit
+import importlib.util
+from os import path, environ, remove
+import platform
+
+import pytest
+from plumbum import local
+
+import prosper.common.flask_utils as flask_utils
+
+HERE = path.abspath(path.dirname(__file__))
+ROOT = path.dirname(HERE)
+
+python = local['python']
+if platform.system() == 'Windows':
+ which = local['where']
+else:
+ which = local['which']
+
+
+def atexit_remove_file(filepath):
+ """atexit handler to remove tempfiles and avoid clutter"""
+ print('ATEXIT removing: ' + path.abspath(filepath))
+ remove(filepath)
+ assert not path.isfile(filepath)
+
+def test_cli():
+ """make sure entry_point/console_script does what it says"""
+ # TODO: local.cwd() swapping to test dirs
+ gunicorn_conf = local[which('make_gunicorn_config').rstrip()]
+ if path.isfile('gunicorn.conf'):
+ remove('gunicorn.conf')
+
+ gunicorn_conf()
+
+ assert path.isfile('gunicorn.conf')
+ atexit.register(atexit_remove_file, 'gunicorn.conf')
+
+def test_gunicorn_conf():
+ """make sure gunicorn contents works as expected"""
+ # Prep Test
+ environ['GUNICORN_TEST1'] = 'hello'
+ environ['GUNICORN_TEST2'] = 'world'
+ gunicorn_filename = path.join(HERE, '_gunicorn.py')
+ if path.isfile(gunicorn_filename):
+ remove(gunicorn_filename)
+
+ # Create gunicorn config file (.py)
+ flask_utils.make_gunicorn_config(_gunicorn_config_path=gunicorn_filename)
+ assert path.isfile(gunicorn_filename)
+
+ # use importlib to load _gunicorn.py and make sure expected values are there
+ spec = importlib.util.spec_from_file_location('_gunicorn', gunicorn_filename)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ assert module.test1 == 'hello'
+ assert module.test2 == 'world'
+
+ # Sanitize after test
+ del environ['GUNICORN_TEST1']
+ del environ['GUNICORN_TEST2']
+ atexit.register(atexit_remove_file, gunicorn_filename)
diff --git a/tests/test_prosper_cli.py b/tests/test_prosper_cli.py
index e25fe5b..00dbe04 100644
--- a/tests/test_prosper_cli.py
+++ b/tests/test_prosper_cli.py
@@ -4,12 +4,16 @@ Pytest functions for exercising prosper.common.cli
"""
import configparser
+import logging
from os import path
import pytest
from plumbum import local
+from testfixtures import LogCapture
import prosper.common.prosper_cli as p_cli
+import prosper.common.prosper_config as p_config
+import prosper.common.prosper_logging as p_logging
HERE = path.abspath(path.dirname(__file__))
ROOT = path.join(
@@ -39,6 +43,107 @@ class TestMetaClasses:
with pytest.raises(NotImplementedError):
dummy = DummyApplication()
+ def test_config_path_happypath(self):
+ """make sure __new__ works as expected"""
+ class DummyApplication(p_cli.ProsperApplication):
+ PROGNAME = 'DUMMY'
+ VERSION = '0.0.0'
+
+ here_path = HERE
+ config_path = LOCAL_CONFIG_PATH
+
+ def main(self):
+ return 'yes'
+ dummy = DummyApplication(__file__)
+
+ def test_app_properties_logger_verbose(self):
+ """make sure properties work as expected"""
+ class DummyVerboseApplication(p_cli.ProsperApplication):
+ PROGNAME = 'DUMMYVERBOSE'
+ VERSION = '0.0.0'
+
+ here_path = HERE
+ config_path = LOCAL_CONFIG_PATH
+
+ def main(self):
+ return 'yes'
+
+ dummy_v = DummyVerboseApplication(__file__)
+ dummy_v.verbose = True
+
+ assert dummy_v._logger is None
+ assert isinstance(dummy_v.logger, logging.Logger)
+
+ handler_types = [type(handler) for handler in dummy_v.logger.handlers]
+ assert logging.StreamHandler in handler_types
+ assert p_logging.HackyDiscordHandler not in handler_types
+ assert p_logging.HackySlackHandler not in handler_types
+ assert p_logging.HackyHipChatHandler not in handler_types
+
+ def test_app_properties_logger_normal(self):
+ """make sure properties work as expected"""
+ class DummyApplication(p_cli.ProsperApplication):
+ PROGNAME = 'DUMMY'
+ VERSION = '0.0.0'
+
+ here_path = HERE
+ config_path = LOCAL_CONFIG_PATH
+
+ def main(self):
+ return 'yes'
+
+ dummy = DummyApplication(__file__)
+
+ assert dummy._logger is None
+ assert isinstance(dummy.logger, logging.Logger)
+
+ handler_types = [type(handler) for handler in dummy.logger.handlers]
+ assert p_logging.HackyDiscordHandler in handler_types
+ assert p_logging.HackySlackHandler in handler_types
+ # assert p_logging.HackyHipChatHandler in handler_types # TODO: need hipchat test endpoint
+ assert logging.StreamHandler not in handler_types
+
+ def test_app_properties_config(self):
+ """make sure properties work as expected"""
+ class DummyApplication(p_cli.ProsperApplication):
+ PROGNAME = 'DUMMY'
+ VERSION = '0.0.0'
+
+ here_path = HERE
+ config_path = LOCAL_CONFIG_PATH
+
+ def main(self):
+ return 'yes'
+
+ dummy = DummyApplication(__file__)
+ assert isinstance(dummy.config, p_config.ProsperConfig)
+
+
+class TestFlaskLauncher:
+ """validate meta behavior of FlaskLauncher framework"""
+ class DummyFlaskLauncher(p_cli.FlaskLauncher):
+ PROGNAME = 'FLASK_LAUNCHER'
+ VERSION = '0.0.0'
+
+ here_path = HERE
+ config_path = LOCAL_CONFIG_PATH
+
+ def main(self):
+ self.verbose = True
+ self.logger('hello_world')
+
+ self.notify_launch()
+
+
+ def test_get_host(self):
+ """validate get_host method"""
+ dummy = self.DummyFlaskLauncher(__file__)
+ dummy.debug = True
+ assert dummy.get_host() == '127.0.0.1'
+
+ dummy.debug = False
+ assert dummy.get_host() == '0.0.0.0'
+
class TestCLI:
"""validate basic args work as expected"""
@@ -48,7 +153,7 @@ class TestCLI:
def test_happypath(self):
"""validate output is output"""
# TODO: test isn't working, but OK?
- result = self.cli('-v')
+ result = self.cli('--verbose')
if not result:
pytest.xfail('expected output? `{}`'.format(result))
diff --git a/tests/test_prosper_config.py b/tests/test_prosper_config.py
index ffe2f46..50432f7 100644
--- a/tests/test_prosper_config.py
+++ b/tests/test_prosper_config.py
@@ -9,7 +9,7 @@ import json
import pytest
import prosper.common.prosper_config as prosper_config
-import prosper.common.prosper_utilities as prosper_utilities
+import helpers
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
@@ -88,7 +88,7 @@ def test_local_filepath_helper():
def test_config_file():
"""Test makes sure tracked/local configs have all matching keys"""
- unique_values = prosper_utilities.compare_config_files(LOCAL_CONFIG_PATH)
+ unique_values = helpers.compare_config_files(LOCAL_CONFIG_PATH)
message = ''
if unique_values:
diff --git a/tests/test_prosper_logging.py b/tests/test_prosper_logging.py
index 2af8220..92e9d35 100644
--- a/tests/test_prosper_logging.py
+++ b/tests/test_prosper_logging.py
@@ -5,9 +5,9 @@ Pytest functions for exercising prosper.common.prosper_logging
"""
from os import path, listdir, remove, makedirs, rmdir
-import configparser
import logging
from datetime import datetime
+import platform
from warnings import warn
import pytest
@@ -71,7 +71,8 @@ def test_cleanup_log_directory(
for log_file in log_list:
if '.log' in log_file: #mac adds .DS_Store and gets cranky about deleting
log_abspath = path.join(LOG_PATH, log_file)
- remove(log_abspath)
+ if not platform.system() == 'Windows':
+ remove(log_abspath)
def test_rotating_file_handle(config=TEST_CONFIG):
"""Exercise TimedRotatingFileHandler to make sure logs are generating as expected
@@ -145,7 +146,7 @@ def test_discord_webhook(config_override=TEST_CONFIG):
if not DISCORD_WEBHOOK: #FIXME: commenting doesn't work in config file?
- pytest.skip('discord_webhook is blank')
+ pytest.xfail('discord_webhook is blank')
webhook_obj = prosper_logging.DiscordWebhook()
webhook_obj.webhook(DISCORD_WEBHOOK)
@@ -158,7 +159,7 @@ SLACK_WEBHOOK = TEST_CONFIG.get_option('LOGGING', 'slack_webhook', None)
def test_slack_webhook(config_override=TEST_CONFIG):
"""push 'hello world' message through Slack webhook"""
if not SLACK_WEBHOOK:
- pytest.skip('slack_webhook is blank')
+ pytest.xfail('slack_webhook is blank')
test_payload = {
'fallback': 'hello world',
@@ -174,7 +175,7 @@ HIPCHAT_WEBHOOK = TEST_CONFIG.get_option('LOGGING', 'hipchat_webhook', None)
def test_hipchat_webhook(config_override=TEST_CONFIG):
"""push 'hello world' message through Slack webhook"""
if not HIPCHAT_WEBHOOK:
- pytest.skip('hipchat_webhook is blank')
+ pytest.xfail('hipchat_webhook is blank')
test_payload = {
'color': 'green',
@@ -285,7 +286,7 @@ REQUEST_POST_ENDPOINT = TEST_CONFIG.get_option('TEST', 'request_POST_endpoint',
def test_discord_logger(config=TEST_CONFIG):
"""Execute LogCapture on Discord logger object"""
if not DISCORD_WEBHOOK: #FIXME: commenting doesn't work in config file?
- pytest.skip('discord_webhook is blank')
+ pytest.xfail('discord_webhook is blank')
test_logname = 'discord_logger'
log_builder = prosper_logging.ProsperLogger(
@@ -323,7 +324,7 @@ SLACK_POST_ENDPOINT = TEST_CONFIG.get_option('TEST', 'slack_POST_endpoint', None
def test_slack_logger(config=TEST_CONFIG):
"""Execute LogCapture on Slack logger object"""
if not SLACK_WEBHOOK:
- pytest.skip('slack_webhook is blank')
+ pytest.xfail('slack_webhook is blank')
test_logname = 'slack_logger'
log_builder = prosper_logging.ProsperLogger(
@@ -359,7 +360,7 @@ HIPCHAT_PORT = TEST_CONFIG.get_option('TEST', 'hipchat_port', None)
def test_hipchat_logger(config=TEST_CONFIG):
"""Execute LogCapture on Slack logger object"""
if not HIPCHAT_WEBHOOK:
- pytest.skip('hipchat_webhook is blank')
+ pytest.xfail('hipchat_webhook is blank')
test_logname = 'hipchat_logger'
log_builder = prosper_logging.ProsperLogger(
diff --git a/tests/test_prosper_version.py b/tests/test_prosper_version.py
index 49b4615..df323db 100644
--- a/tests/test_prosper_version.py
+++ b/tests/test_prosper_version.py
@@ -7,6 +7,7 @@ from codecs import decode
from os import path
import os
import shutil
+import sys
from subprocess import check_output
import pytest
@@ -75,6 +76,10 @@ def test_read_git_tags_default():
def test_read_git_tags_happypath():
"""validate version matches expectation"""
+ tag_version = semantic_version.Version(p_version._read_git_tags())
+ if tag_version.prerelease:
+ pytest.xfail('PyPI prerelease formatting not compatable with `semantic_version`')
+
released_versions_report = check_output(['yolk', '-V', 'prospercommon']).splitlines()
released_versions = []
@@ -83,7 +88,6 @@ def test_read_git_tags_happypath():
current_version = max([semantic_version.Version(line) for line in released_versions])
- tag_version = semantic_version.Version(p_version._read_git_tags())
tag_status = tag_version <= current_version #expect equal-to or less-than current release
@@ -95,6 +99,7 @@ def test_read_git_tags_happypath():
pytest.xfail(
'Expected release mismatch -- tag={} yolk={}'.format(tag_version, current_version))
#assert tag_version <= current_version #expect equal-to or less-than current release
+
def test_version_from_file_default():
"""validate default version returns from _version_from_file()"""
with pytest.warns(exceptions.ProsperDefaultVersionWarning):
@@ -131,8 +136,12 @@ def test_version_installed_as_dep():
# Prep a dummy version
virtualenv_name = 'DUMMY_VENV'
dummy_version = '9.9.9'
+ python_version = 'python{major}.{minor}'.format(
+ major=sys.version_info[0],
+ minor=sys.version_info[1]
+ )
virtualenv_path = path.join(
- HERE, virtualenv_name, 'lib/python3.6/site-packages/prosper/common')
+ HERE, virtualenv_name, 'lib', python_version, 'site-packages/prosper/common')
os.makedirs(virtualenv_path, exist_ok=True)
with open(path.join(virtualenv_path, 'version.txt'), 'w') as dummy_fh:
dummy_fh.write(dummy_version)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 8
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"testfixtures",
"mock",
"yolk3k"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
plumbum==1.8.3
pockets==0.9.1
-e git+https://github.com/EVEprosper/ProsperCommon.git@9cb24f7eefe6a43d043fc541ad89d6e434aef0a4#egg=ProsperCommon
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pytz==2025.2
requests==2.27.1
semantic-version==2.10.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testfixtures==7.2.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
yolk3k==0.9
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ProsperCommon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- plumbum==1.8.3
- pockets==0.9.1
- pygments==2.14.0
- pytest-cov==4.0.0
- pytz==2025.2
- requests==2.27.1
- semantic-version==2.10.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testfixtures==7.2.2
- tomli==1.2.3
- urllib3==1.26.20
- yolk3k==0.9
prefix: /opt/conda/envs/ProsperCommon
| [
"tests/test_flask_utils.py::test_gunicorn_conf",
"tests/test_prosper_cli.py::TestMetaClasses::test_no_config_path",
"tests/test_prosper_cli.py::TestMetaClasses::test_config_path_happypath",
"tests/test_prosper_cli.py::TestMetaClasses::test_app_properties_logger_verbose",
"tests/test_prosper_cli.py::TestMetaClasses::test_app_properties_config",
"tests/test_prosper_cli.py::TestFlaskLauncher::test_get_host",
"tests/test_prosper_cli.py::TestCLI::test_version",
"tests/test_prosper_cli.py::TestCLI::test_help",
"tests/test_prosper_cli.py::TestCLI::test_dump_config",
"tests/test_prosper_config.py::test_setup_environment",
"tests/test_prosper_config.py::test_bad_config",
"tests/test_prosper_config.py::test_priority_order",
"tests/test_prosper_config.py::test_local_filepath_helper",
"tests/test_prosper_config.py::test_config_file",
"tests/test_prosper_config.py::test_local_get",
"tests/test_prosper_config.py::test_global_get",
"tests/test_prosper_config.py::test_fail_get",
"tests/test_prosper_config.py::test_cleanup_environment",
"tests/test_prosper_logging.py::test_cleanup_log_directory",
"tests/test_prosper_logging.py::test_rotating_file_handle",
"tests/test_prosper_logging.py::test_default_logger_options",
"tests/test_prosper_logging.py::test_default_logger",
"tests/test_prosper_logging.py::test_debug_logger",
"tests/test_prosper_logging.py::test_iter_util",
"tests/test_prosper_logging.py::test_configure_common",
"tests/test_prosper_logging.py::test_bad_init",
"tests/test_prosper_logging.py::test_handle_str",
"tests/test_prosper_logging.py::test_log_format_name",
"tests/test_prosper_logging.py::test_debugmode_pathing",
"tests/test_prosper_logging.py::test_pathmaking",
"tests/test_prosper_logging.py::test_discordwebhook_api_keys",
"tests/test_prosper_logging.py::test_discordwebhook_webhook_url",
"tests/test_prosper_logging.py::test_discordwebhook_str",
"tests/test_prosper_logging.py::test_discordwebhook_get_webhook_info",
"tests/test_prosper_logging.py::test_discord_logginghook",
"tests/test_prosper_logging.py::test_discord_logginghook_unconfigured",
"tests/test_prosper_logging.py::test_pathmaking_fail_makedirs",
"tests/test_prosper_logging.py::test_pathmaking_fail_writeaccess",
"tests/test_prosper_logging.py::test_send_msg_to_webhook_success",
"tests/test_prosper_logging.py::test_send_msg_to_webhook_faulty",
"tests/test_prosper_version.py::test_version_virgin",
"tests/test_prosper_version.py::test_version_expected",
"tests/test_prosper_version.py::test_read_git_tags_default",
"tests/test_prosper_version.py::test_version_from_file_default",
"tests/test_prosper_version.py::test_version_from_file_happypath",
"tests/test_prosper_version.py::test_travis_tag_testmode",
"tests/test_prosper_version.py::test_version_installed_as_dep"
]
| [
"tests/test_flask_utils.py::test_cli",
"tests/test_prosper_cli.py::TestMetaClasses::test_app_properties_logger_normal",
"tests/test_prosper_version.py::test_read_git_tags_happypath"
]
| []
| []
| MIT License | 2,350 | [
"docs/prosper_cli.rst",
"prosper/common/prosper_version.py",
"prosper/common/flask_utils.py",
"setup.py",
"tests/conftest.py",
"prosper/common/prosper_utilities.py",
"prosper/common/prosper_logging.py",
"prosper/common/prosper_cli.py",
"prosper/common/prosper_config.py",
".coveragerc",
"docs/flask_utils.rst"
]
| [
"tests/helpers.py",
"docs/prosper_cli.rst",
"prosper/common/prosper_version.py",
"prosper/common/flask_utils.py",
"setup.py",
"tests/conftest.py",
"prosper/common/prosper_logging.py",
"prosper/common/prosper_cli.py",
"prosper/common/prosper_config.py",
".coveragerc",
"docs/flask_utils.rst"
]
|
|
andreroggeri__pynubank-12 | 9e1660516600a94f949259465c371acf7256f5ae | 2018-04-01 22:14:51 | 9e1660516600a94f949259465c371acf7256f5ae | coveralls:
[](https://coveralls.io/builds/16288275)
Coverage remained the same at 100.0% when pulling **19757302b91677ce5e659bbb28fc124dd5ef4ab7 on janjitsu:master** into **9e1660516600a94f949259465c371acf7256f5ae on andreroggeri:master**.
andreroggeri: Muito obrigado @janjitsu 🤑 | diff --git a/pynubank/nubank.py b/pynubank/nubank.py
index 3ba66af..ae31690 100644
--- a/pynubank/nubank.py
+++ b/pynubank/nubank.py
@@ -54,6 +54,7 @@ class Nubank:
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['events']['href']
self.query_url = data['_links']['ghostflame']['href']
+ self.bills_url = data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
@@ -63,6 +64,10 @@ class Nubank:
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
+ def get_card_bills(self):
+ request = requests.get(self.bills_url, headers=self.headers)
+ return json.loads(request.content.decode('utf-8'))
+
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
| Acessar faturas do cartão
Olá, gostaria de um método para acessar as faturas do cartão! | andreroggeri/pynubank | diff --git a/tests/test_nubank_client.py b/tests/test_nubank_client.py
index 6626e1f..787a858 100644
--- a/tests/test_nubank_client.py
+++ b/tests/test_nubank_client.py
@@ -104,6 +104,132 @@ def events_return():
}
}
[email protected]
+def bills_return():
+ return {
+ "_links": {
+ "future": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/future"
+ },
+ "open": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open"
+ }
+ },
+ "bills": [
+ {
+ "state": "future",
+ "summary": {
+ "adjustments": "0",
+ "close_date": "2018-05-03",
+ "due_date": "2018-05-10",
+ "effective_due_date": "2018-05-10",
+ "expenses": "126.94",
+ "fees": "0",
+ "interest": 0,
+ "interest_charge": "0",
+ "interest_rate": "0.1375",
+ "interest_reversal": "0",
+ "international_tax": "0",
+ "minimum_payment": 0,
+ "open_date": "2018-04-03",
+ "paid": 0,
+ "past_balance": 0,
+ "payments": "0",
+ "precise_minimum_payment": "0",
+ "precise_total_balance": "126.94",
+ "previous_bill_balance": "0",
+ "tax": "0",
+ "total_accrued": "0",
+ "total_balance": 12694,
+ "total_credits": "0",
+ "total_cumulative": 12694,
+ "total_financed": "0",
+ "total_international": "0",
+ "total_national": "126.94",
+ "total_payments": "0"
+ }
+ },
+ {
+ "_links": {
+ "self": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open"
+ }
+ },
+ "state": "open",
+ "summary": {
+ "adjustments": "0",
+ "close_date": "2018-04-03",
+ "due_date": "2018-04-10",
+ "effective_due_date": "2018-04-10",
+ "expenses": "303.36",
+ "fees": "0",
+ "interest": 0,
+ "interest_charge": "0",
+ "interest_rate": "0.1375",
+ "interest_reversal": "0",
+ "international_tax": "0",
+ "minimum_payment": 0,
+ "open_date": "2018-03-03",
+ "paid": 0,
+ "past_balance": 0,
+ "payments": "-285.15",
+ "precise_minimum_payment": "0",
+ "precise_total_balance": "303.362041645013",
+ "previous_bill_balance": "285.152041645013",
+ "tax": "0",
+ "total_accrued": "0",
+ "total_balance": 30336,
+ "total_credits": "0",
+ "total_cumulative": 30336,
+ "total_financed": "0",
+ "total_international": "0",
+ "total_national": "303.36",
+ "total_payments": "-285.15"
+ }
+ },
+ {
+ "_links": {
+ "self": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz"
+ }
+ },
+ "href": "nuapp://bill/abcde-fghi-jklmn-opqrst-uvxz",
+ "id": "abcde-fghi-jklmn-opqrst-uvxz",
+ "state": "overdue",
+ "summary": {
+ "adjustments": "-63.99106066",
+ "close_date": "2018-03-03",
+ "due_date": "2018-03-10",
+ "effective_due_date": "2018-03-12",
+ "expenses": "364.14",
+ "fees": "0",
+ "interest": 0,
+ "interest_charge": "0",
+ "interest_rate": "0.1375",
+ "interest_reversal": "0",
+ "international_tax": "0",
+ "minimum_payment": 8003,
+ "open_date": "2018-02-03",
+ "paid": 28515,
+ "past_balance": -1500,
+ "payments": "-960.47",
+ "precise_minimum_payment": "480.02544320601300",
+ "precise_total_balance": "285.152041645013",
+ "previous_bill_balance": "945.473102305013",
+ "remaining_minimum_payment": 0,
+ "tax": "0",
+ "total_accrued": "0",
+ "total_balance": 28515,
+ "total_credits": "-64.18",
+ "total_cumulative": 30015,
+ "total_financed": "0",
+ "total_international": "0",
+ "total_national": "364.32893934",
+ "total_payments": "-960.47"
+ }
+ },
+ ]
+ }
@pytest.fixture
def account_balance_return():
@@ -192,6 +318,55 @@ def test_get_card_feed(monkeypatch, authentication_return, events_return):
assert events[0]['href'] == 'nuapp://transaction/abcde-fghi-jklmn-opqrst-uvxz'
assert events[0]['_links']['self']['href'] == 'https://prod-s0-webapp-proxy.nubank.com.br/api/proxy/_links_123'
+def test_get_card_bills(monkeypatch, authentication_return, bills_return):
+ response = create_fake_response(authentication_return)
+ monkeypatch.setattr('requests.post', MagicMock(return_value=response))
+ nubank_client = Nubank('12345678909', '12345678')
+
+ response = create_fake_response(bills_return)
+ monkeypatch.setattr('requests.get', MagicMock(return_value=response))
+
+ bills_response = nubank_client.get_card_bills()
+ assert bills_response['_links']['future']['href'] == 'https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/future'
+ assert bills_response['_links']['open']['href'] == 'https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open'
+
+ bills = bills_response['bills']
+ assert len(bills) == 3
+ assert bills[2]['_links']['self']['href'] == "https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz"
+ assert bills[2]['href'] == 'nuapp://bill/abcde-fghi-jklmn-opqrst-uvxz'
+ assert bills[2]['id'] == 'abcde-fghi-jklmn-opqrst-uvxz'
+ assert bills[2]['state'] == 'overdue'
+
+ summary = bills[2]['summary']
+ assert summary["adjustments"] == "-63.99106066"
+ assert summary["close_date"] == "2018-03-03"
+ assert summary["due_date"] == "2018-03-10"
+ assert summary["effective_due_date"] == "2018-03-12"
+ assert summary["expenses"] == "364.14"
+ assert summary["fees"] == "0"
+ assert summary["interest"] == 0
+ assert summary["interest_charge"] == "0"
+ assert summary["interest_rate"] == "0.1375"
+ assert summary["interest_reversal"] == "0"
+ assert summary["international_tax"] == "0"
+ assert summary["minimum_payment"] == 8003
+ assert summary["open_date"] == "2018-02-03"
+ assert summary["paid"] == 28515
+ assert summary["past_balance"] == -1500
+ assert summary["payments"] == "-960.47"
+ assert summary["precise_minimum_payment"] == "480.02544320601300"
+ assert summary["precise_total_balance"] == "285.152041645013"
+ assert summary["previous_bill_balance"] == "945.473102305013"
+ assert summary["remaining_minimum_payment"] == 0
+ assert summary["tax"] == "0"
+ assert summary["total_accrued"] == "0"
+ assert summary["total_balance"] == 28515
+ assert summary["total_credits"] == "-64.18"
+ assert summary["total_cumulative"] == 30015
+ assert summary["total_financed"] == "0"
+ assert summary["total_international"] == "0"
+ assert summary["total_national"] == "364.32893934"
+ assert summary["total_payments"] == "-960.47"
def test_get_card_statements(monkeypatch, authentication_return, events_return):
response = create_fake_response(authentication_return)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
chardet==3.0.4
exceptiongroup==1.2.2
idna==2.5
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/andreroggeri/pynubank.git@9e1660516600a94f949259465c371acf7256f5ae#egg=pynubank
pytest==8.3.5
requests==2.18.1
tomli==2.2.1
urllib3==1.21.1
| name: pynubank
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- chardet==3.0.4
- exceptiongroup==1.2.2
- idna==2.5
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.18.1
- tomli==2.2.1
- urllib3==1.21.1
prefix: /opt/conda/envs/pynubank
| [
"tests/test_nubank_client.py::test_get_card_bills"
]
| []
| [
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[100]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[101]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[102]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[103]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[201]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[202]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[203]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[204]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[205]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[206]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[207]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[208]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[226]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[300]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[301]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[302]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[303]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[304]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[305]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[306]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[307]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[308]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[400]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[401]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[402]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[403]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[404]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[405]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[406]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[407]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[408]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[409]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[410]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[411]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[412]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[413]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[414]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[415]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[416]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[417]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[418]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[420]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[421]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[422]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[423]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[424]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[426]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[428]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[429]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[431]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[440]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[444]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[449]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[450]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[451]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[495]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[496]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[497]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[498]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[499]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[500]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[501]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[502]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[503]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[504]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[505]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[506]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[507]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[508]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[509]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[510]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[511]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[520]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[521]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[522]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[523]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[524]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[525]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[526]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[527]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[530]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[598]",
"tests/test_nubank_client.py::test_authentication_succeeds",
"tests/test_nubank_client.py::test_get_card_feed",
"tests/test_nubank_client.py::test_get_card_statements",
"tests/test_nubank_client.py::test_get_account_balance",
"tests/test_nubank_client.py::test_get_account_feed",
"tests/test_nubank_client.py::test_get_account_statements",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[100]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[101]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[102]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[103]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[201]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[202]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[203]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[204]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[205]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[206]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[207]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[208]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[226]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[300]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[301]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[302]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[303]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[304]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[305]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[306]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[307]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[308]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[400]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[401]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[402]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[403]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[404]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[405]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[406]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[407]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[408]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[409]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[410]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[411]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[412]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[413]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[414]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[415]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[416]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[417]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[418]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[420]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[421]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[422]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[423]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[424]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[426]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[428]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[429]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[431]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[440]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[444]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[449]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[450]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[451]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[495]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[496]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[497]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[498]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[499]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[500]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[501]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[502]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[503]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[504]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[505]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[506]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[507]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[508]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[509]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[510]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[511]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[520]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[521]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[522]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[523]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[524]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[525]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[526]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[527]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[530]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[598]"
]
| []
| MIT License | 2,351 | [
"pynubank/nubank.py"
]
| [
"pynubank/nubank.py"
]
|
conan-io__conan-2705 | e1e3cce13e69576f562296a1e6d64352e8e84ccd | 2018-04-02 15:07:40 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | diff --git a/conans/__init__.py b/conans/__init__.py
index 97c78e433..2333a851a 100644
--- a/conans/__init__.py
+++ b/conans/__init__.py
@@ -16,5 +16,5 @@ COMPLEX_SEARCH_CAPABILITY = "complex_search"
SERVER_CAPABILITIES = [COMPLEX_SEARCH_CAPABILITY, ]
-__version__ = '1.2.0'
+__version__ = '1.2.1'
diff --git a/conans/client/build/cppstd_flags.py b/conans/client/build/cppstd_flags.py
index e5ca936c3..5435d63e6 100644
--- a/conans/client/build/cppstd_flags.py
+++ b/conans/client/build/cppstd_flags.py
@@ -26,7 +26,7 @@ def cppstd_flag(compiler, compiler_version, cppstd):
def cppstd_default(compiler, compiler_version):
default = {"gcc": _gcc_cppstd_default(compiler_version),
"clang": _clang_cppstd_default(compiler_version),
- "apple-clang": "gnu98",
+ "apple-clang": "gnu98", # Confirmed in apple-clang 9.1 with a simple "auto i=1;"
"Visual Studio": _visual_cppstd_default(compiler_version)}.get(str(compiler), None)
return default
@@ -85,6 +85,11 @@ def _cppstd_apple_clang(clang_version, cppstd):
v17 = "c++1z"
vgnu17 = "gnu++1z"
+ if Version(clang_version) >= "9.1":
+ # Not confirmed that it didn't work before 9.1 but 1z is still valid, so we are ok
+ v17 = "c++17"
+ vgnu17 = "gnu++17"
+
flag = {"98": v98, "gnu98": vgnu98,
"11": v11, "gnu11": vgnu11,
"14": v14, "gnu14": vgnu14,
diff --git a/conans/client/conf/__init__.py b/conans/client/conf/__init__.py
index 267416ced..96dd58553 100644
--- a/conans/client/conf/__init__.py
+++ b/conans/client/conf/__init__.py
@@ -65,7 +65,7 @@ compiler:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0", "6.0"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
- version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0"]
+ version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release]
diff --git a/conans/client/migrations.py b/conans/client/migrations.py
index ef0ba6b31..27a86af1f 100644
--- a/conans/client/migrations.py
+++ b/conans/client/migrations.py
@@ -41,7 +41,7 @@ class ClientMigrator(Migrator):
# VERSION 0.1
if old_version is None:
return
- if old_version < Version("1.1.0-dev"):
+ if old_version < Version("1.2.1"):
old_settings = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
@@ -82,9 +82,9 @@ compiler:
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
- "5", "5.1", "5.2", "5.3", "5.4",
+ "5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
- "7", "7.1", "7.2"]
+ "7", "7.1", "7.2", "7.3"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
@@ -93,13 +93,14 @@ compiler:
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp, v140, v140_xp, v140_clang_c2, LLVM-vs2014, LLVM-vs2014_xp, v141, v141_xp, v141_clang_c2]
clang:
- version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0"]
+ version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0", "6.0"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release]
+cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
"""
self._update_settings_yml(old_settings)
| Apple-clang 9.1 not supported
Please, add apple-clang 9.1 to supported platform. I had problem with compile missing libraries. I fixed it with manually edited ```.conan/settings.yml```
My CMake output:
```
-- The CXX compiler identification is AppleClang 9.1.0.9020039
```
Clang version:
```sh
$ clang++ -v
Apple LLVM version 9.1.0 (clang-902.0.39.1)
Target: x86_64-apple-darwin17.5.0
Thread model: posix
InstalledDir: /Library/Developer/CommandLineTools/usr/bin
```
Conan version:
```
$ conan -v
Conan version 1.2.0
``` | conan-io/conan | diff --git a/conans/test/build_helpers/cpp_std_flags_test.py b/conans/test/build_helpers/cpp_std_flags_test.py
index b110bb3f4..7e7c1c5e4 100644
--- a/conans/test/build_helpers/cpp_std_flags_test.py
+++ b/conans/test/build_helpers/cpp_std_flags_test.py
@@ -136,6 +136,10 @@ class CompilerFlagsTest(unittest.TestCase):
self.assertEquals(cppstd_flag("apple-clang", "9", "14"), '-std=c++14')
self.assertEquals(cppstd_flag("apple-clang", "9", "17"), "-std=c++1z")
+ self.assertEquals(cppstd_flag("apple-clang", "9.1", "11"), '-std=c++11')
+ self.assertEquals(cppstd_flag("apple-clang", "9.1", "14"), '-std=c++14')
+ self.assertEquals(cppstd_flag("apple-clang", "9.1", "17"), "-std=c++17")
+
def test_apple_clang_cppstd_defaults(self):
self.assertEquals(cppstd_default("apple-clang", "2"), "gnu98")
self.assertEquals(cppstd_default("apple-clang", "3"), "gnu98")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_dev.txt",
"conans/requirements_server.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@e1e3cce13e69576f562296a1e6d64352e8e84ccd#egg=conan
coverage==4.2
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_flags"
]
| []
| [
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_cppstd_flags",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_gcc_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_gcc_cppstd_flags",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_visual_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_visual_cppstd_flags"
]
| []
| MIT License | 2,352 | [
"conans/client/conf/__init__.py",
"conans/client/build/cppstd_flags.py",
"conans/__init__.py",
"conans/client/migrations.py"
]
| [
"conans/client/conf/__init__.py",
"conans/client/build/cppstd_flags.py",
"conans/__init__.py",
"conans/client/migrations.py"
]
|
|
cevoaustralia__aws-google-auth-67 | 7840706fb3b10dc45bcf829ff31c45d7e324e551 | 2018-04-03 06:38:18 | d473d67b0772700942f5bb0db3522af0a1005453 | diff --git a/aws_google_auth/configuration.py b/aws_google_auth/configuration.py
index ce02aa3..270cb5e 100644
--- a/aws_google_auth/configuration.py
+++ b/aws_google_auth/configuration.py
@@ -44,7 +44,7 @@ class Configuration(object):
@property
def max_duration(self):
- return 3600
+ return 43200
@property
def credentials_file(self):
diff --git a/aws_google_auth/google.py b/aws_google_auth/google.py
index cd5b636..70beb14 100644
--- a/aws_google_auth/google.py
+++ b/aws_google_auth/google.py
@@ -42,7 +42,7 @@ class Google:
sp_id: Google's assigned SP identifier for your AWS SAML app
Optionally, you can supply:
- duration_seconds: number of seconds for the session to be active (max 3600)
+ duration_seconds: number of seconds for the session to be active (max 43200)
"""
self.version = _version.__version__
| Max duration for a session can be increased to 12h
The limit was recently increased on the AWS side: https://aws.amazon.com/about-aws/whats-new/2018/03/longer-role-sessions/ | cevoaustralia/aws-google-auth | diff --git a/aws_google_auth/tests/test_config_parser.py b/aws_google_auth/tests/test_config_parser.py
index 223d51b..aa3e798 100644
--- a/aws_google_auth/tests/test_config_parser.py
+++ b/aws_google_auth/tests/test_config_parser.py
@@ -57,7 +57,7 @@ class TestDurationProcessing(unittest.TestCase):
def test_default(self):
args = parse_args([])
config = resolve_config(args)
- self.assertEqual(3600, config.duration)
+ self.assertEqual(43200, config.duration)
def test_cli_param_supplied(self):
args = parse_args(['-d', "500"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[u2f]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
-e git+https://github.com/cevoaustralia/aws-google-auth.git@7840706fb3b10dc45bcf829ff31c45d7e324e551#egg=aws_google_auth
backports.zoneinfo==0.2.1
beautifulsoup4==4.12.3
boto3==1.23.10
botocore==1.26.10
certifi==2021.5.30
charset-normalizer==2.0.12
configparser==5.2.0
hidapi==0.14.0.post4
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jmespath==0.10.0
lxml==5.3.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
python-u2flib-host==3.0.3
pytz-deprecation-shim==0.1.0.post0
requests==2.27.1
s3transfer==0.5.2
six==1.17.0
soupsieve==2.3.2.post1
tabulate==0.8.10
tomli==1.2.3
typing_extensions==4.1.1
tzdata==2025.2
tzlocal==4.2
urllib3==1.26.20
zipp==3.6.0
| name: aws-google-auth
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- backports-zoneinfo==0.2.1
- beautifulsoup4==4.12.3
- boto3==1.23.10
- botocore==1.26.10
- charset-normalizer==2.0.12
- configparser==5.2.0
- hidapi==0.14.0.post4
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jmespath==0.10.0
- lxml==5.3.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- python-u2flib-host==3.0.3
- pytz-deprecation-shim==0.1.0.post0
- requests==2.27.1
- s3transfer==0.5.2
- six==1.17.0
- soupsieve==2.3.2.post1
- tabulate==0.8.10
- tomli==1.2.3
- typing-extensions==4.1.1
- tzdata==2025.2
- tzlocal==4.2
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/aws-google-auth
| [
"aws_google_auth/tests/test_config_parser.py::TestDurationProcessing::test_default"
]
| []
| [
"aws_google_auth/tests/test_config_parser.py::TestProfileProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestProfileProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestProfileProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestUsernameProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestUsernameProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestUsernameProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestDurationProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestDurationProcessing::test_invalid_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestDurationProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestIDPProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestIDPProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestIDPProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestSPProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestSPProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestSPProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestRegionProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestRegionProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestRoleProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestRoleProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestRoleProcessing::test_with_environment",
"aws_google_auth/tests/test_config_parser.py::TestAskRoleProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestAskRoleProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestU2FDisabledProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestU2FDisabledProcessing::test_default",
"aws_google_auth/tests/test_config_parser.py::TestResolveAliasesProcessing::test_cli_param_supplied",
"aws_google_auth/tests/test_config_parser.py::TestResolveAliasesProcessing::test_default"
]
| []
| MIT License | 2,353 | [
"aws_google_auth/google.py",
"aws_google_auth/configuration.py"
]
| [
"aws_google_auth/google.py",
"aws_google_auth/configuration.py"
]
|
|
conan-io__conan-2708 | 777f846df4cabe366ddcb88e39f6c7cd8970d7e1 | 2018-04-03 11:42:07 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | diff --git a/conans/client/userio.py b/conans/client/userio.py
index b42d8e39d..c64d67ff9 100644
--- a/conans/client/userio.py
+++ b/conans/client/userio.py
@@ -40,6 +40,7 @@ class UserIO(object):
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
+ self._raise_if_non_interactive()
user_input = ''
while not username:
try:
| Non interactive mode also prevents prompt from `conan user`
The new non interactive mode also prevents the prompt from `conan user <name> -p`. This is not what is specified in the documentation. (And also not what was originally implemented.)
I hope the prompt can be re-enabled for this particular case. If not, the documentation should be changed.
Oh, and a purely cosmetic remark: the non interactive error only appears after the prompt has been displayed.
Using Conan 1.2 on Windows 7.
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
| conan-io/conan | diff --git a/conans/test/command/user_test.py b/conans/test/command/user_test.py
index 86d7d15c4..c059b50e2 100644
--- a/conans/test/command/user_test.py
+++ b/conans/test/command/user_test.py
@@ -158,5 +158,10 @@ class ConanLib(ConanFile):
error = conan.run('user -p -r default lasote', ignore_error=True)
self.assertTrue(error)
self.assertIn('ERROR: Conan interactive mode disabled', conan.user_io.out)
+ self.assertNotIn("Please enter a password for \"lasote\" account:", conan.out)
conan.run("user")
self.assertIn("Current 'default' user: None", conan.user_io.out)
+ error = conan.run("user -p", ignore_error=True)
+ self.assertTrue(error)
+ self.assertIn('ERROR: Conan interactive mode disabled', conan.out)
+ self.assertNotIn("Remote 'default' username:", conan.out)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@777f846df4cabe366ddcb88e39f6c7cd8970d7e1#egg=conan
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/command/user_test.py::UserTest::test_command_user_with_interactive_password_login_prompt_disabled"
]
| []
| [
"conans/test/command/user_test.py::UserTest::test_clean",
"conans/test/command/user_test.py::UserTest::test_command_interactive_only",
"conans/test/command/user_test.py::UserTest::test_command_user_list",
"conans/test/command/user_test.py::UserTest::test_command_user_no_remotes",
"conans/test/command/user_test.py::UserTest::test_command_user_with_interactive_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password_spaces",
"conans/test/command/user_test.py::UserTest::test_with_no_user",
"conans/test/command/user_test.py::UserTest::test_with_remote_no_connect"
]
| []
| MIT License | 2,354 | [
"conans/client/userio.py"
]
| [
"conans/client/userio.py"
]
|
|
nipy__nipype-2527 | e446466290b9ccba5d5aa589971c97e744d9267b | 2018-04-03 13:30:50 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py
index 9f306c7b8..cb01ae4a2 100644
--- a/nipype/interfaces/afni/utils.py
+++ b/nipype/interfaces/afni/utils.py
@@ -564,8 +564,11 @@ class CatMatvecInputSpec(AFNICommandInputSpec):
argstr="%s",
position=-2)
out_file = File(
- desc="File to write concattenated matvecs to",
argstr=" > %s",
+ name_template='%s_cat.aff12.1D',
+ name_source='in_file',
+ keep_extension=False,
+ desc="File to write concattenated matvecs to",
position=-1,
mandatory=True)
matrix = traits.Bool(
| AFNI CatMatvec: Caching fails and output file undefined
### Summary
AFNI CatMatvec interface caching fails and output file is undefined
### How to replicate the behavior
```Python
from nipype.interfaces import afni
from nipype.caching import Memory
memory = Memory('/tmp/test_catmatvec/')
catmatvec = memory.cache(afni.CatMatvec)
out_catmatvec = catmatvec(in_file=[('/tmp/test_caching/allineate_affine.aff12.1D', 'ONELINE')], out_file='/tmp/test_catmatvec2.aff12.1D')
print(out_catmatvec.outputs)
```
gives `out_file = <undefined>`. Moreover, when launching the same code twice the computations are restarted
### Platform details:
{'nibabel_version': '2.2.1', 'sys_executable': '/home/salma/anaconda2/bin/python', 'networkx_version': '2.0', 'numpy_version': '1.13.3', 'sys_platform': 'linux2', 'sys_version': '2.7.14 |Anaconda custom (64-bit)| (default, Oct 16 2017, 17:29:19) \n[GCC 7.2.0]', 'commit_source': 'repository', 'commit_hash': 'e446466', 'pkg_path': '/home/salma/CODE/nipype/nipype', 'nipype_version': '1.0.3-dev+ge446466', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}
1.0.3-dev+ge446466
| nipy/nipype | diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py
index dc1c981bb..df0ac34e0 100644
--- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py
+++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py
@@ -38,7 +38,10 @@ def test_CatMatvec_inputs():
),
out_file=dict(
argstr=' > %s',
+ keep_extension=False,
mandatory=True,
+ name_source='in_file',
+ name_template='%s_cat.aff12.1D',
position=-1,
),
outputtype=dict(),
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
configparser==5.2.0
coverage==6.2
cycler==0.11.0
decorator==4.4.2
docutils==0.18.1
execnet==1.9.0
funcsigs==1.0.2
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
kiwisolver==1.3.1
lxml==5.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@e446466290b9ccba5d5aa589971c97e744d9267b#egg=nipype
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-env==0.6.2
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
rdflib==5.0.0
requests==2.27.1
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- configparser==5.2.0
- coverage==6.2
- cycler==0.11.0
- decorator==4.4.2
- docutils==0.18.1
- execnet==1.9.0
- funcsigs==1.0.2
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- lxml==5.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-env==0.6.2
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rdflib==5.0.0
- requests==2.27.1
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/interfaces/afni/tests/test_auto_CatMatvec.py::test_CatMatvec_inputs"
]
| []
| [
"nipype/interfaces/afni/tests/test_auto_CatMatvec.py::test_CatMatvec_outputs"
]
| []
| Apache License 2.0 | 2,355 | [
"nipype/interfaces/afni/utils.py"
]
| [
"nipype/interfaces/afni/utils.py"
]
|
|
G-Node__python-odml-275 | a66ecdf4c0d2ad3fbd098b72ad3d8705a43ccd3d | 2018-04-03 16:23:22 | eeff5922987b064681d1328f81af317d8171808f | diff --git a/odml/base.py b/odml/base.py
index e145c8c..3940880 100644
--- a/odml/base.py
+++ b/odml/base.py
@@ -84,34 +84,18 @@ class baseobject(_baseobj):
return id(self)
-class SafeList(list):
+class SmartList(list):
- def index(self, obj):
+ def __init__(self, content_type):
"""
- Find obj in list
-
- Be sure to use "is" based comparison (instead of __eq__)
+ Only values of the instance *content_type* can be added to the SmartList.
"""
- for i, e in enumerate(self):
- if e is obj:
- return i
- raise ValueError("remove: %s not in list" % repr(obj))
-
- def remove(self, obj):
- """
- Remove an element from this list.
-
- Be sure to use "is" based comparison (instead of __eq__)
- """
- del self[self.index(obj)]
-
-
-class SmartList(SafeList):
+ self._content_type = content_type
+ super(SmartList, self).__init__()
def __getitem__(self, key):
"""
- Provides element index also by searching for an element with a given
- name
+ Provides element index also by searching for an element with a given name.
"""
# Try normal list index first (for integers)
if isinstance(key, int):
@@ -125,31 +109,68 @@ class SmartList(SafeList):
# and fail eventually
raise KeyError(key)
+ def __setitem__(self, key, value):
+ """
+ Replaces item at list[*key*] with *value*.
+ :param key: index position
+ :param value: object that replaces item at *key* position.
+ value has to be of the same content type as the list.
+ In this context usually a Section or a Property.
+ """
+ if not isinstance(value, self._content_type):
+ raise ValueError("List only supports elements of type '%s'" %
+ self._content_type)
+
+ # If required remove new object from its old parents child-list
+ if hasattr(value, "_parent") and (value._parent and value in value._parent):
+ value._parent.remove(value)
+
+ # If required move parent reference from replaced to new object
+ # and set parent reference on replaced object None.
+ if hasattr(self[key], "_parent"):
+ value._parent = self[key]._parent
+ self[key]._parent = None
+
+ super(SmartList, self).__setitem__(key, value)
+
def __contains__(self, key):
for obj in self:
if (hasattr(obj, "name") and obj.name == key) or key == obj:
return True
+ def index(self, obj):
+ """
+ Find obj in list
+ """
+ for i, e in enumerate(self):
+ if e is obj:
+ return i
+ raise ValueError("remove: %s not in list" % repr(obj))
+
+ def remove(self, obj):
+ """
+ Remove an element from this list.
+ """
+ del self[self.index(obj)]
+
def append(self, *obj_tuple):
- from odml.section import BaseSection
- from odml.doc import BaseDocument
for obj in obj_tuple:
if obj.name in self:
raise KeyError(
"Object with the same name already exists! " + str(obj))
- if (not isinstance(obj, BaseSection)) & \
- isinstance(self, BaseDocument):
- raise KeyError("Object " + str(obj) + " is not a Section.")
+ if not isinstance(obj, self._content_type):
+ raise ValueError("List only supports elements of type '%s'" %
+ self._content_type)
super(SmartList, self).append(obj)
@allow_inherit_docstring
class sectionable(baseobject):
-
def __init__(self):
- self._sections = SmartList()
+ from odml.section import Section
+ self._sections = SmartList(Section)
self._repository = None
@property
@@ -518,9 +539,10 @@ class sectionable(baseobject):
Clone this object recursively allowing to copy it independently
to another document
"""
+ from odml.section import Section
obj = super(sectionable, self).clone(children)
obj._parent = None
- obj._sections = SmartList()
+ obj._sections = SmartList(Section)
if children:
for s in self._sections:
obj.append(s.clone())
diff --git a/odml/property.py b/odml/property.py
index 894296d..a23340b 100644
--- a/odml/property.py
+++ b/odml/property.py
@@ -373,45 +373,83 @@ class BaseProperty(base.baseobject, Property):
return obj
- def merge(self, other, strict=True):
+ def merge_check(self, source, strict=True):
"""
- Merges the property 'other' into self, if possible. Information
- will be synchronized. Method will raise a ValueError when the
- information in this property and the passed property are in
- conflict.
+ Checks whether a source Property can be merged with self as destination and
+ raises a ValueError if the values of source and destination are not compatible.
+ With parameter *strict=True* a ValueError is also raised, if any of the
+ attributes unit, definition, uncertainty, reference or value_origin and dtype
+ differ in source and destination.
- :param other: an odML Property.
- :param strict: Bool value to indicate whether types should be implicitly converted
- even when information may be lost. Default is True, i.e. no conversion,
- and a ValueError will be raised if types do not match.
+ :param source: an odML Property.
+ :param strict: If True, the attributes dtype, unit, uncertainty, definition,
+ reference and value_origin of source and destination
+ must be identical.
"""
- assert(isinstance(other, BaseProperty))
- if strict and self.dtype != other.dtype:
+ if not isinstance(source, BaseProperty):
+ raise ValueError("odml.Property.merge: odML Property required.")
+
+ # Catch unmerge-able values at this point to avoid
+ # failing Section tree merges which cannot easily be rolled back.
+ new_value = self._convert_value_input(source.value)
+ if not self._validate_values(new_value):
+ raise ValueError("odml.Property.merge: passed value(s) cannot "
+ "be converted to data type '%s'!" % self._dtype)
+ if not strict:
+ return
+
+ if (self.dtype is not None and source.dtype is not None and
+ self.dtype != source.dtype):
raise ValueError("odml.Property.merge: src and dest dtypes do not match!")
- if self.unit is not None and other.unit is not None and self.unit != other.unit:
- raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!" % (other.unit, self.unit))
+ if self.unit is not None and source.unit is not None and self.unit != source.unit:
+ raise ValueError("odml.Property.merge: "
+ "src and dest units (%s, %s) do not match!" %
+ (source.unit, self.unit))
- if self.definition is not None and other.definition is not None:
+ if (self.uncertainty is not None and source.uncertainty is not None and
+ self.uncertainty != source.uncertainty):
+ raise ValueError("odml.Property.merge: "
+ "src and dest uncertainty both set and do not match!")
+
+ if self.definition is not None and source.definition is not None:
self_def = ''.join(map(str.strip, self.definition.split())).lower()
- other_def = ''.join(map(str.strip, other.definition.split())).lower()
+ other_def = ''.join(map(str.strip, source.definition.split())).lower()
if self_def != other_def:
- raise ValueError("odml.Property.merge: src and dest definitions do not match!")
-
- if self.uncertainty is not None and other.uncertainty is not None:
- raise ValueError("odml.Property.merge: src and dest uncertainty both set and do not match!")
+ raise ValueError("odml.Property.merge: "
+ "src and dest definitions do not match!")
- if self.reference is not None and other.reference is not None:
+ if self.reference is not None and source.reference is not None:
self_ref = ''.join(map(str.strip, self.reference.lower().split()))
- other_ref = ''.join(map(str.strip, other.reference.lower().split()))
+ other_ref = ''.join(map(str.strip, source.reference.lower().split()))
if self_ref != other_ref:
- raise ValueError("odml.Property.merge: src and dest references are in conflict!")
+ raise ValueError("odml.Property.merge: "
+ "src and dest references are in conflict!")
- if self.value_origin is not None and other.value_origin is not None:
+ if self.value_origin is not None and source.value_origin is not None:
self_ori = ''.join(map(str.strip, self.value_origin.lower().split()))
- other_ori = ''.join(map(str.strip, other.value_origin.lower().split()))
+ other_ori = ''.join(map(str.strip, source.value_origin.lower().split()))
if self_ori != other_ori:
- raise ValueError("odml.Property.merge: src and dest value_origin are in conflict!")
+ raise ValueError("odml.Property.merge: "
+ "src and dest value_origin are in conflict!")
+
+ def merge(self, other, strict=True):
+ """
+ Merges the Property 'other' into self, if possible. Information
+ will be synchronized. By default the method will raise a ValueError when the
+ information in this property and the passed property are in conflict.
+
+ :param other: an odML Property.
+ :param strict: Bool value to indicate whether types should be implicitly converted
+ even when information may be lost. Default is True, i.e. no conversion,
+ and a ValueError will be raised if types or other attributes do not match.
+ If a conflict arises with strict=False, the attribute value of self will
+ be kept, while the attribute value of other will be lost.
+ """
+ if not isinstance(other, BaseProperty):
+ raise TypeError("odml.Property.merge: odml Property required.")
+
+ self.merge_check(other, strict)
if self.value_origin is None and other.value_origin is not None:
self.value_origin = other.value_origin
diff --git a/odml/section.py b/odml/section.py
index 392e2af..4c32a32 100644
--- a/odml/section.py
+++ b/odml/section.py
@@ -7,7 +7,7 @@ from . import format
from . import terminology
from .doc import BaseDocument
# this is supposedly ok, as we only use it for an isinstance check
-from .property import Property
+from .property import BaseProperty
# it MUST however not be used to create any Property objects
from .tools.doc_inherit import inherit_docstring, allow_inherit_docstring
@@ -35,7 +35,7 @@ class BaseSection(base.sectionable, Section):
# Sets _sections Smartlist and _repository to None, so run first.
super(BaseSection, self).__init__()
- self._props = base.SmartList()
+ self._props = base.SmartList(BaseProperty)
try:
if id is not None:
@@ -173,8 +173,10 @@ class BaseSection(base.sectionable, Section):
return None
@definition.setter
- def definition(self, val):
- self._definition = val
+ def definition(self, new_value):
+ if new_value == "":
+ new_value = None
+ self._definition = new_value
@definition.deleter
def definition(self):
@@ -186,6 +188,8 @@ class BaseSection(base.sectionable, Section):
@reference.setter
def reference(self, new_value):
+ if new_value == "":
+ new_value = None
self._reference = new_value
# API (public)
@@ -266,10 +270,10 @@ class BaseSection(base.sectionable, Section):
:param obj: Section or Property object.
"""
- if isinstance(obj, Section):
+ if isinstance(obj, BaseSection):
self._sections.append(obj)
obj._parent = self
- elif isinstance(obj, Property):
+ elif isinstance(obj, BaseProperty):
self._props.append(obj)
obj._parent = self
elif isinstance(obj, collections.Iterable) and not isinstance(obj, str):
@@ -291,15 +295,15 @@ class BaseSection(base.sectionable, Section):
# Make sure only Sections and Properties with unique names will be added.
for obj in obj_list:
- if not isinstance(obj, Section) and not isinstance(obj, Property):
+ if not isinstance(obj, BaseSection) and not isinstance(obj, BaseProperty):
raise ValueError("odml.Section.extend: "
"Can only extend sections and properties.")
- elif isinstance(obj, Section) and obj.name in self.sections:
+ elif isinstance(obj, BaseSection) and obj.name in self.sections:
raise KeyError("odml.Section.extend: "
"Section with name '%s' already exists." % obj.name)
- elif isinstance(obj, Property) and obj.name in self.properties:
+ elif isinstance(obj, BaseProperty) and obj.name in self.properties:
raise KeyError("odml.Section.extend: "
"Property with name '%s' already exists." % obj.name)
@@ -315,14 +319,14 @@ class BaseSection(base.sectionable, Section):
:param position: index at which the object should be inserted.
:param obj: Section or Property object.
"""
- if isinstance(obj, Section):
+ if isinstance(obj, BaseSection):
if obj.name in self.sections:
raise ValueError("odml.Section.insert: "
"Section with name '%s' already exists." % obj.name)
self._sections.insert(position, obj)
obj._parent = self
- elif isinstance(obj, Property):
+ elif isinstance(obj, BaseProperty):
if obj.name in self.properties:
raise ValueError("odml.Section.insert: "
"Property with name '%s' already exists." % obj.name)
@@ -341,10 +345,10 @@ class BaseSection(base.sectionable, Section):
:param obj: Section or Property object.
"""
- if isinstance(obj, Section):
+ if isinstance(obj, BaseSection):
self._sections.remove(obj)
obj._parent = None
- elif isinstance(obj, Property):
+ elif isinstance(obj, BaseProperty):
self._props.remove(obj)
obj._parent = None
else:
@@ -373,7 +377,7 @@ class BaseSection(base.sectionable, Section):
obj = super(BaseSection, self).clone(children)
obj._id = str(uuid.uuid4())
- obj._props = base.SmartList()
+ obj._props = base.SmartList(BaseProperty)
if children:
for p in self._props:
obj.append(p.clone())
@@ -388,10 +392,10 @@ class BaseSection(base.sectionable, Section):
:param obj: Section or Property object.
"""
- if isinstance(obj, Section):
+ if isinstance(obj, BaseSection):
return super(BaseSection, self).contains(obj)
- elif isinstance(obj, Property):
+ elif isinstance(obj, BaseProperty):
for i in self._props:
if obj.name == i.name:
return i
@@ -399,6 +403,37 @@ class BaseSection(base.sectionable, Section):
raise ValueError("odml.Section.contains:"
"Section or Property object expected.")
+ def merge_check(self, source_section, strict=True):
+ """
+ Recursively checks whether a source Section and all its children can be merged
+ with self and all its children as destination and raises a ValueError if any of
+ the Section attributes definition and reference differ in source and destination.
+
+ :param source_section: an odML Section.
+ :param strict: If True, definition and reference attributes of any merged Sections
+ as well as most attributes of merged Properties on the same
+ tree level in source and destination have to be identical.
+ """
+ if strict and self.definition is not None and source_section.definition is not None:
+ self_def = ''.join(map(str.strip, self.definition.split())).lower()
+ other_def = ''.join(map(str.strip, source_section.definition.split())).lower()
+ if self_def != other_def:
+ raise ValueError(
+ "odml.Section.merge: src and dest definitions do not match!")
+
+ if strict and self.reference is not None and source_section.reference is not None:
+ self_ref = ''.join(map(str.strip, self.reference.lower().split()))
+ other_ref = ''.join(map(str.strip, source_section.reference.lower().split()))
+ if self_ref != other_ref:
+ raise ValueError(
+ "odml.Section.merge: src and dest references are in conflict!")
+
+ # Check all the way down the rabbit hole / Section tree.
+ for obj in source_section:
+ mine = self.contains(obj)
+ if mine is not None:
+ mine.merge_check(obj, strict)
+
def merge(self, section=None, strict=True):
"""
Merges this section with another *section*.
@@ -421,6 +456,16 @@ class BaseSection(base.sectionable, Section):
self.include = self._include
return
+ # Check all the way down the tree if the destination source and
+ # its children can be merged with self and its children since
+ # there is no rollback in case of a downstream merge error.
+ self.merge_check(section, strict)
+
+ if self.definition is None and section.definition is not None:
+ self.definition = section.definition
+ if self.reference is None and section.reference is not None:
+ self.reference = section.reference
+
for obj in section:
mine = self.contains(obj)
if mine is not None:
| Property handling
Currently it is possible to add anything to the ```properties``` list of a section without any check being performed.
This way
- non ```odml.Property``` objects can be inserted into the odml structure (eg ```sec.properties[0] = 'blaa'```)
- inserted ```odml.Property``` objects are not linked properly (eg. ```sec.properties[0] = odml.Property(name='myprop', value=3)``` results in ```sec.properties[0].parent``` is None)
Would it make sense to implement a similar functionality here as used for ```value``` lists in #237? | G-Node/python-odml | diff --git a/test/test_property.py b/test/test_property.py
index f0aa976..9138cae 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -430,9 +430,96 @@ class TestProperty(unittest.TestCase):
with self.assertRaises(ValueError):
prop.new_id("crash and burn")
+ def test_merge_check(self):
+ # Test dtype check
+ source = Property(name="source", dtype="string")
+ destination = Property(name="destination", dtype="string")
+
+ destination.merge_check(source)
+ source.dtype = "int"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
+ # Test value check
+ source = Property(name="source", value=[1, 2, 3])
+ destination = Property(name="destination", value=[4, 5, 6])
+ destination.merge_check(source)
+
+ # Test value convertable
+ source = Property(name="source", value=["7", "8"])
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
+ # Test value not convertable
+ source = Property(name="source", value=["nine", "ten"])
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, False)
+
+ # Test unit check
+ source = Property(name="source", unit="Hz")
+ destination = Property(name="destination", unit="Hz")
+
+ destination.merge_check(source)
+ source.unit = "s"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
+ # Test uncertainty check
+ source = Property(name="source", uncertainty=0.0)
+ destination = Property(name="destination", uncertainty=0.0)
+
+ destination.merge_check(source)
+ source.uncertainty = 10.0
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
+ # Test definition check
+ source = Property(name="source", definition="Freude\t schoener\nGoetterfunken\n")
+ destination = Property(name="destination",
+ definition="FREUDE schoener GOETTERfunken")
+
+ destination.merge_check(source)
+ source.definition = "Freunde schoender Goetterfunken"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
+ # Test reference check
+ source = Property(name="source", reference="portal.g-node.org")
+ destination = Property(name="destination", reference="portal.g-node.org")
+
+ destination.merge_check(source)
+ source.reference = "portal.g-node.org/odml/terminologies/v1.1"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
+ # Test value origin check
+ source = Property(name="source", value_origin="file")
+ destination = Property(name="destination", value_origin="file")
+
+ destination.merge_check(source)
+ source.value_origin = "other file"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source)
+
+ destination.merge_check(source, False)
+
def test_merge(self):
p_dst = Property("p1", value=[1, 2, 3], unit="Hz", definition="Freude\t schoener\nGoetterfunken\n",
- reference="portal.g-node.org", uncertainty=0.0)
+ reference="portal.g-node.org", uncertainty=0.0, value_origin="file")
p_src = Property("p2", value=[2, 4, 6], unit="Hz", definition="FREUDE schoener GOETTERfunken")
test_p = p_dst.clone()
@@ -451,11 +538,15 @@ class TestProperty(unittest.TestCase):
p_inv_ref = p_src.clone()
p_inv_ref.reference = "test"
+ p_inv_origin = p_src.clone()
+ p_inv_origin.value_origin = "other file"
+
test_p = p_dst.clone()
self.assertRaises(ValueError, test_p.merge, p_inv_unit)
self.assertRaises(ValueError, test_p.merge, p_inv_def)
self.assertRaises(ValueError, test_p.merge, p_inv_uncert)
self.assertRaises(ValueError, test_p.merge, p_inv_ref)
+ self.assertRaises(ValueError, test_p.merge, p_inv_origin)
test_p.reference = None
test_p.merge(p_src)
@@ -473,6 +564,10 @@ class TestProperty(unittest.TestCase):
test_p.merge(p_src)
self.assertEqual(test_p.definition, p_src.definition)
+ test_p.value_origin = ""
+ test_p.merge(p_src)
+ self.assertEqual(test_p.value_origin, p_src.value_origin)
+
double_p = Property("adouble", value=3.14)
int_p = Property("aint", value=3)
self.assertRaises(ValueError, double_p.merge, int_p)
diff --git a/test/test_section.py b/test/test_section.py
index d5c625a..84604aa 100644
--- a/test/test_section.py
+++ b/test/test_section.py
@@ -33,6 +33,12 @@ class TestSection(unittest.TestCase):
sec.reference = "%s_edit" % sec_ref
self.assertEqual(sec.reference, "%s_edit" % sec_ref)
+ # Test setting attributes to None when '' is passed.
+ sec.reference = ""
+ self.assertIsNone(sec.reference)
+ sec.definition = ""
+ self.assertIsNone(sec.definition)
+
def test_parent(self):
s = Section("Section")
self.assertIsNone(s.parent)
@@ -87,6 +93,72 @@ class TestSection(unittest.TestCase):
subsec.parent = None
self.assertEqual(subsec.get_path(), "/")
+ def test_children(self):
+ sec = Section(name="sec")
+
+ # Test set sections
+ subsec = Section(name="subsec", parent=sec)
+ newsec = Section(name="newsec")
+
+ self.assertEqual(subsec.parent, sec)
+ self.assertEqual(sec.sections[0], subsec)
+ self.assertEqual(len(sec.sections), 1)
+ self.assertIsNone(newsec.parent)
+
+ sec.sections[0] = newsec
+ self.assertEqual(newsec.parent, sec)
+ self.assertEqual(sec.sections[0], newsec)
+ self.assertEqual(len(sec.sections), 1)
+ self.assertIsNone(subsec.parent)
+
+ # Test parent cleanup
+ root = Section(name="root")
+ sec.parent = root
+ subsec.parent = newsec
+
+ self.assertEqual(len(newsec.sections), 1)
+ self.assertEqual(newsec.sections[0], subsec)
+ self.assertEqual(subsec.parent, newsec)
+ self.assertEqual(len(root.sections), 1)
+ self.assertEqual(root.sections[0], sec)
+
+ subsec.parent = root
+ self.assertEqual(len(newsec.sections), 0)
+ self.assertEqual(subsec.parent, root)
+ self.assertEqual(len(root.sections), 2)
+ self.assertEqual(root.sections[1], subsec)
+
+ # Test set section fails
+ with self.assertRaises(ValueError):
+ sec.sections[0] = Document()
+ with self.assertRaises(ValueError):
+ sec.sections[0] = Property("fail")
+ with self.assertRaises(ValueError):
+ sec.sections[0] = "subsec"
+
+ # Test set properties
+ prop = Property(name="prop", parent=sec)
+ newprop = Property(name="newprop")
+
+ self.assertEqual(prop.parent, sec)
+ self.assertEqual(sec.properties[0], prop)
+ self.assertEqual(len(sec.properties), 1)
+ self.assertIsNone(newprop.parent)
+
+ sec.properties[0] = newprop
+ self.assertEqual(newprop.parent, sec)
+ self.assertEqual(sec.properties[0], newprop)
+ self.assertEqual(len(sec.properties), 1)
+ self.assertIsNone(prop.parent)
+
+ # Test set property fails
+ with self.assertRaises(ValueError):
+ sec.properties[0] = Document()
+ with self.assertRaises(ValueError):
+ sec.properties[0] = newsec
+ with self.assertRaises(ValueError):
+ sec.properties[0] = "prop"
+
def test_id(self):
s = Section(name="S")
self.assertIsNotNone(s.id)
@@ -377,6 +449,252 @@ class TestSection(unittest.TestCase):
with self.assertRaises(ValueError):
sec.contains("some info")
+ def test_merge_check(self):
+ # -- Root level Section checks
+
+ # Test empty Section check
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ destination.merge_check(source, True)
+
+ # Test definition check
+ source = Section(name="source", definition="def")
+ destination = Section(name="destination", definition="def")
+
+ destination.merge_check(source, True)
+ source.definition = "other def"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # Test reference check
+ source = Section(name="source", reference="ref")
+ destination = Section(name="destination", reference="ref")
+
+ destination.merge_check(source, True)
+ source.reference = "other ref"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # -- First child level Section checks
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_sec_one = Section(name="lvl", type="one",
+ reference="ref", definition="def", parent=source)
+ s_sec_two = Section(name="unrelated", type="one",
+ reference="one", definition="one", parent=source)
+
+ d_sec_one = Section(name="lvl", type="one",
+ reference="ref", definition="def", parent=destination)
+ d_sec_two = Section(name="unrelated", type="two",
+ reference="two", definition="two", parent=destination)
+
+ # Test Section child level definition check
+ destination.merge_check(source, True)
+ s_sec_one.definition = "other def"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # Test Section child level reference check
+ s_sec_one.definition = "def"
+ s_sec_one.reference = "other ref"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # -- Second child level Section checks
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_sec_one = Section(name="lvl", type="one",
+ reference="ref", definition="def", parent=source)
+ s_subsec_one = Section(name="lvl", type="two",
+ reference="ref2", definition="def2", parent=s_sec_one)
+ s_sec_two = Section(name="unrelated", type="one",
+ reference="one", definition="one", parent=source)
+ s_subsec_two = Section(name="lvl", type="two",
+ reference="none1", definition="none1", parent=s_sec_two)
+
+ d_sec_one = Section(name="lvl", type="one",
+ reference="ref", definition="def", parent=destination)
+ d_subsec_one = Section(name="lvl", type="two",
+ reference="ref2", definition="def2", parent=d_sec_one)
+ d_sec_two = Section(name="unrelated", type="two",
+ reference="two", definition="two", parent=destination)
+ d_subsec_two = Section(name="lvl", type="two",
+ reference="none2", definition="none2", parent=d_sec_two)
+
+ # Test Section 2nd child level definition check
+ # Check no definition/reference ValueError between s_subsec_two and d_subsec_one
+ # since their parents will not be merged.
+ destination.merge_check(source, True)
+
+ # Raise a definition ValueError between s_subsec_one and d_subsec_one
+ # since their parents will be merged.
+ s_subsec_one.definition = "other def"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # Test Section 2nd child level reference check
+ s_subsec_one.definition = "def2"
+
+ # Raise a reference ValueError between s_subsec_one and d_subsec_one
+ # since their parents will be merged.
+ s_subsec_one.reference = "other ref"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # -- Root level Property checks
+ # All Property checks will only test unit failure in the Section merge context.
+ # Other failures are covered by the specific Property merge check tests.
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_prop = Property(name="prop", parent=source)
+ d_prop = Property(name="prop", parent=destination)
+
+ destination.merge_check(source, True)
+ s_prop.unit = "Hz"
+ d_prop.unit = "s"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # -- First child level Property checks
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_prop_one = Property(name="lvl one", unit="Hz", parent=source)
+ s_prop_two = Property(name="unrelated one", unit="one", parent=source)
+
+ d_prop_one = Property(name="lvl one", unit="Hz", parent=destination)
+ d_prop_two = Property(name="unrelated two", unit="two", parent=destination)
+
+ # Test Property child level check
+ destination.merge_check(source, True)
+
+ # Test raise ValueError between s_prop_one and d_prop_one
+ s_prop_one.unit = "other unit"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ # -- Second child level Property checks
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_sec_one = Section(name="lvl", type="one", parent=source)
+ s_subprop_one = Property(name="lvl one", unit="Hz", parent=s_sec_one)
+
+ s_sec_two = Section(name="unrelated", type="one", parent=source)
+ s_subprop_two = Property(name="unrelated one", unit="one", parent=s_sec_two)
+
+ d_sec_one = Section(name="lvl", type="one", parent=destination)
+ d_subprop_one = Property(name="lvl one", unit="Hz", parent=d_sec_one)
+
+ d_sec_two = Section(name="unrelated", type="two", parent=destination)
+ d_subprop_two = Property(name="unrelated one", unit="two", parent=d_sec_two)
+
+ # Test Property 2nd child level definition check
+ # Check no unit ValueError between s_subprop_two and d_subprop_one
+ # since their parents will not be merged.
+ destination.merge_check(source, True)
+
+ # Raise a unit ValueError between s_subprop_one and d_subprop_one
+ # since their parents will be merged.
+ s_subprop_one.unit = "other unit"
+ with self.assertRaises(ValueError):
+ destination.merge_check(source, True)
+
+ def test_merge(self):
+ # -- Root level Section merge tests
+ source = Section(name="source", definition="def", reference="ref")
+ destination = Section(name="destination")
+
+ destination.merge(source)
+ self.assertEqual(destination.definition, source.definition)
+ self.assertEqual(destination.reference, source.reference)
+
+ # -- First child level Section merge tests
+ s_sec_one = Section(name="lvl", type="one", definition="def", parent=source)
+ s_sec_two = Section(name="other", type="one", parent=source)
+ d_sec_one = Section(name="lvl", type="one", parent=destination)
+
+ self.assertEqual(len(destination), 1)
+ self.assertIsNone(destination.sections["lvl"].definition)
+ self.assertIsNone(destination.sections["lvl"].reference)
+
+ destination.merge(source)
+ self.assertEqual(len(destination), 2)
+ self.assertEqual(destination.sections["lvl"].definition, s_sec_one.definition)
+ self.assertEqual(destination.sections["lvl"].reference, s_sec_one.reference)
+ self.assertEqual(destination.sections["other"], s_sec_two)
+
+ # -- Root level Property merge tests
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_prop_one = Property(name="prop_one", unit="Hz", parent=source)
+ s_prop_two = Property(name="prop_two", parent=source)
+ d_prop_one = Property(name="prop_one", parent=destination)
+
+ self.assertEqual(len(destination.properties), 1)
+ self.assertIsNone(destination.properties["prop_one"].unit)
+
+ destination.merge(source)
+ self.assertEqual(len(destination.properties), 2)
+ self.assertEqual(destination.properties["prop_one"].unit, s_prop_one.unit)
+ self.assertEqual(destination.properties["prop_two"], s_prop_two)
+
+ # -- First child level Property merge tests
+ source = Section(name="source")
+ destination = Section(name="destination")
+
+ s_sec_one = Section(name="lvl", type="one", definition="def", parent=source)
+ s_prop_one = Property(name="prop_one", unit="Hz", parent=s_sec_one)
+ s_prop_two = Property(name="prop_two", parent=s_sec_one)
+
+ d_sec_one = Section(name="lvl", type="one", parent=destination)
+ d_prop_one = Property(name="prop_one", parent=d_sec_one)
+
+ self.assertEqual(len(destination.properties), 0)
+ self.assertEqual(len(destination.sections["lvl"].properties), 1)
+ self.assertIsNone(destination.sections["lvl"].properties["prop_one"].unit)
+
+ destination.merge(source)
+ self.assertEqual(len(destination.properties), 0)
+ self.assertEqual(len(destination.sections["lvl"].properties), 2)
+ self.assertEqual(destination.sections["lvl"].properties["prop_one"].unit,
+ s_prop_one.unit)
+ self.assertEqual(destination.sections["lvl"].properties["prop_two"],
+ s_prop_two)
+
+ # -- Test nothing merged on second child level ValueError
+ source = Section(name="source", definition="def", reference="ref")
+ destination = Section(name="destination")
+
+ s_sec_one = Section(name="lvl", type="one", definition="def", parent=source)
+ s_sec_two = Section(name="other", type="one", parent=source)
+ d_sec_one = Section(name="lvl", type="one", parent=destination)
+
+ s_subprop_one = Property(name="prop", value=[1, 2, 3], parent=s_sec_one)
+ d_subprop_one = Property(name="prop", value=["four", "five"], parent=d_sec_one)
+
+ self.assertEqual(len(destination.sections), 1)
+ self.assertEqual(len(destination.sections["lvl"].properties), 1)
+ self.assertIsNone(destination.definition)
+ self.assertIsNone(destination.sections["lvl"].definition)
+ self.assertEqual(destination.sections["lvl"].properties[0].value,
+ d_subprop_one.value)
+
+ with self.assertRaises(ValueError):
+ destination.merge(source)
+
+ self.assertEqual(len(destination.sections), 1)
+ self.assertEqual(len(destination.sections["lvl"].properties), 1)
+ self.assertIsNone(destination.definition)
+ self.assertIsNone(destination.sections["lvl"].definition)
+ self.assertEqual(destination.sections["lvl"].properties[0].value,
+ d_subprop_one.value)
+
def test_link(self):
pass
@@ -386,8 +704,5 @@ class TestSection(unittest.TestCase):
def test_repository(self):
pass
- def test_merge(self):
- pass
-
def test_unmerge(self):
pass
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
isodate==0.7.2
lxml==5.3.1
-e git+https://github.com/G-Node/python-odml.git@a66ecdf4c0d2ad3fbd098b72ad3d8705a43ccd3d#egg=odML
packaging==24.2
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
PyYAML==6.0.2
rdflib==7.1.4
tomli==2.2.1
| name: python-odml
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- isodate==0.7.2
- lxml==5.3.1
- packaging==24.2
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- pyyaml==6.0.2
- rdflib==7.1.4
- tomli==2.2.1
prefix: /opt/conda/envs/python-odml
| [
"test/test_property.py::TestProperty::test_merge_check",
"test/test_section.py::TestSection::test_children",
"test/test_section.py::TestSection::test_merge",
"test/test_section.py::TestSection::test_merge_check",
"test/test_section.py::TestSection::test_simple_attributes"
]
| []
| [
"test/test_property.py::TestProperty::test_bool_conversion",
"test/test_property.py::TestProperty::test_clone",
"test/test_property.py::TestProperty::test_dtype",
"test/test_property.py::TestProperty::test_get_merged_equivalent",
"test/test_property.py::TestProperty::test_get_path",
"test/test_property.py::TestProperty::test_get_set_value",
"test/test_property.py::TestProperty::test_id",
"test/test_property.py::TestProperty::test_merge",
"test/test_property.py::TestProperty::test_new_id",
"test/test_property.py::TestProperty::test_parent",
"test/test_property.py::TestProperty::test_simple_attributes",
"test/test_property.py::TestProperty::test_str_to_int_convert",
"test/test_property.py::TestProperty::test_value",
"test/test_property.py::TestProperty::test_value_append",
"test/test_property.py::TestProperty::test_value_extend",
"test/test_section.py::TestSection::test_append",
"test/test_section.py::TestSection::test_clone",
"test/test_section.py::TestSection::test_contains",
"test/test_section.py::TestSection::test_extend",
"test/test_section.py::TestSection::test_id",
"test/test_section.py::TestSection::test_include",
"test/test_section.py::TestSection::test_insert",
"test/test_section.py::TestSection::test_link",
"test/test_section.py::TestSection::test_new_id",
"test/test_section.py::TestSection::test_parent",
"test/test_section.py::TestSection::test_path",
"test/test_section.py::TestSection::test_remove",
"test/test_section.py::TestSection::test_reorder",
"test/test_section.py::TestSection::test_repository",
"test/test_section.py::TestSection::test_unmerge"
]
| []
| BSD 4-Clause "Original" or "Old" License | 2,356 | [
"odml/base.py",
"odml/section.py",
"odml/property.py"
]
| [
"odml/base.py",
"odml/section.py",
"odml/property.py"
]
|
|
prjemian__pyRestTable-13 | 3e477f655666c5a530ddbb95bc64fff31fa1fd69 | 2018-04-03 18:03:36 | 3e477f655666c5a530ddbb95bc64fff31fa1fd69 | coveralls:
[](https://coveralls.io/builds/16321609)
Coverage increased (+0.2%) to 96.286% when pulling **b946c75d77a8941951c6a20552efb40d8247a526 on 12-html-output** into **3e477f655666c5a530ddbb95bc64fff31fa1fd69 on master**.
| diff --git a/.travis.yml b/.travis.yml
index 1a3ea07..c033926 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,6 +10,7 @@ sudo: false
python:
- "2.7"
- "3.5"
+ - "3.6"
install:
- pip install -r requirements.txt
diff --git a/CHANGES.rst b/CHANGES.rst
index bc718a2..a714698 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -7,6 +7,11 @@ Change History
Production
**********
+:2018.4.0:
+
+ * `#12 <https://github.com/prjemian/pyRestTable/issues/12>`_
+ provide HTML table output format
+
:2017.2.0:
* `#9 <https://github.com/prjemian/pyRestTable/issues/9>`_
diff --git a/README.rst b/README.rst
index b63e36d..7402113 100644
--- a/README.rst
+++ b/README.rst
@@ -10,7 +10,7 @@ that does not span any cells (no rowspans or columnspans).
:author: Pete R. Jemian
:email: [email protected]
-:copyright: 2014-2017, Pete R. Jemian
+:copyright: 2014-2018, Pete R. Jemian
:license: Creative Commons Attribution 4.0 International Public License (see *LICENSE.txt*)
:docs: http://pyRestTable.readthedocs.io
:URL: https://github.com/prjemian/pyRestTable
diff --git a/docs/source/examples/html.rst b/docs/source/examples/html.rst
new file mode 100644
index 0000000..999ab33
--- /dev/null
+++ b/docs/source/examples/html.rst
@@ -0,0 +1,51 @@
+*html*
+######
+
+:see: https://www.w3schools.com/html/html_tables.asp
+
+These python commands:
+
+.. code-block:: python
+ :linenos:
+
+ import pyRestTable
+ t = pyRestTable.Table()
+ t.labels = ('one', 'two', 'three' )
+ t.rows.append( ['1,1', '1,2', '1,3',] )
+ t.rows.append( ['2,1', '2,2', '2,3',] )
+ t.rows.append( ['3,1', '3,2', '3,3',] )
+ t.rows.append( ['4,1', '4,2', '4,3',] )
+ print(t.reST(fmt='html'))
+
+build this table in HTML source code:
+
+.. code-block:: guess
+ :linenos:
+
+ <table>
+ <tr>
+ <th>one</th>
+ <th>two</th>
+ <th>three</th>
+ </tr>
+ <tr>
+ <td>1,1</td>
+ <td>1,2</td>
+ <td>1,3</td>
+ </tr>
+ <tr>
+ <td>2,1</td>
+ <td>2,2</td>
+ <td>2,3</td>
+ </tr>
+ <tr>
+ <td>3,1</td>
+ <td>3,2</td>
+ <td>3,3</td>
+ </tr>
+ <tr>
+ <td>4,1</td>
+ <td>4,2</td>
+ <td>4,3</td>
+ </tr>
+ </table>
diff --git a/docs/source/examples/index.rst b/docs/source/examples/index.rst
index c0028d1..7e1f7b3 100644
--- a/docs/source/examples/index.rst
+++ b/docs/source/examples/index.rst
@@ -12,5 +12,6 @@ Examples are provided to demonstrate usage.
plain
grid
list-table
+ html
example2
cansas
diff --git a/src/pyRestTable/rest_table.py b/src/pyRestTable/rest_table.py
index 01696dc..931a91d 100644
--- a/src/pyRestTable/rest_table.py
+++ b/src/pyRestTable/rest_table.py
@@ -4,7 +4,7 @@
#-----------------------------------------------------------------------------
# :author: Pete R. Jemian
# :email: [email protected]
-# :copyright: (c) 2014-2017, Pete R. Jemian
+# :copyright: (c) 2014-2018, Pete R. Jemian
#
# Distributed under the terms of the Creative Commons Attribution 4.0 International Public License.
#
@@ -24,6 +24,14 @@ User Interface Description
:meth:`setTabularColumns` set `use_tabular_columns` & `alignment` attributes
:meth:`reST` render the table in reST format
=========================== ============================================================
+
+.. autosummary::
+
+ ~Table
+ ~example_minimal
+ ~example_basic
+ ~example_complicated
+
"""
@@ -32,7 +40,8 @@ def _prepare_results_(t):
s += t.reST(fmt='plain') + '\n'
s += t.reST(fmt='simple') + '\n'
s += t.reST(fmt='grid') + '\n'
- s += t.reST(fmt='list-table')
+ s += t.reST(fmt='list-table') + '\n'
+ s += t.reST(fmt='html')
return s
@@ -86,6 +95,27 @@ class Table(object):
http://sphinx-doc.org/markup/misc.html?highlight=tabularcolumns#directive-tabularcolumns
:param bool longtable: with `use_tabular_columns`,
if True, add Sphinx `:longtable:` directive
+
+ MAIN METHODS
+
+ .. autosummary::
+
+ ~addLabel
+ ~addRow
+ ~reST
+
+ SUPPORTING METHODS
+
+ .. autosummary::
+
+ ~setLongTable
+ ~setTabularColumns
+ ~plain_table
+ ~simple_table
+ ~grid_table
+ ~list_table
+ ~html_table
+
"""
def __init__(self):
@@ -150,6 +180,7 @@ class Table(object):
'complex': self.grid_table, # alias for `grid`, do not deprecate
'grid': self.grid_table,
'list-table': self.list_table,
+ 'html': self.html_table,
}[fmt](indentation)
def plain_table(self, indentation = ''):
@@ -278,6 +309,19 @@ class Table(object):
return '\n'.join(rest)
+ def html_table(self, indentation = ''):
+ """render the table in *HTML*"""
+ html = "<table>\n"
+ html += ' <tr>\n' # start the labels
+ html += "".join([" <th>{}</th>\n".format(k) for k in self.labels]) # labels
+ html += ' </tr>\n' # end the labels
+ for row in self.rows:
+ html += ' <tr>\n' # start each row
+ html += "".join([" <td>{}</td>\n".format(k) for k in row]) # each row
+ html += ' </tr>\n' # end each row
+ html += '</table>' # end of table
+ return html
+
def _row(self, row, fmt, indentation = ''):
"""
Given a list of entry nodes in this table row,
| provide HTML table output format | prjemian/pyRestTable | diff --git a/tests/test_results.py b/tests/test_results.py
index 264a255..55375d1 100644
--- a/tests/test_results.py
+++ b/tests/test_results.py
@@ -80,14 +80,25 @@ MINIMAL_LISTTABLE_RESULT = '''\
- 2\
'''
+MINIMAL_HTML_RESULT = '''\
+<table>
+ <tr>
+ <th>x</th>
+ <th>y</th>
+ </tr>
+ <tr>
+ <td>1</td>
+ <td>2</td>
+ </tr>
+</table>\
+'''
+
-EXAMPLE_MINIMAL_RESULT = MINIMAL_PLAIN_RESULT
-EXAMPLE_MINIMAL_RESULT += '\n'
-EXAMPLE_MINIMAL_RESULT += MINIMAL_SIMPLE_RESULT
-EXAMPLE_MINIMAL_RESULT += '\n'
-EXAMPLE_MINIMAL_RESULT += MINIMAL_GRID_RESULT
-EXAMPLE_MINIMAL_RESULT += '\n'
-EXAMPLE_MINIMAL_RESULT += MINIMAL_LISTTABLE_RESULT
+EXAMPLE_MINIMAL_RESULT = MINIMAL_PLAIN_RESULT + '\n'
+EXAMPLE_MINIMAL_RESULT += MINIMAL_SIMPLE_RESULT + '\n'
+EXAMPLE_MINIMAL_RESULT += MINIMAL_GRID_RESULT + '\n'
+EXAMPLE_MINIMAL_RESULT += MINIMAL_LISTTABLE_RESULT + '\n'
+EXAMPLE_MINIMAL_RESULT += MINIMAL_HTML_RESULT
EXAMPLE_BASIC_RESULT = '''\
@@ -136,7 +147,36 @@ one two three
- 3,3
* - 4,1
- 4,2
- - 4,3'''
+ - 4,3
+\
+<table>
+ <tr>
+ <th>one</th>
+ <th>two</th>
+ <th>three</th>
+ </tr>
+ <tr>
+ <td>1,1</td>
+ <td>1,2</td>
+ <td>1,3</td>
+ </tr>
+ <tr>
+ <td>2,1</td>
+ <td>2,2</td>
+ <td>2,3</td>
+ </tr>
+ <tr>
+ <td>3,1</td>
+ <td>3,2</td>
+ <td>3,3</td>
+ </tr>
+ <tr>
+ <td>4,1</td>
+ <td>4,2</td>
+ <td>4,3</td>
+ </tr>
+</table>\
+'''
EXAMPLE_COMPLICATED_RESULT = '''
@@ -170,7 +210,7 @@ class Test_pyRestTable(unittest.TestCase):
def apply_test(self, table, reference_text, style='simple'):
text = table.reST(fmt=style)
- self.assertTrue(text == reference_text)
+ self.assertTrue(text.strip() == reference_text.strip())
def population_table(self):
t = pyRestTable.Table()
@@ -231,10 +271,14 @@ class Test_pyRestTable(unittest.TestCase):
def test_minimal_listtable(self):
self.apply_test(pyRestTable.rest_table.example_minimal(), MINIMAL_LISTTABLE_RESULT, 'list-table')
+ def test_minimal_htmltable(self):
+ table = pyRestTable.rest_table.example_minimal()
+ self.apply_test(table, MINIMAL_HTML_RESULT, 'html')
+
def test_example_basic(self):
t = pyRestTable.rest_table.example_basic()
s = pyRestTable.rest_table._prepare_results_(t)
- self.assertEqual(s, EXAMPLE_BASIC_RESULT)
+ self.assertEqual(s.strip(), EXAMPLE_BASIC_RESULT.strip())
def test_example_complicated(self):
t = pyRestTable.rest_table.example_complicated()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 5
} | 2017.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
lxml==5.3.1
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/prjemian/pyRestTable.git@3e477f655666c5a530ddbb95bc64fff31fa1fd69#egg=pyRestTable
pytest==8.3.5
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: pyRestTable
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- lxml==5.3.1
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/pyRestTable
| [
"tests/test_results.py::Test_pyRestTable::test_example_basic",
"tests/test_results.py::Test_pyRestTable::test_example_minimal",
"tests/test_results.py::Test_pyRestTable::test_minimal_htmltable"
]
| [
"tests/test_results.py::Test_pyRestTable::test_example_complicated"
]
| [
"tests/test_results.py::Test_pyRestTable::test_cansas",
"tests/test_results.py::Test_pyRestTable::test_default_str",
"tests/test_results.py::Test_pyRestTable::test_grid",
"tests/test_results.py::Test_pyRestTable::test_minimal_complex",
"tests/test_results.py::Test_pyRestTable::test_minimal_grid",
"tests/test_results.py::Test_pyRestTable::test_minimal_listtable",
"tests/test_results.py::Test_pyRestTable::test_minimal_simple",
"tests/test_results.py::Test_pyRestTable::test_num_col_labels_different_from_col_width_specifiers",
"tests/test_results.py::Test_pyRestTable::test_plain",
"tests/test_results.py::Test_pyRestTable::test_simple",
"tests/test_results.py::Test_pyRestTable::test_zero_columns",
"tests/test_results.py::Test_pyRestTable::test_zero_width_column"
]
| []
| null | 2,357 | [
"README.rst",
"docs/source/examples/html.rst",
"src/pyRestTable/rest_table.py",
".travis.yml",
"CHANGES.rst",
"docs/source/examples/index.rst"
]
| [
"README.rst",
"docs/source/examples/html.rst",
"src/pyRestTable/rest_table.py",
".travis.yml",
"CHANGES.rst",
"docs/source/examples/index.rst"
]
|
cevoaustralia__aws-google-auth-71 | 390e181516b02baf55bdf67296fe3437e7c8904a | 2018-04-03 21:57:06 | d473d67b0772700942f5bb0db3522af0a1005453 | diff --git a/aws_google_auth/__init__.py b/aws_google_auth/__init__.py
index 0ec8126..2163356 100644
--- a/aws_google_auth/__init__.py
+++ b/aws_google_auth/__init__.py
@@ -51,7 +51,7 @@ def exit_if_unsupported_python():
sys.exit(1)
-def main(cli_args):
+def cli(cli_args):
try:
exit_if_unsupported_python()
@@ -201,6 +201,10 @@ def process_auth(args, config):
amazon_client.print_export_line()
-if __name__ == '__main__':
+def main():
cli_args = sys.argv[1:]
- main(cli_args)
+ cli(cli_args)
+
+
+if __name__ == '__main__':
+ main()
| main() takes exactly 1 argument (0 given)
I'm getting the following error, introduced in 48d22e4d62bb6e216cd8932739ea0be4735e2588 (Determined by `git bisect`). I don't have the time to dig right now, but I will later.
```
$ aws-google-auth --help
Traceback (most recent call last):
File "/Users/mide/virtualenv/aws-google-auth-dev/bin/aws-google-auth", line 11, in <module>
load_entry_point('aws-google-auth', 'console_scripts', 'aws-google-auth')()
TypeError: main() takes exactly 1 argument (0 given)
``` | cevoaustralia/aws-google-auth | diff --git a/aws_google_auth/tests/test_init.py b/aws_google_auth/tests/test_init.py
index 9a298a8..cd818d2 100644
--- a/aws_google_auth/tests/test_init.py
+++ b/aws_google_auth/tests/test_init.py
@@ -11,6 +11,20 @@ class TestInit(unittest.TestCase):
def setUp(self):
pass
+ @patch('aws_google_auth.cli', spec=True)
+ def test_main_method_has_no_parameters(self, mock_cli):
+ """
+ This is the entrypoint for the cli tool, and should require no parameters
+
+ :param mock_cli:
+ :return:
+ """
+
+ # Function under test
+ aws_google_auth.main()
+
+ self.assertTrue(mock_cli.called)
+
@patch('aws_google_auth.exit_if_unsupported_python', spec=True)
@patch('aws_google_auth.resolve_config', spec=True)
@patch('aws_google_auth.process_auth', spec=True)
@@ -22,7 +36,7 @@ class TestInit(unittest.TestCase):
aws_google_auth.resolve_config.return_value = mock_config
# Function under test
- aws_google_auth.main([])
+ aws_google_auth.cli([])
self.assertTrue(exit_if_unsupported_python.called)
self.assertTrue(resolve_config.called)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/cevoaustralia/aws-google-auth.git@390e181516b02baf55bdf67296fe3437e7c8904a#egg=aws_google_auth
beautifulsoup4==4.13.3
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
charset-normalizer==3.4.1
configparser==7.2.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.32.3
s3transfer==0.11.4
six==1.17.0
soupsieve==2.6
tabulate==0.9.0
tomli==2.2.1
typing_extensions==4.13.0
tzlocal==5.3.1
urllib3==1.26.20
| name: aws-google-auth
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- charset-normalizer==3.4.1
- configparser==7.2.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.32.3
- s3transfer==0.11.4
- six==1.17.0
- soupsieve==2.6
- tabulate==0.9.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzlocal==5.3.1
- urllib3==1.26.20
prefix: /opt/conda/envs/aws-google-auth
| [
"aws_google_auth/tests/test_init.py::TestInit::test_main_method_chaining",
"aws_google_auth/tests/test_init.py::TestInit::test_main_method_has_no_parameters"
]
| []
| [
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_dont_resolve_alias",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_specified_role",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_standard",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_with_profile",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_with_saml_cache"
]
| []
| MIT License | 2,358 | [
"aws_google_auth/__init__.py"
]
| [
"aws_google_auth/__init__.py"
]
|
|
TheFriendlyCoder__friendlypins-2 | 3ebf4892111351fc82c38a2b1a9f81ab099294a9 | 2018-04-04 02:36:59 | 3ebf4892111351fc82c38a2b1a9f81ab099294a9 | diff --git a/setup.py b/setup.py
index c322c9e..7827c73 100755
--- a/setup.py
+++ b/setup.py
@@ -7,20 +7,18 @@ from setuptools import setup, find_packages
# project specific parameters
PROJECT_NAME = 'friendlypins'
PROJECT_DEPENDENCIES = [
- #'requests[security]>=2.0.1',
- 'requests',
- 'six',
- 'tqdm']
+ 'requests<3.0.0,>=2.0.0',
+ 'six<2.0.0,>=1.0.0',]
PROJECT_DEV_DEPENDENCIES = [
- 'wheel',
- 'twine',
- 'pytest',
- 'pytest-cov',
- 'mock',
- 'radon',
- 'pylint',
- 'sphinx>=1.2.3',
- 'tox']
+ 'wheel<1.0.0',
+ 'twine<2.0.0',
+ 'pytest>=3.5.0<4.0.0',
+ 'pytest-cov>=2.5.0<3.0.0',
+ 'mock>=2.0.0<3.0.0',
+ 'radon>=2.2.0<3.0.0',
+ 'pylint>=1.8.0<2.0.0',
+ 'sphinx>=1.2.3<2.0.0',
+ 'tox>=3.0.0<4.0.0']
PROJECT_DESCRIPTION = 'Python wrapper around the Pinterest developer APIs'
PROJECT_KEYWORDS = 'pinterest api wrapper library'
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index 8b28950..c9540e1 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -1,10 +1,25 @@
"""Primary entry point for the Friendly Pinterest library"""
from __future__ import print_function
+import logging
+import requests
+from friendlypins.user import User
class API(object): # pylint: disable=too-few-public-methods
"""High level abstraction for the core Pinterest API"""
- def __init__(self):
- self.name = "hello"
+
+ # URL of the root namespace for the Pinterest API
+ _root_url = 'https://api.pinterest.com/v1'
+
+ def __init__(self, personal_access_token):
+ """Constructor
+
+ :param str personal_access_token:
+ API authentication token used for secure access to a users'
+ Pinterest data
+ """
+
+ self._log = logging.getLogger(__name__)
+ self._token = personal_access_token
def get_user(self, username=None):
"""Gets all primitives associated with a particular Pinterst user
@@ -15,11 +30,18 @@ class API(object): # pylint: disable=too-few-public-methods
returns: Pinterest user with the given name
rtype: :class:`friendly_pinterest.user.User`
"""
- print(self.name)
if username:
- return None
- return None
+ raise NotImplementedError(
+ "Querying arbitrary Pinerest users is not yet supported.")
+ else:
+ temp_url = "{0}/me".format(self._root_url)
+ temp_url += "?access_token={0}".format(self._token)
+ response = requests.get(temp_url)
+ response.raise_for_status()
+ assert 'data' in response.json()
+ return User(response.json()['data'])
+# pylint: disable-all
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
new file mode 100644
index 0000000..02dd4e7
--- /dev/null
+++ b/src/friendlypins/user.py
@@ -0,0 +1,44 @@
+"""Interfaces for interacting with Pinterest users"""
+import logging
+
+class User(object):
+ """Abstraction around a Pinterest user and their associated data"""
+
+ def __init__(self, data):
+ """Constructor
+
+ :param dict data: JSON data parsed from the API
+ """
+ self._log = logging.getLogger(__name__)
+ self._data = data
+
+ @property
+ def unique_id(self):
+ """Gets the internal unique ID associated with the user
+ :rtype: :class:`str`
+ """
+ return self._data['id']
+
+ @property
+ def first_name(self):
+ """Gets the first name of the user
+ :rtype: :class:`str`
+ """
+ return self._data['first_name']
+
+ @property
+ def last_name(self):
+ """Gets the last name of the user
+ :rtype: :class:`str`
+ """
+ return self._data['last_name']
+
+ @property
+ def url(self):
+ """Gets the URL of the users profile
+ :rtype: :class:`str`
+ """
+ return self._data['url']
+
+if __name__ == "__main__":
+ pass
| Add support for basic connectivity
Implement basic init method for API class, allowing connections to be made to a Pinterest account by specifying a user's API token, and perform a simple query to get information about the user who's token we're authenticating with. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_api.py b/unit_tests/test_api.py
index 2526048..67ada2e 100644
--- a/unit_tests/test_api.py
+++ b/unit_tests/test_api.py
@@ -1,12 +1,32 @@
import pytest
+import mock
from friendlypins.api import API
-def test_constructor():
- obj = API()
-
def test_get_user():
- obj = API()
- obj.get_user()
+ obj = API('abcd1234')
+ expected_url = 'https://www.pinterest.com/MyUserName/'
+ expected_firstname = "John"
+ expected_lastname = "Doe"
+ expected_id = "12345678"
+ expected_data = {
+ 'data': {
+ 'url': expected_url,
+ 'first_name': expected_firstname,
+ 'last_name': expected_lastname,
+ 'id': expected_id
+ }
+ }
+ with mock.patch("friendlypins.api.requests") as mock_requests:
+ mock_response = mock.MagicMock()
+ mock_response.json.return_value = expected_data
+ mock_requests.get.return_value = mock_response
+ result = obj.get_user()
+
+ assert expected_url == result.url
+ assert expected_firstname == result.first_name
+ assert expected_lastname == result.last_name
+ assert expected_id == result.unique_id
+
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
cryptography==40.0.2
dill==0.3.4
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@3ebf4892111351fc82c38a2b1a9f81ab099294a9#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
jeepney==0.7.1
Jinja2==3.0.3
keyring==23.4.1
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
Pygments==2.14.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
rfc3986==1.5.0
SecretStorage==3.3.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
tqdm==4.64.1
twine==3.8.0
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- babel==2.11.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- cryptography==40.0.2
- dill==0.3.4
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jeepney==0.7.1
- jinja2==3.0.3
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pycparser==2.21
- pygments==2.14.0
- pylint==2.13.9
- pytest-cov==4.0.0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==3.28.0
- tqdm==4.64.1
- twine==3.8.0
- typed-ast==1.5.5
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.16.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_api.py::test_get_user"
]
| []
| []
| []
| Apache License 2.0 | 2,359 | [
"setup.py",
"src/friendlypins/user.py",
"src/friendlypins/api.py"
]
| [
"setup.py",
"src/friendlypins/user.py",
"src/friendlypins/api.py"
]
|
|
PlasmaPy__PlasmaPy-334 | 8d9f1fe553df5786013b65c65bca21602fed9c32 | 2018-04-04 05:57:35 | 8d9f1fe553df5786013b65c65bca21602fed9c32 | diff --git a/plasmapy/physics/parameters.py b/plasmapy/physics/parameters.py
index 19f4a1c3..198a5e99 100644
--- a/plasmapy/physics/parameters.py
+++ b/plasmapy/physics/parameters.py
@@ -144,13 +144,13 @@ def Alfven_speed(B, density, ion="p+", z_mean=None):
If the density is negative, or the ion mass or charge state
cannot be found.
- UserWarning
- if units are not provided and SI units are assumed.
-
Warns
-----
~plasmapy.utils.RelativityWarning
- If the Alfven velocity exceeds 10% of the speed of light
+ If the Alfven velocity exceeds 5% of the speed of light
+
+ ~astropy.units.UnitsWarning
+ if units are not provided, SI units are assumed.
Notes
-----
@@ -266,9 +266,13 @@ def ion_sound_speed(T_e,
~astropy.units.UnitConversionError
If the temperature is in incorrect units.
- UserWarning
- If the ion sound speed exceeds 10% of the speed of light, or
- if units are not provided and SI units are assumed.
+ Warns
+ -----
+ RelativityWarning
+ If the ion sound speed exceeds 5% of the speed of light.
+
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -370,9 +374,13 @@ def thermal_speed(T, particle="e-", method="most_probable"):
The particle temperature is invalid or particle cannot be used to
identify an isotope or particle
- UserWarning
- If the particle thermal speed exceeds 10% of the speed of light, or
- if units are not provided and SI units are assumed.
+ Warns
+ -----
+ RelativityWarning
+ If the ion sound speed exceeds 5% of the speed of light, or
+
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -474,9 +482,13 @@ def kappa_thermal_speed(T, kappa, particle="e-", method="most_probable"):
The particle temperature is invalid or particle cannot be used to
identify an isotope or particle.
- UserWarning
- If the particle thermal speed exceeds 10% of the speed of light, or
- if units are not provided and SI units are assumed.
+ Warns
+ -----
+ RelativityWarning
+ If the particle thermal speed exceeds 5% of the speed of light, or
+
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -847,8 +859,10 @@ def gyrofrequency(B, particle='e-', signed=False, Z=None):
If the magnetic field contains invalid values or particle cannot be
used to identify an particle or isotope
- UserWarning
- If units are not provided and SI units are assumed
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -946,8 +960,10 @@ def gyroradius(B, particle='e-', *, Vperp=np.nan * u.m / u.s, T_i=np.nan * u.K):
ValueError
If any argument contains invalid values
- UserWarning
- If units are not provided and SI units are assumed
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1049,8 +1065,10 @@ def plasma_frequency(n, particle='e-', z_mean=None):
If `n_i` contains invalid values or particle cannot be used to
identify an particle or isotope.
- UserWarning
- If units are not provided and SI units are assumed
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1133,8 +1151,10 @@ def Debye_length(T_e, n_e):
ValueError
If either argument contains invalid values
- UserWarning
- If units are not provided and SI units are assumed
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1196,8 +1216,10 @@ def Debye_number(T_e, n_e):
ValueError
If either argument contains invalid values
- UserWarning
- If units are not provided and SI units are assumed
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Returns
-------
@@ -1268,8 +1290,10 @@ def inertial_length(n, particle='e-'):
ValueError
The particle density does not have an appropriate value.
- UserWarning
- If units are not provided and SI units are assumed.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1324,8 +1348,10 @@ def magnetic_pressure(B):
If the magnetic field strength is not a real number between
+/- infinity.
- UserWarning
- If units are not provided and SI units are assumed.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1386,8 +1412,10 @@ def magnetic_energy_density(B: u.T):
If the magnetic field strength does not have an appropriate.
value.
- UserWarning
- If units are not provided and SI units are assumed.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1443,17 +1471,19 @@ def upper_hybrid_frequency(B, n_e):
Raises
------
TypeError
- If either of B or n_e is not a Quantity.
+ If either of `B` or `n_e` is not a Quantity.
~astropy.units.UnitConversionError
- If either of B or n_e is in incorrect units.
+ If either of `B` or `n_e` is in incorrect units.
ValueError
- If either of B or n_e contains invalid values or are of
+ If either of `B` or `n_e` contains invalid values or are of
incompatible dimensions.
- UserWarning
- If units are not provided and SI units are assumed.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -1521,8 +1551,10 @@ def lower_hybrid_frequency(B, n_i, ion='p+'):
incompatible dimensions, or ion cannot be used to identify an
ion or isotope.
- UserWarning
- If units are not provided and SI units are assumed.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
diff --git a/plasmapy/physics/quantum.py b/plasmapy/physics/quantum.py
index 8e60344d..568c3a34 100644
--- a/plasmapy/physics/quantum.py
+++ b/plasmapy/physics/quantum.py
@@ -19,6 +19,9 @@
# TODO: Use @check_relativistic and @particle_input
[email protected]_quantity({
+ 'V': {'units': u.m/u.s, 'can_be_negative': True}
+ })
def deBroglie_wavelength(V, particle):
r"""
Calculates the de Broglie wavelength.
@@ -52,9 +55,8 @@ def deBroglie_wavelength(V, particle):
Warns
-----
- UserWarning
- If `V` is not a `~astropy.units.Quantity`, then a `UserWarning`
- will be raised and units of meters per second will be assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
Notes
-----
@@ -150,8 +152,8 @@ def thermal_deBroglie_wavelength(T_e):
Warns
-----
- UserWarning
- If units are not provided and SI units are assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -203,8 +205,8 @@ def Fermi_energy(n_e):
Warns
-----
- UserWarning
- If units are not provided and SI units are assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -264,8 +266,8 @@ def Thomas_Fermi_length(n_e):
Warns
-----
- UserWarning
- If units are not provided and SI units are assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -339,8 +341,8 @@ def Wigner_Seitz_radius(n: u.m**-3):
Warns
-----
- UserWarning
- If units are not provided and SI units are assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -397,8 +399,8 @@ def chemical_potential(n_e: u.m ** -3, T: u.K):
Warns
-----
- UserWarning
- If units are not provided and SI units are assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
@@ -498,8 +500,8 @@ def chemical_potential_interp(n_e, T):
Warnings
--------
- UserWarning
- If units are not provided and SI units are assumed.
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
diff --git a/plasmapy/physics/relativity.py b/plasmapy/physics/relativity.py
index 84517ff1..5bfa9cd4 100644
--- a/plasmapy/physics/relativity.py
+++ b/plasmapy/physics/relativity.py
@@ -5,7 +5,10 @@
from plasmapy import atomic, utils
-def Lorentz_factor(V):
[email protected]_quantity({
+ 'V': {'units': u.m/u.s, 'can_be_negative': True}
+ })
+def Lorentz_factor(V: u.m/u.s):
r"""
Return the Lorentz factor.
@@ -31,9 +34,10 @@ def Lorentz_factor(V):
ValueError
If the magnitude of `V` is faster than the speed of light.
- UserWarning
- If `V` is not a `~astropy.units.Quantity`, then a `UserWarning`
- will be raised and units of meters per second will be assumed.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed.
Notes
-----
diff --git a/plasmapy/physics/transport/collisions.py b/plasmapy/physics/transport/collisions.py
index 41fc97e0..de6f35fc 100644
--- a/plasmapy/physics/transport/collisions.py
+++ b/plasmapy/physics/transport/collisions.py
@@ -78,16 +78,20 @@ def Coulomb_logarithm(T,
UnitConversionError
If the units on any of the inputs are incorrect.
- UserWarning
- If the input velocity is greater than 80% of the speed of
- light.
-
- TypeError
If the n_e, T, or V are not Quantities.
PhysicsError
If the result is smaller than 1.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The classical Coulomb logarithm is given by
@@ -303,13 +307,18 @@ def b_perp(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The distance of closest approach, b_perp, is given by [1]_
@@ -341,8 +350,7 @@ def b_perp(T,
# classical effects dominate.
# !!!Note: an average ionization parameter will have to be
# included here in the future
- bPerp = (charges[0] * charges[1] /
- (4 * pi * eps0 * reduced_mass * V ** 2))
+ bPerp = (charges[0] * charges[1] / (4 * pi * eps0 * reduced_mass * V ** 2))
return bPerp.to(u.m)
@@ -403,13 +411,18 @@ def impact_parameter(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The minimum and maximum impact parameters may be calculated in a
@@ -588,13 +601,18 @@ def collision_frequency(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The collision frequency is given by [1]_
@@ -745,13 +763,18 @@ def mean_free_path(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The collisional mean free path is given by [1]_
@@ -854,13 +877,18 @@ def Spitzer_resistivity(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The Spitzer resistivity is given by [1]_ [2]_
@@ -972,13 +1000,18 @@ def mobility(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The mobility is given by [1]_
@@ -1088,13 +1121,18 @@ def Knudsen_number(characteristic_length,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The Knudsen number is given by [1]_
@@ -1197,13 +1235,18 @@ def coupling_parameter(T,
UnitConversionError
If the units on any of the inputs are incorrect
- UserWarning
- If the inputted velocity is greater than 80% of the speed of
- light.
-
TypeError
If the n_e, T, or V are not Quantities.
+ Warns
+ -----
+ ~astropy.units.UnitsWarning
+ If units are not provided, SI units are assumed
+
+ ~plasmapy.utils.RelativityWarning
+ If the input velocity is greater than 5% of the speed of
+ light.
+
Notes
-----
The coupling parameter is given by
diff --git a/plasmapy/utils/checks.py b/plasmapy/utils/checks.py
index a25b4a50..4bb14b0b 100644
--- a/plasmapy/utils/checks.py
+++ b/plasmapy/utils/checks.py
@@ -3,9 +3,11 @@
import numpy as np
from astropy import units as u
+from astropy.units import UnitsWarning
from plasmapy.constants import c
import warnings
from plasmapy.utils.exceptions import RelativityWarning, RelativityError
+from textwrap import dedent
def check_quantity(validations):
@@ -86,94 +88,26 @@ def wrapper(*args, **kwargs):
for param_to_check, validation_settings in validations.items():
value_to_check = given_params_values[param_to_check]
- can_be_negative = validation_settings.get(
- 'can_be_negative', True)
- can_be_complex = validation_settings.get(
- 'can_be_complex', False)
- can_be_inf = validation_settings.get(
- 'can_be_inf', True)
- can_be_nan = validation_settings.get(
- 'can_be_nan', False)
-
- _check_quantity(value_to_check,
- param_to_check,
- fname,
- validation_settings['units'],
- can_be_negative=can_be_negative,
- can_be_complex=can_be_complex,
- can_be_inf=can_be_inf,
- can_be_nan=can_be_nan)
-
- return f(*args, **kwargs)
+ can_be_negative = validation_settings.get('can_be_negative', True)
+ can_be_complex = validation_settings.get('can_be_complex', False)
+ can_be_inf = validation_settings.get('can_be_inf', True)
+ can_be_nan = validation_settings.get('can_be_nan', False)
+
+ validated_value = _check_quantity(value_to_check,
+ param_to_check,
+ fname,
+ validation_settings['units'],
+ can_be_negative=can_be_negative,
+ can_be_complex=can_be_complex,
+ can_be_inf=can_be_inf,
+ can_be_nan=can_be_nan)
+ given_params_values[param_to_check] = validated_value
+
+ return f(**given_params_values)
return wrapper
return decorator
-def check_relativistic(func=None, betafrac=0.1):
- r"""
- Warns or raises an exception when the output of the decorated
- function is greater than `betafrac` times the speed of light.
-
- Parameters
- ----------
- func : `function`, optional
- The function to decorate.
-
- betafrac : float, optional
- The minimum fraction of the speed of light that will raise a
- `~plasmapy.utils.RelativityWarning`. Defaults to 0.1.
-
- Returns
- -------
- function
- Decorated function.
-
- Raises
- ------
- TypeError
- If `V` is not a `~astropy.units.Quantity`.
-
- ~astropy.units.UnitConversionError
- If `V` is not in units of velocity.
-
- ValueError
- If `V` contains any `~numpy.nan` values.
-
- ~plasmapy.utils.RelativityError
- If `V` is greater than or equal to the speed of light.
-
- Warns
- -----
- ~plasmapy.utils.RelativityWarning
- If `V` is greater than or equal to `betafrac` times the speed of light,
- but less than the speed of light.
-
- Examples
- --------
- >>> from astropy import units as u
- >>> @check_relativistic
- ... def speed():
- ... return 1 * u.m / u.s
-
- Passing in a custom `betafrac`:
-
- >>> @check_relativistic(betafrac=0.01)
- ... def speed():
- ... return 1 * u.m / u.s
-
- """
- def decorator(f):
- @functools.wraps(f)
- def wrapper(*args, **kwargs):
- return_ = f(*args, **kwargs)
- _check_relativistic(return_, f.__name__, betafrac=betafrac)
- return return_
- return wrapper
- if func:
- return decorator(func)
- return decorator
-
-
def _check_quantity(arg, argname, funcname, units, can_be_negative=True,
can_be_complex=False, can_be_inf=True, can_be_nan=False):
"""
@@ -229,15 +163,18 @@ def _check_quantity(arg, argname, funcname, units, can_be_negative=True,
Warns
-----
- UserWarning
+ ~astropy.units.UnitsWarning
If a `~astropy.units.Quantity` is not provided and unique units
- are provided, a `UserWarning` will be raised and the inputted
+ are provided, a `UnitsWarning` will be raised and the inputted
units will be assumed.
Examples
--------
>>> from astropy import units as u
>>> _check_quantity(4*u.T, 'B', 'f', u.T)
+ <Quantity 4. T>
+ >>> _check_quantity(4, 'B', 'f', u.T)
+ <Quantity 4. T>
"""
@@ -272,6 +209,13 @@ def _check_quantity(arg, argname, funcname, units, can_be_negative=True,
# Make sure arg is a quantity with correct units
+ unit_casting_warning = dedent(
+ f"""No units are specified for {argname} = {arg} in {funcname}. Assuming units of {str(units[0])}.
+ To silence this warning, explicitly pass in an Astropy Quantity (from astropy.units)
+ (see http://docs.astropy.org/en/stable/units/)""")
+
+ # TODO include explicit note on how to pass in Astropy Quantity
+
if not isinstance(arg, (u.Quantity)):
if len(units) != 1:
raise TypeError(typeerror_message)
@@ -281,10 +225,7 @@ def _check_quantity(arg, argname, funcname, units, can_be_negative=True,
except Exception:
raise TypeError(typeerror_message)
else:
- raise UserWarning(
- f"No units are specified for {argname} in {funcname}. "
- f"Assuming units of {str(units[0])}."
- )
+ warnings.warn(UnitsWarning(unit_casting_warning))
if not isinstance(arg, u.Quantity):
raise u.UnitsError("{} is still not a Quantity after checks!".format(arg))
@@ -316,8 +257,75 @@ def _check_quantity(arg, argname, funcname, units, can_be_negative=True,
elif not can_be_inf and np.any(np.isinf(arg.value)):
raise ValueError(f"{valueerror_message} infs.")
+ return arg
-def _check_relativistic(V, funcname, betafrac=0.1):
+
+def check_relativistic(func=None, betafrac=0.05):
+ r"""
+ Warns or raises an exception when the output of the decorated
+ function is greater than `betafrac` times the speed of light.
+
+ Parameters
+ ----------
+ func : `function`, optional
+ The function to decorate.
+
+ betafrac : float, optional
+ The minimum fraction of the speed of light that will raise a
+ `~plasmapy.utils.RelativityWarning`. Defaults to 5%.
+
+ Returns
+ -------
+ function
+ Decorated function.
+
+ Raises
+ ------
+ TypeError
+ If `V` is not a `~astropy.units.Quantity`.
+
+ ~astropy.units.UnitConversionError
+ If `V` is not in units of velocity.
+
+ ValueError
+ If `V` contains any `~numpy.nan` values.
+
+ ~plasmapy.utils.RelativityError
+ If `V` is greater than or equal to the speed of light.
+
+ Warns
+ -----
+ ~plasmapy.utils.RelativityWarning
+ If `V` is greater than or equal to `betafrac` times the speed of light,
+ but less than the speed of light.
+
+ Examples
+ --------
+ >>> from astropy import units as u
+ >>> @check_relativistic
+ ... def speed():
+ ... return 1 * u.m / u.s
+
+ Passing in a custom `betafrac`:
+
+ >>> @check_relativistic(betafrac=0.01)
+ ... def speed():
+ ... return 1 * u.m / u.s
+
+ """
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return_ = f(*args, **kwargs)
+ _check_relativistic(return_, f.__name__, betafrac=betafrac)
+ return return_
+ return wrapper
+ if func:
+ return decorator(func)
+ return decorator
+
+
+def _check_relativistic(V, funcname, betafrac=0.05):
r"""
Warn or raise error for relativistic or superrelativistic
velocities.
@@ -331,9 +339,9 @@ def _check_relativistic(V, funcname, betafrac=0.1):
The name of the original function to be printed in the error
messages.
- betafrac : float
+ betafrac : float, optional
The minimum fraction of the speed of light that will generate
- a warning.
+ a warning. Defaults to 5%.
Raises
------
| Warn, not raise with UserWarning
That seems to have landed under raise clauses for a whole bunch of lines in our code. We'll have to fix that, update tests and the docs as well. | PlasmaPy/PlasmaPy | diff --git a/plasmapy/physics/tests/test_parameters.py b/plasmapy/physics/tests/test_parameters.py
index 7b010549..227dc69b 100644
--- a/plasmapy/physics/tests/test_parameters.py
+++ b/plasmapy/physics/tests/test_parameters.py
@@ -148,7 +148,7 @@ def test_Alfven_speed():
assert Alfven_speed(-np.inf * u.T, 1 * u.m ** -3,
ion='p') == np.inf * u.m / u.s
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert Alfven_speed(1.0, n_i) == Alfven_speed(1.0 * u.T, n_i)
Alfven_speed(1 * u.T, 5e19 * u.m ** -3, ion='p')
@@ -234,11 +234,11 @@ def test_ion_sound_speed():
with pytest.raises(ValueError):
ion_sound_speed(T_e=T_negarr, T_i=0 * u.K)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert ion_sound_speed(T_e=1.2e6, T_i=0 * u.K) == ion_sound_speed(T_e=1.2e6 * u.K,
T_i=0 * u.K)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert ion_sound_speed(T_i=1.3e6, T_e=0 * u.K) == ion_sound_speed(T_i=1.3e6 * u.K,
T_e=0 * u.K)
@@ -276,7 +276,7 @@ def test_thermal_speed():
with pytest.raises(RelativityError):
thermal_speed(5e19 * u.K)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert thermal_speed(1e5) == thermal_speed(1e5 * u.K)
assert thermal_speed(T_i, particle='p').unit.is_equivalent(u.m / u.s)
@@ -306,7 +306,7 @@ def test_thermal_speed():
with pytest.raises(ValueError):
thermal_speed(T_i, particle='asdfasd')
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert thermal_speed(1e6, particle='p') == thermal_speed(1e6 * u.K, particle='p')
assert np.isclose(thermal_speed(1e6 * u.K,
@@ -438,7 +438,7 @@ def test_gyrofrequency():
f_ce_use_equiv = omega_ce.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])
assert np.isclose(f_ce.value, f_ce_use_equiv.value)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert gyrofrequency(5.0) == gyrofrequency(5.0 * u.T)
assert gyrofrequency(B, particle=ion).unit.is_equivalent(u.rad / u.s)
@@ -459,7 +459,7 @@ def test_gyrofrequency():
assert gyrofrequency(B, particle='e+') == gyrofrequency(B)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
gyrofrequency(8, 'p')
with pytest.raises(u.UnitConversionError):
@@ -468,7 +468,7 @@ def test_gyrofrequency():
with pytest.raises(InvalidParticleError):
gyrofrequency(8 * u.T, particle='asdfasd')
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
# TODO this should be WARNS, not RAISES. and it's probably still raised
assert gyrofrequency(5.0, 'p') == gyrofrequency(5.0 * u.T, 'p')
@@ -514,10 +514,10 @@ def test_gyroradius():
with pytest.raises(ValueError):
gyroradius(3.14159 * u.T, T_i=-1 * u.K)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert gyroradius(1.0, Vperp=1.0) == gyroradius(1.0 * u.T, Vperp=1.0 * u.m / u.s)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert gyroradius(1.1, T_i=1.2) == gyroradius(1.1 * u.T, T_i=1.2 * u.K)
with pytest.raises(ValueError):
@@ -564,12 +564,12 @@ def test_gyroradius():
with pytest.raises(ValueError):
gyroradius(B, particle='p', T_i=-1 * u.K)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
gyro_without_units = gyroradius(1.0, particle="p", Vperp=1.0)
gyro_with_units = gyroradius(1.0 * u.T, particle="p", Vperp=1.0 * u.m / u.s)
assert gyro_without_units == gyro_with_units
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
gyro_t_without_units = gyroradius(1.1, particle="p", T_i=1.2)
gyro_t_with_units = gyroradius(1.1 * u.T, particle="p", T_i=1.2 * u.K)
assert gyro_t_with_units == gyro_t_without_units
@@ -600,7 +600,7 @@ def test_plasma_frequency():
with pytest.raises(ValueError):
plasma_frequency(np.nan * u.m ** -3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert plasma_frequency(1e19) == plasma_frequency(1e19 * u.m ** -3)
assert plasma_frequency(n_i, particle='p').unit.is_equivalent(u.rad / u.s)
@@ -614,7 +614,7 @@ def test_plasma_frequency():
with pytest.raises(ValueError):
plasma_frequency(n=5 * u.m ** -3, particle='sdfas')
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
plasma_freq_no_units = plasma_frequency(1e19, particle='p')
assert plasma_freq_no_units == plasma_frequency(1e19 * u.m ** -3, particle='p')
@@ -639,7 +639,7 @@ def test_Debye_length():
assert np.isclose(Debye_length(
1 * u.eV, 1 * u.cm ** -3).value, 7.43, atol=0.005)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
Debye_length(5, 5 * u.m ** -3)
with pytest.raises(u.UnitConversionError):
@@ -656,10 +656,10 @@ def test_Debye_length():
with pytest.raises(ValueError):
Debye_length(Tarr2, narr3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert Debye_length(2.0, 2.0) == Debye_length(2.0 * u.K, 2.0 * u.m ** -3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert Debye_length(2.0 * u.K, 2.0) == Debye_length(2.0, 2.0 * u.m ** -3)
@@ -674,7 +674,7 @@ def test_Debye_number():
assert np.isclose(Debye_number(1 * u.eV, 1 * u.cm ** -3).value, 1720862385.43342)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
Debye_number(T_e, 4)
with pytest.raises(TypeError):
@@ -694,10 +694,10 @@ def test_Debye_number():
with pytest.raises(ValueError):
Debye_number(Tarr2, narr3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert Debye_number(1.1, 1.1) == Debye_number(1.1 * u.K, 1.1 * u.m ** -3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert Debye_number(1.1 * u.K, 1.1) == Debye_number(1.1, 1.1 * u.m ** -3)
@@ -714,7 +714,7 @@ def test_inertial_length():
assert inertial_length(n_i, particle='p') == inertial_length(n_i, particle='p')
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
inertial_length(4, particle='p')
with pytest.raises(u.UnitConversionError):
@@ -726,7 +726,7 @@ def test_inertial_length():
with pytest.raises(ValueError):
inertial_length(n_i, particle=-135)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
inertial_length_no_units = inertial_length(1e19, particle='p')
assert inertial_length_no_units == inertial_length(1e19 * u.m ** -3, particle='p')
@@ -734,7 +734,7 @@ def test_inertial_length():
assert np.isclose(inertial_length(1 * u.cm ** -3).cgs.value, 5.31e5, rtol=1e-3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
inertial_length(5)
with pytest.raises(u.UnitConversionError):
@@ -743,7 +743,7 @@ def test_inertial_length():
with pytest.raises(ValueError):
inertial_length(-5 * u.m ** -3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert inertial_length(1e19) == inertial_length(1e19 * u.m ** -3)
@@ -762,7 +762,7 @@ def test_magnetic_pressure():
assert np.isclose(magnetic_pressure(B).value, 397887.35772973835)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
magnetic_pressure(5)
with pytest.raises(u.UnitConversionError):
@@ -777,7 +777,7 @@ def test_magnetic_pressure():
with pytest.raises(ValueError):
magnetic_pressure(B_nanarr)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert magnetic_pressure(22.2) == magnetic_pressure(22.2 * u.T)
@@ -800,7 +800,7 @@ def test_magnetic_energy_density():
assert magnetic_energy_density(B_arr)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
magnetic_energy_density(5)
with pytest.raises(u.UnitConversionError):
@@ -815,7 +815,7 @@ def test_magnetic_energy_density():
with pytest.raises(ValueError):
magnetic_energy_density(B_nanarr)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert magnetic_energy_density(22.2) == magnetic_energy_density(22.2 * u.T)
@@ -835,11 +835,11 @@ def test_upper_hybrid_frequency():
with pytest.raises(ValueError):
upper_hybrid_frequency(5 * u.T, n_e=-1 * u.m ** -3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert upper_hybrid_frequency(1.2, 1.3) == upper_hybrid_frequency(1.2 * u.T,
1.3 * u.m ** -3)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert upper_hybrid_frequency(1.4 * u.T, 1.3) == upper_hybrid_frequency(1.4,
1.3 * u.m ** -3)
@@ -870,6 +870,6 @@ def test_lower_hybrid_frequency():
lower_hybrid_frequency(
np.nan * u.T, n_i=-5e19 * u.m ** -3, ion='asdfasd')
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
assert lower_hybrid_frequency(1.3, 1e19) == lower_hybrid_frequency(1.3 * u.T,
1e19 * u.m ** -3)
diff --git a/plasmapy/physics/tests/test_quantum.py b/plasmapy/physics/tests/test_quantum.py
index 0c264fe0..ea0ecd9f 100644
--- a/plasmapy/physics/tests/test_quantum.py
+++ b/plasmapy/physics/tests/test_quantum.py
@@ -47,7 +47,7 @@ def test_deBroglie_wavelength():
with pytest.raises(RelativityError):
deBroglie_wavelength(c * 1.000000001, 'e')
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
deBroglie_wavelength(0.79450719277, 'Be-7 1+')
with pytest.raises(u.UnitConversionError):
diff --git a/plasmapy/physics/tests/test_relativity.py b/plasmapy/physics/tests/test_relativity.py
index d4df1c74..2b47ad50 100644
--- a/plasmapy/physics/tests/test_relativity.py
+++ b/plasmapy/physics/tests/test_relativity.py
@@ -29,10 +29,10 @@ def test_Lorentz_factor():
with pytest.raises(RelativityError):
Lorentz_factor(1.0000000001*c)
- with pytest.raises((ValueError, UserWarning)):
+ with pytest.raises(ValueError):
Lorentz_factor(299792459)
- with pytest.raises(UserWarning):
+ with pytest.warns(u.UnitsWarning):
Lorentz_factor(2.2)
with pytest.raises(u.UnitConversionError):
diff --git a/plasmapy/utils/tests/test_checks.py b/plasmapy/utils/tests/test_checks.py
index 4dda9e29..7b9482a2 100644
--- a/plasmapy/utils/tests/test_checks.py
+++ b/plasmapy/utils/tests/test_checks.py
@@ -20,7 +20,6 @@
(5 * u.T, [u.T, 1], TypeError),
(5 * u.T, [1, u.m], TypeError),
(u.T, u.J, TypeError),
- (5.0, u.m, UserWarning),
(3 * u.m / u.s, u.m, u.UnitConversionError),
(5j * u.K, u.K, ValueError),
]
@@ -66,6 +65,10 @@ def test__check_quantity_errors_non_default(
can_be_inf=can_be_inf)
+def test__check_quantity_warns_on_casting():
+ with pytest.warns(u.UnitsWarning):
+ _check_quantity(5, 'arg', 'funcname', u.m,)
+
@pytest.mark.parametrize(
"value, units, error", quantity_error_examples_default)
def test__check_quantity_errors_default(value, units, error):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/automated-code-tests.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==1.0.6
astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
coveralls==4.0.1
cycler==0.12.1
Cython==3.0.12
dill==0.3.9
docopt==0.6.2
exceptiongroup==1.2.2
flake8==7.2.0
fonttools==4.56.0
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
lmfit==1.3.3
matplotlib==3.9.4
mccabe==0.7.0
mpmath==1.3.0
numpy==1.26.4
packaging==24.2
pillow==11.1.0
-e git+https://github.com/PlasmaPy/PlasmaPy.git@8d9f1fe553df5786013b65c65bca21602fed9c32#egg=plasmapy
pluggy==1.5.0
pycodestyle==2.13.0
pyerfa==2.0.1.5
pyflakes==3.3.2
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
roman==5.0
scipy==1.13.1
six==1.17.0
tomli==2.2.1
uncertainties==3.2.2
urllib3==2.3.0
zipp==3.21.0
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==1.0.6
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- coveralls==4.0.1
- cycler==0.12.1
- cython==3.0.12
- dill==0.3.9
- docopt==0.6.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- fonttools==4.56.0
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- lmfit==1.3.3
- matplotlib==3.9.4
- mccabe==0.7.0
- mpmath==1.3.0
- numpy==1.26.4
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyerfa==2.0.1.5
- pyflakes==3.3.2
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- roman==5.0
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- uncertainties==3.2.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/physics/tests/test_parameters.py::test_thermal_speed",
"plasmapy/physics/tests/test_parameters.py::test_gyroradius",
"plasmapy/physics/tests/test_parameters.py::test_Debye_length",
"plasmapy/physics/tests/test_parameters.py::test_Debye_number",
"plasmapy/physics/tests/test_parameters.py::test_inertial_length",
"plasmapy/physics/tests/test_parameters.py::test_magnetic_pressure",
"plasmapy/physics/tests/test_parameters.py::test_upper_hybrid_frequency",
"plasmapy/physics/tests/test_parameters.py::test_lower_hybrid_frequency",
"plasmapy/physics/tests/test_quantum.py::test_deBroglie_wavelength",
"plasmapy/physics/tests/test_relativity.py::test_Lorentz_factor",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_warns_on_casting"
]
| [
"plasmapy/physics/tests/test_parameters.py::test_Alfven_speed",
"plasmapy/physics/tests/test_parameters.py::test_ion_sound_speed",
"plasmapy/physics/tests/test_parameters.py::test_gyrofrequency",
"plasmapy/physics/tests/test_parameters.py::test_plasma_frequency",
"plasmapy/physics/tests/test_parameters.py::test_magnetic_energy_density",
"plasmapy/physics/tests/test_quantum.py::test_thermal_deBroglie_wavelength",
"plasmapy/physics/tests/test_quantum.py::test_Fermi_energy",
"plasmapy/physics/tests/test_quantum.py::test_Thomas_Fermi_length",
"plasmapy/physics/tests/test_quantum.py::Test_chemical_potential::test_known1",
"plasmapy/physics/tests/test_quantum.py::Test_chemical_potential::test_fail1",
"plasmapy/physics/tests/test_quantum.py::Test_chemical_potential::test_polog_fail",
"plasmapy/physics/tests/test_quantum.py::Test_chemical_potential_interp::test_known1"
]
| [
"plasmapy/physics/tests/test_parameters.py::Test_mass_density::test_particleless",
"plasmapy/physics/tests/test_parameters.py::Test_mass_density::test_wrong_units",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_invalid_kappa",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_invalid_method",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_probable1",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_rms1",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_mean1",
"plasmapy/physics/tests/test_quantum.py::test_Wigner_Seitz_radius",
"plasmapy/physics/tests/test_quantum.py::Test_chemical_potential_interp::test_fail1",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_non_default[value0-units0-False-False-True-ValueError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_non_default[value1-units1-True-False-False-ValueError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value0-units0-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value1-5-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value2-units2-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value3-units3-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value4-units4-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value5-units5-UnitConversionError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_errors_default[value6-units6-ValueError]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_non_default[value0-units0-True-True-True]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value0-units0]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value1-units1]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value2-units2]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value3-units3]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value4-units4]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value5-units5]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value6-units6]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value7-units7]",
"plasmapy/utils/tests/test_checks.py::test__check_quantity_default[value8-units8]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value0-units0-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value1-5-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value2-units2-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value3-units3-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value4-units4-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value5-units5-UnitConversionError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_default[value6-units6-ValueError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_non_default[value0-units0-False-False-True-ValueError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_errors_non_default[value1-units1-True-False-False-ValueError]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value0-units0]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value1-units1]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value2-units2]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value3-units3]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value4-units4]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value5-units5]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value6-units6]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value7-units7]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_default[value8-units8]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_non_default[value0-units0-True-True-True]",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_missing_validated_params",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_two_args_default",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_two_args_not_default",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_two_args_one_kwargs_default",
"plasmapy/utils/tests/test_checks.py::test_check_quantity_decorator_two_args_one_kwargs_not_default",
"plasmapy/utils/tests/test_checks.py::test__check_relativisitc_valid[speed0-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativisitc_valid[speed1-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativisitc_valid[speed2-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativisitc_valid[speed3-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed0-0.1-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[51513.35-0.1-TypeError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed2-0.1-UnitConversionError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed3-0.1-ValueError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed4-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed5-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed6-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed7-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed8-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_errors[speed9-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_warnings[speed0-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_warnings[speed1-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_warnings[speed2-0.1]",
"plasmapy/utils/tests/test_checks.py::test__check_relativistic_warnings[speed3-0.01]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator[speed0-0.1]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator[speed1-0.1]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator[speed2-0.1]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator[speed3-0.1]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args[speed0]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args[speed1]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args[speed2]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args[speed3]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args_parentheses[speed0]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args_parentheses[speed1]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args_parentheses[speed2]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_no_args_parentheses[speed3]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed0-0.1-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[51513.35-0.1-TypeError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed2-0.1-UnitConversionError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed3-0.1-ValueError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed4-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed5-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed6-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed7-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed8-0.1-RelativityError]",
"plasmapy/utils/tests/test_checks.py::test_check_relativistic_decorator_errors[speed9-0.1-RelativityError]"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,360 | [
"plasmapy/physics/transport/collisions.py",
"plasmapy/physics/quantum.py",
"plasmapy/utils/checks.py",
"plasmapy/physics/parameters.py",
"plasmapy/physics/relativity.py"
]
| [
"plasmapy/physics/transport/collisions.py",
"plasmapy/physics/quantum.py",
"plasmapy/utils/checks.py",
"plasmapy/physics/parameters.py",
"plasmapy/physics/relativity.py"
]
|
|
elastic__rally-454 | 7a0e577136d44360d61b3b91a0dc00ecc659f016 | 2018-04-04 10:58:30 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/docs/track.rst b/docs/track.rst
index ec849cb1..4fcfefb1 100644
--- a/docs/track.rst
+++ b/docs/track.rst
@@ -308,9 +308,10 @@ bulk
With the operation type ``bulk`` you can execute `bulk requests <http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html>`_. It supports the following properties:
* ``bulk-size`` (mandatory): Defines the bulk size in number of documents.
+* ``ingest-percentage`` (optional, defaults to 100): A number between (0, 100] that defines how much of the document corpus will be bulk-indexed.
* ``corpora`` (optional): A list of document corpus names that should be targeted by this bulk-index operation. Only needed if the ``corpora`` section contains more than one document corpus and you don't want to index all of them with this operation.
* ``indices`` (optional): A list of index names that defines which indices should be used by this bulk-index operation. Rally will then only select the documents files that have a matching ``target-index`` specified.
-* ``batch-size`` (optional): Defines how many documents Rally will read at once. This is an expert setting and only meant to avoid accidental bottlenecks for very small bulk sizes (e.g. if you want to benchmark with a bulk-size of 1, you should set batch-size higher).
+* ``batch-size`` (optional): Defines how many documents Rally will read at once. This is an expert setting and only meant to avoid accidental bottlenecks for very small bulk sizes (e.g. if you want to benchmark with a bulk-size of 1, you should set ``batch-size`` higher).
* ``pipeline`` (optional): Defines the name of an (existing) ingest pipeline that should be used (only supported from Elasticsearch 5.0).
* ``conflicts`` (optional): Type of index conflicts to simulate. If not specified, no conflicts will be simulated. Valid values are: 'sequential' (A document id is replaced with a document id with a sequentially increasing id), 'random' (A document id is replaced with a document id with a random other id).
diff --git a/esrally/track/params.py b/esrally/track/params.py
index e9838253..48ef30eb 100644
--- a/esrally/track/params.py
+++ b/esrally/track/params.py
@@ -1,6 +1,7 @@
import logging
import random
import time
+import math
import types
import inspect
from enum import Enum
@@ -482,6 +483,14 @@ class BulkIndexParamSource(ParamSource):
except ValueError:
raise exceptions.InvalidSyntax("'batch-size' must be numeric")
+ try:
+ self.ingest_percentage = float(params.get("ingest-percentage", 100.0))
+ if self.ingest_percentage <= 0 or self.ingest_percentage > 100.0:
+ raise exceptions.InvalidSyntax(
+ "'ingest-percentage' must be in the range (0.0, 100.0] but was {:.1f}".format(self.ingest_percentage))
+ except ValueError:
+ raise exceptions.InvalidSyntax("'ingest-percentage' must be numeric")
+
def used_corpora(self, t, params):
corpora = []
track_corpora_names = [corpus.name for corpus in t.corpora]
@@ -504,7 +513,7 @@ class BulkIndexParamSource(ParamSource):
def partition(self, partition_index, total_partitions):
return PartitionBulkIndexParamSource(self.corpora, partition_index, total_partitions, self.batch_size, self.bulk_size,
- self.id_conflicts, self.pipeline, self._params)
+ self.ingest_percentage, self.id_conflicts, self.pipeline, self._params)
def params(self):
raise exceptions.RallyError("Do not use a BulkIndexParamSource without partitioning")
@@ -514,7 +523,7 @@ class BulkIndexParamSource(ParamSource):
class PartitionBulkIndexParamSource:
- def __init__(self, corpora, partition_index, total_partitions, batch_size, bulk_size, id_conflicts=None,
+ def __init__(self, corpora, partition_index, total_partitions, batch_size, bulk_size, ingest_percentage, id_conflicts=None,
pipeline=None, original_params=None):
"""
@@ -523,14 +532,17 @@ class PartitionBulkIndexParamSource:
:param total_partitions: The total number of partitions (i.e. clients) for bulk index operations.
:param batch_size: The number of documents to read in one go.
:param bulk_size: The size of bulk index operations (number of documents per bulk).
+ :param ingest_percentage: A number between (0.0, 100.0] that defines how much of the whole corpus should be ingested.
:param id_conflicts: The type of id conflicts.
:param pipeline: The name of the ingest pipeline to run.
+ :param original_params: The original dict passed to the parent parameter source.
"""
self.corpora = corpora
self.partition_index = partition_index
self.total_partitions = total_partitions
self.batch_size = batch_size
self.bulk_size = bulk_size
+ self.ingest_percentage = ingest_percentage
self.id_conflicts = id_conflicts
self.pipeline = pipeline
self.internal_params = bulk_data_based(total_partitions, partition_index, corpora, batch_size,
@@ -543,7 +555,8 @@ class PartitionBulkIndexParamSource:
return next(self.internal_params)
def size(self):
- return number_of_bulks(self.corpora, self.partition_index, self.total_partitions, self.bulk_size)
+ all_bulks = number_of_bulks(self.corpora, self.partition_index, self.total_partitions, self.bulk_size)
+ return math.ceil((all_bulks * self.ingest_percentage) / 100)
def number_of_bulks(corpora, partition_index, total_partitions, bulk_size):
| Allow to ingest only a subset of the document corpus
For benchmarking smaller configurations (e.g. with very little memory), ingesting a full document corpus with tens of GB may be too much. For these cases we should allow users to provide a track parameter which allows them to define a percentage of the document corpus to ingest.
Notes:
* Actually, this is already possible at the moment by forcing a specific number of iterations for the bulk operation. However, this is a bit hard to understand because the number of iterations is also tied to the bulk size.
* This is not meant to be accurate down to document level but only on a granularity of individual bulk requests (e.g. suppose we have a corpus with 1 million documents and a bulk size of 10.000 documents. If a user specifies 2.5% (= 25.000 documents) this will correspond to three bulk requests (i.e. 30.000 documents)). | elastic/rally | diff --git a/tests/track/params_test.py b/tests/track/params_test.py
index 13794565..c6b9d8f2 100644
--- a/tests/track/params_test.py
+++ b/tests/track/params_test.py
@@ -475,11 +475,39 @@ class BulkIndexParamSourceTests(TestCase):
self.assertEqual("Unknown 'conflicts' setting [crazy]", ctx.exception.args[0])
+ def test_create_with_ingest_percentage_too_low(self):
+ with self.assertRaises(exceptions.InvalidSyntax) as ctx:
+ params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={
+ "bulk-size": 5000,
+ "ingest-percentage": 0.0
+ })
+
+ self.assertEqual("'ingest-percentage' must be in the range (0.0, 100.0] but was 0.0", ctx.exception.args[0])
+
+ def test_create_with_ingest_percentage_too_high(self):
+ with self.assertRaises(exceptions.InvalidSyntax) as ctx:
+ params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={
+ "bulk-size": 5000,
+ "ingest-percentage": 100.1
+ })
+
+ self.assertEqual("'ingest-percentage' must be in the range (0.0, 100.0] but was 100.1", ctx.exception.args[0])
+
+ def test_create_with_ingest_percentage_not_numeric(self):
+ with self.assertRaises(exceptions.InvalidSyntax) as ctx:
+ params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={
+ "bulk-size": 5000,
+ "ingest-percentage": "100 percent"
+ })
+
+ self.assertEqual("'ingest-percentage' must be numeric", ctx.exception.args[0])
+
def test_create_valid_param_source(self):
self.assertIsNotNone(params.BulkIndexParamSource(track.Track(name="unit-test"), params={
"conflicts": "random",
"bulk-size": 5000,
"batch-size": 20000,
+ "ingest-percentage": 20.5,
"pipeline": "test-pipeline"
}))
@@ -565,6 +593,63 @@ class BulkIndexParamSourceTests(TestCase):
self.assertEqual("The provided corpus ['does_not_exist'] does not match any of the corpora ['default'].", ctx.exception.args[0])
+ def test_ingests_all_documents_by_default(self):
+ corpora = [
+ track.DocumentCorpus(name="default", documents=[
+ track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
+ number_of_documents=300000,
+ target_index="test-idx",
+ target_type="test-type"
+ )
+ ]),
+ track.DocumentCorpus(name="special", documents=[
+ track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
+ number_of_documents=700000,
+ target_index="test-idx2",
+ target_type="type"
+ )
+ ]),
+ ]
+
+ source = params.BulkIndexParamSource(
+ track=track.Track(name="unit-test", corpora=corpora),
+ params={
+ "bulk-size": 10000
+ })
+
+ partition = source.partition(0, 1)
+ # # no ingest-percentage specified, should issue all one hundred bulk requests
+ self.assertEqual(100, partition.size())
+
+ def test_restricts_number_of_bulks_if_required(self):
+ corpora = [
+ track.DocumentCorpus(name="default", documents=[
+ track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
+ number_of_documents=300000,
+ target_index="test-idx",
+ target_type="test-type"
+ )
+ ]),
+ track.DocumentCorpus(name="special", documents=[
+ track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK,
+ number_of_documents=700000,
+ target_index="test-idx2",
+ target_type="type"
+ )
+ ]),
+ ]
+
+ source = params.BulkIndexParamSource(
+ track=track.Track(name="unit-test", corpora=corpora),
+ params={
+ "bulk-size": 10000,
+ "ingest-percentage": 2.5
+ })
+
+ partition = source.partition(0, 1)
+ # should issue three bulks of size 10.000
+ self.assertEqual(3, partition.size())
+
class BulkDataGeneratorTests(TestCase):
class TestBulkReader:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc python3-pip python3-dev"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
elasticsearch==6.0.0
-e git+https://github.com/elastic/rally.git@7a0e577136d44360d61b3b91a0dc00ecc659f016#egg=esrally
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- elasticsearch==6.0.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- tabulate==0.8.1
- thespian==3.9.2
- urllib3==1.22
prefix: /opt/conda/envs/rally
| [
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_ingest_percentage_not_numeric",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_ingest_percentage_too_high",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_ingest_percentage_too_low",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_restricts_number_of_bulks_if_required"
]
| []
| [
"tests/track/params_test.py::SliceTests::test_slice_with_slice_larger_than_source",
"tests/track/params_test.py::SliceTests::test_slice_with_source_larger_than_slice",
"tests/track/params_test.py::ConflictingIdsBuilderTests::test_no_id_conflicts",
"tests/track/params_test.py::ConflictingIdsBuilderTests::test_random_conflicts",
"tests/track/params_test.py::ConflictingIdsBuilderTests::test_sequential_conflicts",
"tests/track/params_test.py::ActionMetaDataTests::test_generate_action_meta_data_with_id_conflicts",
"tests/track/params_test.py::ActionMetaDataTests::test_generate_action_meta_data_without_id_conflicts",
"tests/track/params_test.py::ActionMetaDataTests::test_source_file_action_meta_data",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_larger_than_number_of_docs",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_smaller_than_number_of_docs",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_smaller_than_number_of_docs_and_multiple_clients",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_with_offset",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulks_and_assume_metadata_line_in_source_file",
"tests/track/params_test.py::InvocationGeneratorTests::test_build_conflicting_ids",
"tests/track/params_test.py::InvocationGeneratorTests::test_calculate_bounds",
"tests/track/params_test.py::InvocationGeneratorTests::test_calculate_non_multiple_bounds",
"tests/track/params_test.py::InvocationGeneratorTests::test_calculate_number_of_bulks",
"tests/track/params_test.py::InvocationGeneratorTests::test_iterator_chaining_respects_context_manager",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_valid_param_source",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_fraction_larger_batch_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_fraction_smaller_batch_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_metadata_in_source_file_but_conflicts",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_negative_bulk_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_non_numeric_bulk_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_unknown_id_conflicts",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_without_params",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_filters_corpora",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_ingests_all_documents_by_default",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_passes_all_corpora_by_default",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_raises_exception_if_no_corpus_matches",
"tests/track/params_test.py::BulkDataGeneratorTests::test_generate_bulks_from_multiple_corpora",
"tests/track/params_test.py::BulkDataGeneratorTests::test_generate_two_bulks",
"tests/track/params_test.py::BulkDataGeneratorTests::test_internal_params_take_precedence",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_class_as_param_source",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_function_as_param_source",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_legacy_class_as_param_source",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_legacy_function_as_param_source",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_from_track_with_settings",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_from_track_without_settings",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_inline_with_body",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_inline_without_body",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_filter_index",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_delete_index_by_name",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_delete_index_from_track",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_delete_no_index",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_filter_index_from_track",
"tests/track/params_test.py::CreateIndexTemplateParamSourceTests::test_create_index_template_from_track",
"tests/track/params_test.py::CreateIndexTemplateParamSourceTests::test_create_index_template_inline",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_by_name",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_by_name_and_matching_indices",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_by_name_and_matching_indices_missing_index_pattern",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_from_track",
"tests/track/params_test.py::SearchParamSourceTests::test_passes_request_parameters",
"tests/track/params_test.py::SearchParamSourceTests::test_replaces_body_params"
]
| []
| Apache License 2.0 | 2,361 | [
"docs/track.rst",
"esrally/track/params.py"
]
| [
"docs/track.rst",
"esrally/track/params.py"
]
|
|
aio-libs__aiosmtpd-143 | 23b743be942ffd37f9c051c1f6efe0108ffd456d | 2018-04-04 20:24:59 | 23b743be942ffd37f9c051c1f6efe0108ffd456d | diff --git a/.travis.yml b/.travis.yml
index 6f1e947..8fb569e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,3 +12,5 @@ matrix:
script:
- tox -e $INTERP-nocov,$INTERP-cov,qa,docs
- 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then tox -e $INTERP-diffcov; fi'
+before_script:
+ - echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6
diff --git a/aiosmtpd/smtp.py b/aiosmtpd/smtp.py
index 681732a..24661a6 100644
--- a/aiosmtpd/smtp.py
+++ b/aiosmtpd/smtp.py
@@ -234,10 +234,20 @@ class SMTP(asyncio.StreamReaderProtocol):
# re-encoded back to the original bytes when the SMTP command
# is handled.
if i < 0:
- command = line.upper().decode(encoding='ascii')
+ try:
+ command = line.upper().decode(encoding='ascii')
+ except UnicodeDecodeError:
+ await self.push('500 Error: bad syntax')
+ continue
+
arg = None
else:
- command = line[:i].upper().decode(encoding='ascii')
+ try:
+ command = line[:i].upper().decode(encoding='ascii')
+ except UnicodeDecodeError:
+ await self.push('500 Error: bad syntax')
+ continue
+
arg = line[i+1:].strip()
# Remote SMTP servers can send us UTF-8 content despite
# whether they've declared to do so or not. Some old
| Exception when parsing binary data
aiosmtpd will try to ASCII decode binary data as SMTP commands, raising a `UnicodeDecodeError`. A simple test case that triggers this is shown at the end of this report. This can be caught by a generic exception handler, however it would be better to do this inside the library.
This can be trivially fixed with a `try`/`except` handler. PR incoming.
``` python
from base64 import b64decode
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 8025
BUFFER_SIZE = 1024
RAW = "gUMBAwMBGgAAACAAwDAAwCwAwCgAwCQAwBQAwAoAwCIAwCEAAKMAAJ8AAGsAAGoAADkAADgAAIgAAIcAwBkAwCAAwDIAwC4AwCoAwCYAwA8AwAUAAJ0AAD0AADUAAIQAwBIAwAgAwBwAwBsAABYAABMAwBcAwBoAwA0AwAMAAAoHAMAAwC8AwCsAwCcAwCMAwBMAwAkAwB8AwB4AAKIAAJ4AAGcAAEAAADMAADIAAJoAAJkAAEUAAEQAwBgAwB0AwDEAwC0AwCkAwCUAwA4AwAQAAJwAADwAAC8AAJYAAEEAAAcAwBEAwAcAwBYAwAwAwAIAAAUAAAQFAIADAIABAIAAABUAABIAAAkGAEAAABQAABEAAAgAAAYAAAMEAIACAIAAAP8cXF6WB1DBTAUUZfksmYhwy/mtOvciiLZb+ZNMaF/tYg=="
MESSAGE = b64decode(RAW)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
data = s.recv(BUFFER_SIZE)
print("Received: {}".format(data))
s.send(MESSAGE)
data = s.recv(BUFFER_SIZE)
print("Received: {}".format(data))
s.close()
``` | aio-libs/aiosmtpd | diff --git a/aiosmtpd/tests/test_smtp.py b/aiosmtpd/tests/test_smtp.py
index be1da96..975f252 100644
--- a/aiosmtpd/tests/test_smtp.py
+++ b/aiosmtpd/tests/test_smtp.py
@@ -197,6 +197,20 @@ class TestSMTP(unittest.TestCase):
self.addCleanup(controller.stop)
self.address = (controller.hostname, controller.port)
+ def test_binary(self):
+ with SMTP(*self.address) as client:
+ client.sock.send(b"\x80FAIL\r\n")
+ code, response = client.getreply()
+ self.assertEqual(code, 500)
+ self.assertEqual(response, b'Error: bad syntax')
+
+ def test_binary_space(self):
+ with SMTP(*self.address) as client:
+ client.sock.send(b"\x80 FAIL\r\n")
+ code, response = client.getreply()
+ self.assertEqual(code, 500)
+ self.assertEqual(response, b'Error: bad syntax')
+
def test_helo(self):
with SMTP(*self.address) as client:
code, response = client.helo('example.com')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose2",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/aio-libs/aiosmtpd.git@23b743be942ffd37f9c051c1f6efe0108ffd456d#egg=aiosmtpd
atpublic==5.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose2==0.15.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: aiosmtpd
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- atpublic==5.1
- nose2==0.15.1
prefix: /opt/conda/envs/aiosmtpd
| [
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_binary",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_binary_space"
]
| []
| [
"aiosmtpd/tests/test_smtp.py::TestProtocol::test_empty_email",
"aiosmtpd/tests/test_smtp.py::TestProtocol::test_honors_mail_delimeters",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_invalid_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_no_rcpt",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_duplicate",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_no_hostname",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_then_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_empty_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_expn",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_duplicate",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_no_hostname",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_then_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_bad_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_data",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_mail",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_mail_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_noop",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_quit",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rcpt",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rcpt_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_vrfy",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_fail_parse_email",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_from_malformed",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_from_twice",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_malformed_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_missing_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_from",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_bad_syntax_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_no_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_unrecognized_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_noop",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_noop_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_quit",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_quit_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_fail_parse_email",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_address",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_arg_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_mail",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_to",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_bad_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_params_no_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_unknown_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rset_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_too_long_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_unknown_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy_not_an_address",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_helo",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_bad_encodings",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_dots_escaped",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_esmtp_no_size_limit",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_exception_handler_exception",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_exception_handler_undescribable",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_invalid_body",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_compatible_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_incompatible_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_size_too_large",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_unrequited_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_process_message_error",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_too_long_message_body",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors_custom_response",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors_unhandled",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_custom_greeting",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_custom_hostname",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_default_greeting",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_mail_invalid_body_param",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_close_in_command",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_close_in_data",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_connection_reset_during_DATA",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_connection_reset_during_command",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_bad_encoded_param",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_data",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_mail_param",
"aiosmtpd/tests/test_smtp.py::TestSleepingHandler::test_close_after_helo"
]
| []
| Apache License 2.0 | 2,363 | [
".travis.yml",
"aiosmtpd/smtp.py"
]
| [
".travis.yml",
"aiosmtpd/smtp.py"
]
|
|
jmwri__simplejwt-2 | e3edef90eda15e3f4c23bce0465b2def14868d30 | 2018-04-04 20:49:38 | e3edef90eda15e3f4c23bce0465b2def14868d30 | diff --git a/README.md b/README.md
index cc0b04c..2b7b764 100644
--- a/README.md
+++ b/README.md
@@ -23,6 +23,30 @@ token = encode('secret', {'my_payload': 'some_data'}, 'HS256')
| `payload` | `dict` | *N/A* | The payload data contained within the token. |
| `alg` | `int` | `HS256` | The algorithm to use to create the token. |
+## Make
+Returns a new token. This function has arguments for registered claims as specified in [rfc7519](https://tools.ietf.org/html/rfc7519#section-4.1).
+
+Any registered claims provided in the payload will take precedence over any provided as arguments.
+
+```
+from simplejwt import make
+token = make('secret', {'my_payload': 'some_data'}, 'HS256', issuer='acme', valid_to=1234567)
+# eyJ0eXBlIjogIkpXVCIsICJhbGciOiAiSFMyNTYifQ.eyJteV9wYXlsb2FkIjogInNvbWVfZGF0YSIsICJpc3MiOiAiYWNtZSIsICJleHAiOiAxMjM0NTY3fQ.Nr5IADzsOhlzjxnghquBrRwewg10srDHu__-HN7GGGA
+```
+
+| Name | Type | Default | Description |
+| --- | --- | --- | --- |
+| `secret` | `str` | *N/A* | The secret used to create the token. |
+| `payload` | `dict` | *N/A* | The payload data contained within the token. |
+| `alg` | `int` | `HS256` | The algorithm to use to create the token. |
+| `issuer` | `str` | `None` | The issuer of the token. |
+| `subject` | `str` | `None` | The subject of the token. |
+| `audience` | `str` | `None` | The audience of the token. |
+| `valid_to` | `int` | `None` | The expiry date of the token as a timestamp. |
+| `valid_from` | `int` | `None` | The date the token is valid from as a timestamp. |
+| `issued_at` | `int` | `None` | The date the token was issued as a timestamp. |
+| `id` | `str` | `None` | The id of the token. |
+
## Decode
Returns the payload from a token.
diff --git a/simplejwt/__init__.py b/simplejwt/__init__.py
index 774ddc8..1fa6839 100644
--- a/simplejwt/__init__.py
+++ b/simplejwt/__init__.py
@@ -1,1 +1,1 @@
-from simplejwt.jwt import encode, decode
+from simplejwt.jwt import encode, make, decode
diff --git a/simplejwt/jwt.py b/simplejwt/jwt.py
index 5645302..c776ec4 100644
--- a/simplejwt/jwt.py
+++ b/simplejwt/jwt.py
@@ -26,6 +26,28 @@ def _hash(secret: bytes, data: bytes, alg: str) -> bytes:
.digest()
+def make(secret: Union[str, bytes], payload: dict, alg='HS256',
+ issuer: str = None, subject: str = None, audience: str = None,
+ valid_to: int = None, valid_from: int = None, issued_at: int = None,
+ id: str = None):
+ new_payload = payload.copy()
+ if issuer and 'iss' not in new_payload:
+ new_payload['iss'] = issuer
+ if subject and 'sub' not in new_payload:
+ new_payload['sub'] = subject
+ if audience and 'aud' not in new_payload:
+ new_payload['aud'] = audience
+ if valid_to and 'exp' not in new_payload:
+ new_payload['exp'] = valid_to
+ if valid_from and 'nbf' not in new_payload:
+ new_payload['nbf'] = valid_from
+ if issued_at and 'iat' not in new_payload:
+ new_payload['iat'] = issued_at
+ if id and 'jti' not in new_payload:
+ new_payload['jti'] = id
+ return encode(secret, new_payload, alg)
+
+
def encode(secret: Union[str, bytes], payload: dict, alg='HS256'):
secret = util.to_bytes(secret)
| Add support for registered claims
There are a list of registered claims in [rfc7519](https://tools.ietf.org/html/rfc7519#section-4.1).
Simplejwt should support these out of the box. | jmwri/simplejwt | diff --git a/tests/test_jwt.py b/tests/test_jwt.py
index ebb62f9..cacda88 100644
--- a/tests/test_jwt.py
+++ b/tests/test_jwt.py
@@ -25,6 +25,26 @@ test_token_data = {
}
}
+registered_claims = {
+ 'issuer': 'iss',
+ 'subject': 'sub',
+ 'audience': 'aud',
+ 'valid_to': 'exp',
+ 'valid_from': 'nbf',
+ 'issued_at': 'iat',
+ 'id': 'jti',
+}
+
+test_registered_claims = {
+ 'issuer': 'test_issuer',
+ 'subject': 'test_subject',
+ 'audience': 'test_audience',
+ 'valid_to': 789,
+ 'valid_from': 456,
+ 'issued_at': 123,
+ 'id': 'test_id',
+}
+
def test_get_algorithm_hs256():
assert jwt.get_algorithm('HS256') is hashlib.sha256
@@ -63,6 +83,31 @@ def test_encode():
) == token
+def test_make_claims():
+ for name, abb in registered_claims.items():
+ args = {
+ 'secret': test_token_data['secret'],
+ 'payload': test_token_data['payload'],
+ name: test_registered_claims[name]
+ }
+ token = jwt.make(**args)
+ payload = jwt.decode(test_token_data['secret'], token)
+ assert payload[abb] == test_registered_claims[name]
+
+
+def test_make_precedence():
+ token = jwt.make(test_token_data['secret'], {'iss': 'usr_defined_iss'},
+ issuer='my_iss')
+ payload = jwt.decode(test_token_data['secret'], token)
+ assert payload['iss'] == 'usr_defined_iss'
+
+
+def test_make_leaves_payload_unmodified():
+ payload = {'my': 'payload'}
+ jwt.make(test_token_data['secret'], payload, issuer='my_iss')
+ assert payload == {'my': 'payload'}
+
+
def test_decode():
for alg, token in test_tokens.items():
assert jwt.decode(
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 3
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/jmwri/simplejwt.git@e3edef90eda15e3f4c23bce0465b2def14868d30#egg=simplejwt
tomli==2.2.1
tox==4.25.0
typing==3.7.4.3
typing_extensions==4.13.0
virtualenv==20.29.3
| name: simplejwt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- filelock==3.18.0
- platformdirs==4.3.7
- pyproject-api==1.9.0
- tomli==2.2.1
- tox==4.25.0
- typing==3.7.4.3
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/simplejwt
| [
"tests/test_jwt.py::test_make_claims",
"tests/test_jwt.py::test_make_precedence",
"tests/test_jwt.py::test_make_leaves_payload_unmodified"
]
| []
| [
"tests/test_jwt.py::test_get_algorithm_hs256",
"tests/test_jwt.py::test_get_algorithm_hs384",
"tests/test_jwt.py::test_get_algorithm_hs512",
"tests/test_jwt.py::test_get_algorithm_incorrect",
"tests/test_jwt.py::test_cover_all_algorithms",
"tests/test_jwt.py::test_encode",
"tests/test_jwt.py::test_decode",
"tests/test_jwt.py::test_decode_invalid_signature",
"tests/test_jwt.py::test_decode_invalid_payload"
]
| []
| MIT License | 2,364 | [
"simplejwt/__init__.py",
"README.md",
"simplejwt/jwt.py"
]
| [
"simplejwt/__init__.py",
"README.md",
"simplejwt/jwt.py"
]
|
|
conan-io__conan-2717 | 760a071260bc1c3b777356a7e4f9c639a5cd3eaa | 2018-04-05 10:02:34 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | danimtb: My rework on the libcurl library: https://github.com/danimtb/conan-libcurl/pull/1
Some commented code, not deleted as still not tested.
I have not changed the code of unused function to keep the diff readable.
This PR improves the usability in general but the helper is not as easy to use as the cmake one, but also autotools is not as "easy" as cmake is 😆
lasote: I agree, still not an ideal recipe, but it is a very special recipe and with this PR it is improved in the cross build flags and the env vars.
danimtb: Something else:
Original libcurl recipe was doing this
```python
params.append('--prefix=%s' % self.package_folder.replace('\\', '/'))
```
As the helper now takes care of the `--prefix`, is it needed to add the replace in the helper to be safe?
lasote: good catch. Yes, do it.
danimtb: Build failed due to pip issues. Relaunch if possible | diff --git a/conans/client/build/autotools_environment.py b/conans/client/build/autotools_environment.py
index 0106e1cb0..a73d55b7f 100644
--- a/conans/client/build/autotools_environment.py
+++ b/conans/client/build/autotools_environment.py
@@ -12,7 +12,7 @@ from conans.client.build.cppstd_flags import cppstd_flag
from conans.client.tools.oss import OSInfo
from conans.client.tools.win import unix_path
from conans.tools import (environment_append, args_to_string, cpu_count, cross_building,
- detected_architecture)
+ detected_architecture, get_gnu_triplet)
class AutoToolsBuildEnvironment(object):
@@ -57,68 +57,32 @@ class AutoToolsBuildEnvironment(object):
# Not declared by default
self.fpic = None
- def _get_triplet(self, the_arch, the_os):
- """
- machine-vendor-op_system, But vendor can be omitted in practice
- """
-
- # Calculate the arch
- machine = {"x86": "i686" if the_os != "Linux" else "x86",
- "x86_64": "x86_64",
- "armv6": "arm",
- "armv7": "arm",
- "armv7s": "arm",
- "armv7k": "arm",
- "armv7hf": "arm",
- "armv8": "aarch64"}.get(the_arch, None)
- if machine is None:
- self._conanfile.output.warn("Unknown '%s' machine, Conan doesn't know how to "
- "translate it to the GNU triplet, please report at "
- " https://github.com/conan-io/conan/issues" % the_arch)
- return "unknown"
-
- # Calculate the OS
- compiler = self._conanfile.settings.get_safe("compiler")
- if compiler == "gcc":
- windows_op = "w64-mingw32"
- elif compiler == "Visual Studio":
- windows_op = "windows-msvc"
- else:
- windows_op = "windows"
-
- op_system = {"Windows": windows_op,
- "Linux": "linux-gnu",
- "Darwin": "apple-darwin",
- "Android": "linux-android",
- "Macos": "apple-darwin",
- "iOS": "apple-darwin",
- "watchOS": "apple-darwin",
- "tvOS": "apple-darwin"}.get(the_os, the_os.lower())
-
- if the_os in ("Linux", "Android"):
- if "arm" in the_arch and the_arch != "armv8":
- op_system += "eabi"
-
- if the_arch == "armv7hf" and the_os == "Linux":
- op_system += "hf"
-
- return "%s-%s" % (machine, op_system)
+ # Precalculate build, host, target triplets
+ self.build, self.host, self.target = self._get_host_build_target_flags()
- def _get_host_build_target_flags(self, arch_detected, os_detected):
+ def _get_host_build_target_flags(self):
"""Based on google search for build/host triplets, it could need a lot
and complex verification"""
+ arch_detected = detected_architecture() or platform.machine()
+ os_detected = platform.system()
+ arch_settings = self._conanfile.settings.get_safe("arch")
+ os_settings = self._conanfile.settings.get_safe("os")
+ compiler = self._conanfile.settings.get_safe("compiler")
+
+ if (os_detected is None or arch_detected is None or arch_settings is None or
+ os_settings is None):
+ return False, False, False
if not cross_building(self._conanfile.settings, os_detected, arch_detected):
return False, False, False
- build = self._get_triplet(arch_detected, os_detected)
- host = self._get_triplet(self._conanfile.settings.get_safe("arch"),
- self._conanfile.settings.get_safe("os"))
+ build = get_gnu_triplet(os_detected, arch_detected, compiler)
+ host = get_gnu_triplet(os_settings, arch_settings, compiler)
return build, host, None
def configure(self, configure_dir=None, args=None, build=None, host=None, target=None,
- pkg_config_paths=None):
+ pkg_config_paths=None, vars=None):
"""
:param pkg_config_paths: Optional paths to locate the *.pc files
:param configure_dir: Absolute or relative path to the configure script
@@ -141,25 +105,20 @@ class AutoToolsBuildEnvironment(object):
configure_dir = configure_dir.rstrip("/")
else:
configure_dir = "."
- auto_build, auto_host, auto_target = None, None, None
- if build is None or host is None or target is None:
- arch_detected = detected_architecture() or platform.machine()
- os_detected = platform.system()
- flags = self._get_host_build_target_flags(arch_detected, os_detected)
- auto_build, auto_host, auto_target = flags
+
triplet_args = []
if build is not False: # Skipped by user
- if build or auto_build: # User specified value or automatic
- triplet_args.append("--build=%s" % (build or auto_build))
+ if build or self.build: # User specified value or automatic
+ triplet_args.append("--build=%s" % (build or self.build))
if host is not False: # Skipped by user
- if host or auto_host: # User specified value or automatic
- triplet_args.append("--host=%s" % (host or auto_host))
+ if host or self.host: # User specified value or automatic
+ triplet_args.append("--host=%s" % (host or self.host))
if target is not False: # Skipped by user
- if target or auto_target: # User specified value or automatic
- triplet_args.append("--target=%s" % (target or auto_target))
+ if target or self.target: # User specified value or automatic
+ triplet_args.append("--target=%s" % (target or self.target))
if pkg_config_paths:
pkg_env = {"PKG_CONFIG_PATH": os.pathsep.join(pkg_config_paths)}
@@ -169,8 +128,14 @@ class AutoToolsBuildEnvironment(object):
pkg_env = {"PKG_CONFIG_PATH": self._conanfile.build_folder} \
if "pkg_config" in self._conanfile.generators else {}
+ if self._conanfile.package_folder is not None:
+ if not args:
+ args = ["--prefix=%s" % self._conanfile.package_folder.replace("\\", "/")]
+ elif not any(["--prefix=" in arg for arg in args]):
+ args.append("--prefix=%s" % self._conanfile.package_folder.replace("\\", "/"))
+
with environment_append(pkg_env):
- with environment_append(self.vars):
+ with environment_append(vars or self.vars):
configure_dir = self._adjust_path(configure_dir)
command = '%s/configure %s %s' % (configure_dir,
args_to_string(args), " ".join(triplet_args))
@@ -184,17 +149,20 @@ class AutoToolsBuildEnvironment(object):
path = unix_path(path, path_flavor=self.subsystem)
return '"%s"' % path if " " in path else path
- def make(self, args="", make_program=None, target=None):
+ def make(self, args="", make_program=None, target=None, vars=None):
if not self._conanfile.should_build:
return
make_program = os.getenv("CONAN_MAKE_PROGRAM") or make_program or "make"
- with environment_append(self.vars):
+ with environment_append(vars or self.vars):
str_args = args_to_string(args)
cpu_count_option = ("-j%s" % cpu_count()) if "-j" not in str_args else None
self._conanfile.run("%s" % join_arguments([make_program, target, str_args,
cpu_count_option]),
win_bash=self._win_bash, subsystem=self.subsystem)
+ def install(self, args="", make_program=None, vars=None):
+ self.make(args=args, make_program=make_program, target="install", vars=vars)
+
def _configure_link_flags(self):
"""Not the -L"""
ret = copy.copy(self._deps_cpp_info.sharedlinkflags)
@@ -313,7 +281,6 @@ class AutoToolsBuildEnvironment(object):
@property
def vars(self):
-
ld_flags, cpp_flags, libs, cxx_flags, c_flags = self._get_vars()
cpp_flags = " ".join(cpp_flags) + _environ_value_prefix("CPPFLAGS")
@@ -328,7 +295,6 @@ class AutoToolsBuildEnvironment(object):
"LDFLAGS": ldflags.strip(),
"LIBS": libs.strip(),
}
-
return ret
diff --git a/conans/client/tools/oss.py b/conans/client/tools/oss.py
index 014224365..fb9398b74 100644
--- a/conans/client/tools/oss.py
+++ b/conans/client/tools/oss.py
@@ -307,6 +307,60 @@ def get_cross_building_settings(settings, self_os=None, self_arch=None):
return build_os, build_arch, host_os, host_arch
+def get_gnu_triplet(os, arch, compiler=None):
+ """
+ Returns string with <machine>-<vendor>-<op_system> triplet (<vendor> can be omitted in practice)
+
+ :param os: os to be used to create the triplet
+ :param arch: arch to be used to create the triplet
+ :param compiler: compiler used to create the triplet (only needed fo windows)
+ """
+
+ if os == "Windows" and compiler is None:
+ raise ConanException("'compiler' parameter for 'get_gnu_triplet()' is not specified and "
+ "needed for os=Windows")
+
+ # Calculate the arch
+ machine = {"x86": "i686" if os != "Linux" else "x86",
+ "x86_64": "x86_64",
+ "armv6": "arm",
+ "armv7": "arm",
+ "armv7s": "arm",
+ "armv7k": "arm",
+ "armv7hf": "arm",
+ "armv8": "aarch64"}.get(arch, None)
+ if machine is None:
+ raise ConanException("Unknown '%s' machine. Conan doesn't know how to "
+ "translate it to the GNU triplet, please report at "
+ "https://github.com/conan-io/conan/issues" % arch)
+
+ # Calculate the OS
+ if compiler == "gcc":
+ windows_op = "w64-mingw32"
+ elif compiler == "Visual Studio":
+ windows_op = "windows-msvc"
+ else:
+ windows_op = "windows"
+
+ op_system = {"Windows": windows_op,
+ "Linux": "linux-gnu",
+ "Darwin": "apple-darwin",
+ "Android": "linux-android",
+ "Macos": "apple-darwin",
+ "iOS": "apple-darwin",
+ "watchOS": "apple-darwin",
+ "tvOS": "apple-darwin"}.get(os, os.lower())
+
+ if os in ("Linux", "Android"):
+ if "arm" in arch and arch != "armv8":
+ op_system += "eabi"
+
+ if arch == "armv7hf" and os == "Linux":
+ op_system += "hf"
+
+ return "%s-%s" % (machine, op_system)
+
+
try:
os_info = OSInfo()
except Exception as exc:
| AutoToolsBuildEnvironment improvements
1. Should AutoToolsBuildEnvironment.configure adjust --prefix to self.package_folder?
The same way we are doing it with CMake.
Be aware: Could be breaking, opt-in?
2. The "vars" should be a real list (not a dynamic property) precalculated to let the user change it before calling the configure
3. The triplets for cross-building should be also pre-calculated and we should allow to change them. Probably to a "args" list.
In general, if someone doesn't want to use the ".configure()":
```python
env_build = AutoToolsBuildEnvironment(self)
# Here I modify the env_build.vars
# Here I modify the env_buidl.args
with #tools.environment_append(env_build.vars):
self.run("./myconfigure %s" % env_build.args)
```
Note: Inspired by the libcurl recipe, where the above issues make a lot more complex the recipe. | conan-io/conan | diff --git a/conans/test/build_helpers/autotools_configure_test.py b/conans/test/build_helpers/autotools_configure_test.py
index 83eced2eb..f89722174 100644
--- a/conans/test/build_helpers/autotools_configure_test.py
+++ b/conans/test/build_helpers/autotools_configure_test.py
@@ -379,142 +379,14 @@ class AutoToolsConfigureTest(unittest.TestCase):
def test_previous_env(self):
settings = MockSettings({"arch": "x86",
- "os": "Linux"})
+ "os": "Linux",
+ "compiler": "gcc"})
conanfile = MockConanfile(settings)
with tools.environment_append({"CPPFLAGS": "MyCppFlag"}):
be = AutoToolsBuildEnvironment(conanfile)
self.assertEquals(be.vars["CPPFLAGS"], "MyCppFlag")
- def cross_build_flags_test(self):
-
- def get_values(this_os, this_arch, setting_os, setting_arch, compiler=None):
- settings = MockSettings({"arch": setting_arch,
- "os": setting_os,
- "compiler": compiler})
- conanfile = MockConanfile(settings)
- conanfile.settings = settings
- be = AutoToolsBuildEnvironment(conanfile)
- return be._get_host_build_target_flags(this_arch, this_os)
-
- build, host, target = get_values("Linux", "x86_64", "Linux", "armv7hf")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-gnueabihf")
-
- build, host, target = get_values("Linux", "x86", "Linux", "armv7hf")
- self.assertEquals(build, "x86-linux-gnu")
- self.assertEquals(host, "arm-linux-gnueabihf")
-
- build, host, target = get_values("Linux", "x86", "Linux", "x86")
- self.assertFalse(build)
- self.assertFalse(host)
- self.assertFalse(target)
-
- build, host, target = get_values("Linux", "x86_64", "Linux", "x86_64")
- self.assertFalse(build)
- self.assertFalse(host)
- self.assertFalse(target)
-
- build, host, target = get_values("Linux", "x86_64", "Linux", "x86")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "x86-linux-gnu")
- self.assertFalse(target)
-
- build, host, target = get_values("Linux", "x86_64", "Windows", "x86", compiler="gcc")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "i686-w64-mingw32")
- self.assertFalse(target)
-
- build, host, target = get_values("Linux", "x86_64", "Windows", "x86", compiler="Visual Studio")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "i686-windows-msvc") # Not very common but exists sometimes
- self.assertFalse(target)
-
- build, host, target = get_values("Linux", "x86_64", "Linux", "armv7hf")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-gnueabihf")
-
- build, host, target = get_values("Linux", "x86_64", "Linux", "armv7")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-gnueabi")
-
- build, host, target = get_values("Linux", "x86_64", "Linux", "armv6")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-gnueabi")
-
- build, host, target = get_values("Linux", "x86_64", "Android", "x86")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "i686-linux-android")
-
- build, host, target = get_values("Linux", "x86_64", "Android", "x86_64")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "x86_64-linux-android")
-
- build, host, target = get_values("Linux", "x86_64", "Android", "armv7")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-androideabi")
-
- build, host, target = get_values("Linux", "x86_64", "Android", "armv7hf")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-androideabi")
-
- build, host, target = get_values("Linux", "x86_64", "Android", "armv8")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "aarch64-linux-android")
-
- build, host, target = get_values("Linux", "x86_64", "Android", "armv6")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "arm-linux-androideabi")
-
- build, host, target = get_values("Linux", "x86_64", "Windows", "x86", compiler="gcc")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "i686-w64-mingw32")
-
- build, host, target = get_values("Linux", "x86_64", "Windows", "x86_64", compiler="gcc")
- self.assertEquals(build, "x86_64-linux-gnu")
- self.assertEquals(host, "x86_64-w64-mingw32")
-
- build, host, target = get_values("Windows", "x86_64", "Windows", "x86_64")
- self.assertFalse(build)
- self.assertFalse(host)
- self.assertFalse(target)
-
- build, host, target = get_values("Windows", "x86", "Windows", "x86")
- self.assertFalse(build)
- self.assertFalse(host)
- self.assertFalse(target)
-
- build, host, target = get_values("Windows", "x86_64", "Windows", "x86", compiler="gcc")
- self.assertEquals(build, "x86_64-w64-mingw32")
- self.assertEquals(host, "i686-w64-mingw32")
- self.assertFalse(target)
-
- build, host, target = get_values("Windows", "x86_64", "Linux", "armv7hf", compiler="gcc")
- self.assertEquals(build, "x86_64-w64-mingw32")
- self.assertEquals(host, "arm-linux-gnueabihf")
- self.assertFalse(target)
-
- build, host, target = get_values("Darwin", "x86_64", "Android", "armv7hf")
-
- self.assertEquals(build, "x86_64-apple-darwin")
- self.assertEquals(host, "arm-linux-androideabi")
-
- build, host, target = get_values("Darwin", "x86_64", "Macos", "x86")
- self.assertEquals(build, "x86_64-apple-darwin")
- self.assertEquals(host, "i686-apple-darwin")
-
- build, host, target = get_values("Darwin", "x86_64", "iOS", "armv7")
- self.assertEquals(build, "x86_64-apple-darwin")
- self.assertEquals(host, "arm-apple-darwin")
-
- build, host, target = get_values("Darwin", "x86_64", "watchOS", "armv7k")
- self.assertEquals(build, "x86_64-apple-darwin")
- self.assertEquals(host, "arm-apple-darwin")
-
- build, host, target = get_values("Darwin", "x86_64", "tvOS", "armv8")
- self.assertEquals(build, "x86_64-apple-darwin")
- self.assertEquals(host, "aarch64-apple-darwin")
-
def test_pkg_config_paths(self):
if platform.system() == "Windows":
return
@@ -552,6 +424,10 @@ class HelloConan(ConanFile):
runner = RunnerMock()
conanfile = MockConanfile(MockSettings({}), None, runner)
ab = AutoToolsBuildEnvironment(conanfile)
+ self.assertFalse(ab.build)
+ self.assertFalse(ab.host)
+ self.assertFalse(ab.target)
+
ab.configure()
self.assertEquals(runner.command_called, "./configure ")
@@ -564,13 +440,106 @@ class HelloConan(ConanFile):
ab.configure(target="i686-apple-darwin")
self.assertEquals(runner.command_called, "./configure --target=i686-apple-darwin")
- def test_make_targets(self):
+ conanfile = MockConanfile(MockSettings({"build_type": "Debug",
+ "arch": "x86_64",
+ "os": "Windows",
+ "compiler": "gcc",
+ "compiler.libcxx": "libstdc++"}),
+ None, runner)
+ ab = AutoToolsBuildEnvironment(conanfile)
+ ab.configure()
+ if platform.system() == "Windows":
+ # Not crossbuilding
+ self.assertFalse(ab.host)
+ self.assertFalse(ab.build)
+ self.assertIn("./configure", runner.command_called)
+ self.assertNotIn("--build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32",
+ runner.command_called)
+ elif platform.system() == "Linux":
+ self.assertIn("x86_64-w64-mingw32", ab.host)
+ self.assertIn("x86_64-linux-gnu", ab.build)
+ self.assertIn("./configure --build=x86_64-linux-gnu --host=x86_64-w64-mingw32",
+ runner.command_called)
+ else:
+ self.assertIn("x86_64-w64-mingw32", ab.host)
+ self.assertIn("x86_64-apple-darwin", ab.build)
+ self.assertIn("./configure --build=x86_64-apple-darwin --host=x86_64-w64-mingw32",
+ runner.command_called)
+
+ ab.configure(build="fake_build_triplet", host="fake_host_triplet")
+ self.assertIn("./configure --build=fake_build_triplet --host=fake_host_triplet",
+ runner.command_called)
+
+ ab.build = "superfake_build_triplet"
+ ab.host = "superfake_host_triplet"
+ ab.configure()
+ self.assertIn("./configure --build=superfake_build_triplet --host=superfake_host_triplet",
+ runner.command_called)
+
+ def test_make_targets_install(self):
runner = RunnerMock()
conanfile = MockConanfile(MockSettings({}), None, runner)
-
+
ab = AutoToolsBuildEnvironment(conanfile)
ab.configure()
-
+
ab.make(target="install")
- self.assertEquals(runner.command_called,"make install -j%s" % cpu_count())
+ self.assertEquals(runner.command_called, "make install -j%s" % cpu_count())
+ ab.install()
+ self.assertEquals(runner.command_called, "make install -j%s" % cpu_count())
+
+ def autotools_prefix_test(self):
+ runner = RunnerMock()
+ conanfile = MockConanfile(MockSettings({}), None, runner)
+ # Package folder is not defined
+ ab = AutoToolsBuildEnvironment(conanfile)
+ ab.configure()
+ self.assertNotIn("--prefix", runner.command_called)
+ # package folder defined
+ conanfile.package_folder = "/package_folder"
+ ab.configure()
+ if platform.system() == "Windows":
+ self.assertIn("./configure --prefix=/package_folder", runner.command_called)
+ else:
+ self.assertIn("./configure '--prefix=/package_folder'", runner.command_called)
+ # --prefix already used in args
+ ab.configure(args=["--prefix=/my_package_folder"])
+ if platform.system() == "Windows":
+ self.assertIn("./configure --prefix=/my_package_folder", runner.command_called)
+ self.assertNotIn("--prefix=/package_folder", runner.command_called)
+ else:
+ self.assertIn("./configure '--prefix=/my_package_folder'", runner.command_called)
+ self.assertNotIn("'--prefix=/package_folder'", runner.command_called)
+
+ def autotools_configure_vars_test(self):
+ from mock import patch
+
+ runner = RunnerMock()
+ settings = MockSettings({"build_type": "Debug",
+ "arch": "x86_64",
+ "compiler": "gcc",
+ "compiler.libcxx": "libstdc++"})
+ conanfile = MockConanfile(settings, None, runner)
+ conanfile.settings = settings
+ self._set_deps_info(conanfile)
+
+ def custom_configure(obj, configure_dir=None, args=None, build=None, host=None, target=None,
+ pkg_config_paths=None, vars=None): # @UnusedVariable
+ self.assertNotEqual(obj.vars, vars)
+ return vars or obj.vars
+
+ with patch.object(AutoToolsBuildEnvironment, 'configure', new=custom_configure):
+ be = AutoToolsBuildEnvironment(conanfile)
+
+ # Get vars and modify them
+ my_vars = be.vars
+ my_vars["fake_var"] = "fake"
+ my_vars["super_fake_var"] = "fakefake"
+
+ # TEST with default vars
+ mocked_result = be.configure()
+ self.assertEqual(mocked_result, be.vars)
+ # TEST with custom vars
+ mocked_result = be.configure(vars=my_vars)
+ self.assertEqual(mocked_result, my_vars)
diff --git a/conans/test/util/tools_test.py b/conans/test/util/tools_test.py
index 78da180b2..f8bc63a4b 100644
--- a/conans/test/util/tools_test.py
+++ b/conans/test/util/tools_test.py
@@ -685,6 +685,126 @@ class MyConan(ConanFile):
tools.download("https://httpbin.org/basic-auth/user/passwd", dest,
headers={"Authorization": "Basic dXNlcjpwYXNzd2Q="}, overwrite=True)
+ def get_gnu_triplet_test(self):
+ def get_values(this_os, this_arch, setting_os, setting_arch, compiler=None):
+ build = tools.get_gnu_triplet(this_os, this_arch, compiler)
+ host = tools.get_gnu_triplet(setting_os, setting_arch, compiler)
+ return build, host
+
+ build, host = get_values("Linux", "x86_64", "Linux", "armv7hf")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-gnueabihf")
+
+ build, host = get_values("Linux", "x86", "Linux", "armv7hf")
+ self.assertEquals(build, "x86-linux-gnu")
+ self.assertEquals(host, "arm-linux-gnueabihf")
+
+ build, host = get_values("Linux", "x86_64", "Linux", "x86")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "x86-linux-gnu")
+
+ build, host = get_values("Linux", "x86_64", "Windows", "x86", compiler="gcc")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "i686-w64-mingw32")
+
+ build, host = get_values("Linux", "x86_64", "Windows", "x86", compiler="Visual Studio")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "i686-windows-msvc") # Not very common but exists sometimes
+
+ build, host = get_values("Linux", "x86_64", "Linux", "armv7hf")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-gnueabihf")
+
+ build, host = get_values("Linux", "x86_64", "Linux", "armv7")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-gnueabi")
+
+ build, host = get_values("Linux", "x86_64", "Linux", "armv6")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-gnueabi")
+
+ build, host = get_values("Linux", "x86_64", "Android", "x86")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "i686-linux-android")
+
+ build, host = get_values("Linux", "x86_64", "Android", "x86_64")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "x86_64-linux-android")
+
+ build, host = get_values("Linux", "x86_64", "Android", "armv7")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-androideabi")
+
+ build, host = get_values("Linux", "x86_64", "Android", "armv7hf")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-androideabi")
+
+ build, host = get_values("Linux", "x86_64", "Android", "armv8")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "aarch64-linux-android")
+
+ build, host = get_values("Linux", "x86_64", "Android", "armv6")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "arm-linux-androideabi")
+
+ build, host = get_values("Linux", "x86_64", "Windows", "x86", compiler="gcc")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "i686-w64-mingw32")
+
+ build, host = get_values("Linux", "x86_64", "Windows", "x86_64", compiler="gcc")
+ self.assertEquals(build, "x86_64-linux-gnu")
+ self.assertEquals(host, "x86_64-w64-mingw32")
+
+ build, host = get_values("Windows", "x86_64", "Windows", "x86", compiler="gcc")
+ self.assertEquals(build, "x86_64-w64-mingw32")
+ self.assertEquals(host, "i686-w64-mingw32")
+
+ build, host = get_values("Windows", "x86_64", "Linux", "armv7hf", compiler="gcc")
+ self.assertEquals(build, "x86_64-w64-mingw32")
+ self.assertEquals(host, "arm-linux-gnueabihf")
+
+ build, host = get_values("Darwin", "x86_64", "Android", "armv7hf")
+ self.assertEquals(build, "x86_64-apple-darwin")
+ self.assertEquals(host, "arm-linux-androideabi")
+
+ build, host = get_values("Darwin", "x86_64", "Macos", "x86")
+ self.assertEquals(build, "x86_64-apple-darwin")
+ self.assertEquals(host, "i686-apple-darwin")
+
+ build, host = get_values("Darwin", "x86_64", "iOS", "armv7")
+ self.assertEquals(build, "x86_64-apple-darwin")
+ self.assertEquals(host, "arm-apple-darwin")
+
+ build, host = get_values("Darwin", "x86_64", "watchOS", "armv7k")
+ self.assertEquals(build, "x86_64-apple-darwin")
+ self.assertEquals(host, "arm-apple-darwin")
+
+ build, host = get_values("Darwin", "x86_64", "tvOS", "armv8")
+ self.assertEquals(build, "x86_64-apple-darwin")
+ self.assertEquals(host, "aarch64-apple-darwin")
+
+ for os in ["Windows", "Linux"]:
+ for arch in ["x86_64", "x86"]:
+ triplet = tools.get_gnu_triplet(os, arch, "gcc")
+
+ output = ""
+ if arch == "x86_64":
+ output += "x86_64"
+ else:
+ output += "i686" if os != "Linux" else "x86"
+
+ output += "-"
+ if os == "Windows":
+ output += "w64-mingw32"
+ else:
+ output += "linux-gnu"
+
+ self.assertIn(output, triplet)
+
+ # Compiler not specified for os="Windows"
+ with self.assertRaises(ConanException):
+ tools.get_gnu_triplet("Windows", "x86")
+
def detect_windows_subsystem_test(self):
# Dont raise test
result = tools.os_info.detect_windows_subsystem()
diff --git a/conans/test/utils/conanfile.py b/conans/test/utils/conanfile.py
index aa42e7c3e..409d10cb7 100644
--- a/conans/test/utils/conanfile.py
+++ b/conans/test/utils/conanfile.py
@@ -41,6 +41,8 @@ class MockConanfile(object):
self.should_build = True
self.should_install = True
+ self.package_folder = None
+
def run(self, *args, **kwargs):
if self.runner:
kwargs["output"] = None
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@760a071260bc1c3b777356a7e4f9c639a5cd3eaa#egg=conan
cov-core==1.15.0
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
nose-cov==1.6
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- cov-core==1.15.0
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- nose-cov==1.6
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_make_targets_install"
]
| [
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_pkg_config_paths",
"conans/test/util/tools_test.py::ToolsTest::test_get_env_in_conanfile",
"conans/test/util/tools_test.py::ToolsTest::test_global_tools_overrided"
]
| [
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_cppstd",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_mocked_methods",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_previous_env",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_variables",
"conans/test/util/tools_test.py::ReplaceInFileTest::test_replace_in_file",
"conans/test/util/tools_test.py::ToolsTest::test_environment_nested"
]
| []
| MIT License | 2,365 | [
"conans/client/tools/oss.py",
"conans/client/build/autotools_environment.py"
]
| [
"conans/client/tools/oss.py",
"conans/client/build/autotools_environment.py"
]
|
jmwri__simplejwt-5 | 6ffffc92a281010ae753dbbb23de0ef697da8797 | 2018-04-05 14:47:51 | 6ffffc92a281010ae753dbbb23de0ef697da8797 | diff --git a/README.md b/README.md
index 2b7b764..e8506f6 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,12 @@
A dead simple JWT library.
+# Supported algorithms
+
+* HS256
+* HS384
+* HS512
+
# Usage
## Encode
Returns a new token.
diff --git a/setup.py b/setup.py
index 18699b4..d39a525 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
from setuptools import setup, find_packages
github = 'https://github.com/jmwri/simplejwt'
-version = '0.3.0'
+version = '0.3.1'
setup(
name='simplejwt',
diff --git a/simplejwt/jwt.py b/simplejwt/jwt.py
index c776ec4..4ff7097 100644
--- a/simplejwt/jwt.py
+++ b/simplejwt/jwt.py
@@ -2,6 +2,7 @@ from typing import Union
import json
import hmac
import hashlib
+from datetime import datetime
from simplejwt import util
from simplejwt.exception import InvalidSignatureError
@@ -12,6 +13,18 @@ algorithms = {
'HS512': hashlib.sha512,
}
+default_alg = 'HS256'
+
+registered_claims = {
+ 'issuer': 'iss',
+ 'subject': 'sub',
+ 'audience': 'aud',
+ 'valid_to': 'exp',
+ 'valid_from': 'nbf',
+ 'issued_at': 'iat',
+ 'id': 'jti',
+}
+
def get_algorithm(alg: str):
if alg not in algorithms:
@@ -26,35 +39,136 @@ def _hash(secret: bytes, data: bytes, alg: str) -> bytes:
.digest()
-def make(secret: Union[str, bytes], payload: dict, alg='HS256',
- issuer: str = None, subject: str = None, audience: str = None,
- valid_to: int = None, valid_from: int = None, issued_at: int = None,
- id: str = None):
- new_payload = payload.copy()
- if issuer and 'iss' not in new_payload:
- new_payload['iss'] = issuer
- if subject and 'sub' not in new_payload:
- new_payload['sub'] = subject
- if audience and 'aud' not in new_payload:
- new_payload['aud'] = audience
- if valid_to and 'exp' not in new_payload:
- new_payload['exp'] = valid_to
- if valid_from and 'nbf' not in new_payload:
- new_payload['nbf'] = valid_from
- if issued_at and 'iat' not in new_payload:
- new_payload['iat'] = issued_at
- if id and 'jti' not in new_payload:
- new_payload['jti'] = id
- return encode(secret, new_payload, alg)
-
-
-def encode(secret: Union[str, bytes], payload: dict, alg='HS256'):
+class Jwt:
+ def __init__(self, secret: Union[str, bytes], payload: dict = None,
+ alg: str = default_alg, header: dict = None,
+ issuer: str = None, subject: str = None, audience: str = None,
+ valid_to: int = None, valid_from: int = None,
+ issued_at: int = None, id: str = None):
+ self.secret = secret
+ self.payload = payload or {}
+ self.alg = alg
+ self.header = header or {}
+ self.registered_claims = {}
+ if issuer:
+ self.issuer = issuer
+ if subject:
+ self.subject = subject
+ if audience:
+ self.audience = audience
+ if valid_to:
+ self.valid_to = valid_to
+ if valid_from:
+ self.valid_from = valid_from
+ if issued_at:
+ self.issued_at = issued_at
+ if id:
+ self.id = id
+ self._pop_claims_from_payload()
+
+ @property
+ def issuer(self):
+ return self.registered_claims.get('iss')
+
+ @issuer.setter
+ def issuer(self, issuer: str):
+ self.registered_claims['iss'] = issuer
+
+ @property
+ def subject(self):
+ return self.registered_claims.get('sub')
+
+ @subject.setter
+ def subject(self, subject: str):
+ self.registered_claims['sub'] = subject
+
+ @property
+ def audience(self):
+ return self.registered_claims.get('aud')
+
+ @audience.setter
+ def audience(self, audience: str):
+ self.registered_claims['aud'] = audience
+
+ @property
+ def valid_to(self):
+ return self.registered_claims.get('exp')
+
+ @valid_to.setter
+ def valid_to(self, valid_to: int):
+ self.registered_claims['exp'] = valid_to
+
+ @property
+ def valid_from(self):
+ return self.registered_claims.get('nbf')
+
+ @valid_from.setter
+ def valid_from(self, valid_from: int):
+ self.registered_claims['nbf'] = valid_from
+
+ @property
+ def issued_at(self):
+ return self.registered_claims.get('iat')
+
+ @issued_at.setter
+ def issued_at(self, issued_at: int):
+ self.registered_claims['iat'] = issued_at
+
+ @property
+ def id(self):
+ return self.registered_claims.get('jti')
+
+ @id.setter
+ def id(self, id: str):
+ self.registered_claims['jti'] = id
+
+ def valid(self, time: int = None):
+ time = time or int(datetime.utcnow().timestamp())
+ if time < self.valid_from:
+ return False
+ if time > self.valid_to:
+ return False
+ return True
+
+ def _pop_claims_from_payload(self):
+ claims_in_payload = [k for k in self.payload.keys() if
+ k in registered_claims.values()]
+ for name in claims_in_payload:
+ self.registered_claims[name] = self.payload.pop(name)
+
+ def encode(self):
+ payload = {}
+ payload.update(self.registered_claims)
+ payload.update(self.payload)
+ return encode(self.secret, payload, self.alg, self.header)
+
+ @staticmethod
+ def decode(secret: Union[str, bytes], token: Union[str, bytes],
+ alg: str = default_alg):
+ header, payload = _decode(secret, token, alg)
+ return Jwt(secret, payload, alg, header)
+
+
+def make(secret: Union[str, bytes], payload: dict, alg: str = default_alg,
+ **kwargs):
+ jwt = Jwt(secret, payload, alg, **kwargs)
+ return jwt.encode()
+
+
+def encode(secret: Union[str, bytes], payload: dict = None,
+ alg: str = default_alg, header: dict = None):
secret = util.to_bytes(secret)
- header = {
- 'type': 'JWT',
- 'alg': alg
- }
+ payload = payload or {}
+ header = header or {}
+
+ if isinstance(header, dict):
+ header = header.copy()
+ header.update({
+ 'type': 'JWT',
+ 'alg': alg
+ })
+
header_json = util.to_bytes(json.dumps(header))
header_b64 = util.b64_encode(header_json)
payload_json = util.to_bytes(json.dumps(payload))
@@ -68,14 +182,19 @@ def encode(secret: Union[str, bytes], payload: dict, alg='HS256'):
return util.from_bytes(token)
-def decode(secret: Union[str, bytes], token: Union[str, bytes], alg='HS256'):
+def _decode(secret: Union[str, bytes], token: Union[str, bytes],
+ alg: str = default_alg):
secret = util.to_bytes(secret)
token = util.to_bytes(token)
pre_signature, signature_segment = token.rsplit(b'.', 1)
- payload_b64 = pre_signature.split(b'.', 1)[1]
+ header_b64, payload_b64 = pre_signature.split(b'.')
+ header_json = util.b64_decode(header_b64)
+ header = json.loads(util.from_bytes(header_json))
payload_json = util.b64_decode(payload_b64)
payload = json.loads(util.from_bytes(payload_json))
+ if not isinstance(header, dict):
+ raise RuntimeError('Invalid header: {}'.format(header))
if not isinstance(payload, dict):
raise RuntimeError('Invalid payload: {}'.format(payload))
@@ -84,4 +203,10 @@ def decode(secret: Union[str, bytes], token: Union[str, bytes], alg='HS256'):
if not hmac.compare_digest(signature, calculated_signature):
raise InvalidSignatureError('Invalid signature')
+ return header, payload
+
+
+def decode(secret: Union[str, bytes], token: Union[str, bytes],
+ alg: str = default_alg):
+ _, payload = _decode(secret, token, alg)
return payload
| Create a Jwt object
It would be useful to have a `Jwt` object that provides helpers, such as `expired()` etc. | jmwri/simplejwt | diff --git a/tests/test_jwt.py b/tests/test_jwt.py
index cacda88..0f8665a 100644
--- a/tests/test_jwt.py
+++ b/tests/test_jwt.py
@@ -1,5 +1,6 @@
import pytest
import hashlib
+from datetime import datetime
from simplejwt import jwt, util
from simplejwt.exception import InvalidSignatureError
@@ -25,16 +26,6 @@ test_token_data = {
}
}
-registered_claims = {
- 'issuer': 'iss',
- 'subject': 'sub',
- 'audience': 'aud',
- 'valid_to': 'exp',
- 'valid_from': 'nbf',
- 'issued_at': 'iat',
- 'id': 'jti',
-}
-
test_registered_claims = {
'issuer': 'test_issuer',
'subject': 'test_subject',
@@ -84,7 +75,7 @@ def test_encode():
def test_make_claims():
- for name, abb in registered_claims.items():
+ for name, abb in jwt.registered_claims.items():
args = {
'secret': test_token_data['secret'],
'payload': test_token_data['payload'],
@@ -95,6 +86,69 @@ def test_make_claims():
assert payload[abb] == test_registered_claims[name]
+def test_jwt_registered_claims_constructor():
+ for name, abb in jwt.registered_claims.items():
+ args = {
+ 'secret': test_token_data['secret'],
+ 'payload': test_token_data['payload'],
+ name: test_registered_claims[name]
+ }
+ obj = jwt.Jwt(**args)
+ assert getattr(obj, name) == test_registered_claims[name]
+
+
+def test_jwt_registered_claims():
+ for name, abb in jwt.registered_claims.items():
+ args = {
+ 'secret': test_token_data['secret'],
+ 'payload': test_token_data['payload'],
+ }
+ obj = jwt.Jwt(**args)
+ setattr(obj, name, test_registered_claims[name])
+ token = obj.encode()
+ payload = jwt.decode(test_token_data['secret'], token)
+ assert getattr(obj, name) == test_registered_claims[name]
+ assert payload[abb] == test_registered_claims[name]
+
+
+def test_jwt_precedence():
+ obj = jwt.Jwt('secret', {'iss': 'usr_defined_iss'}, issuer='my_iss')
+ assert obj.registered_claims['iss'] == 'usr_defined_iss'
+
+
+def test_jwt_decode():
+ for alg, token in test_tokens.items():
+ obj = jwt.Jwt.decode(
+ test_token_data['secret'],
+ token,
+ alg
+ )
+ assert obj.secret == test_token_data['secret']
+ assert obj.alg == alg
+ assert obj.header == {
+ 'type': 'JWT',
+ 'alg': alg
+ }
+ assert obj.payload == test_token_data['payload']
+
+
+def test_jwt_valid():
+ obj = jwt.Jwt('secret', {}, valid_from=2, valid_to=4)
+ assert not obj.valid(1)
+ assert obj.valid(2)
+ assert obj.valid(3)
+ assert obj.valid(4)
+ assert not obj.valid(5)
+
+
+def test_jwt_valid_current_time():
+ now = int(datetime.utcnow().timestamp())
+ obj = jwt.Jwt('secret', {}, valid_from=now, valid_to=now)
+ assert obj.valid()
+ obj = jwt.Jwt('secret', {}, valid_from=now+1, valid_to=now+1)
+ assert not obj.valid()
+
+
def test_make_precedence():
token = jwt.make(test_token_data['secret'], {'iss': 'usr_defined_iss'},
issuer='my_iss')
@@ -131,6 +185,12 @@ def test_decode_invalid_signature():
)
+def test_decode_invalid_header():
+ token = jwt.encode(test_token_data['secret'], header='should be dict')
+ with pytest.raises(RuntimeError):
+ jwt.decode(test_token_data['secret'], token)
+
+
def test_decode_invalid_payload():
token = jwt.encode(test_token_data['secret'], 'should be dict')
with pytest.raises(RuntimeError):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/jmwri/simplejwt.git@6ffffc92a281010ae753dbbb23de0ef697da8797#egg=simplejwt
tomli==2.2.1
tox==4.25.0
typing==3.7.4.3
typing_extensions==4.13.0
virtualenv==20.29.3
| name: simplejwt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- filelock==3.18.0
- platformdirs==4.3.7
- pyproject-api==1.9.0
- tomli==2.2.1
- tox==4.25.0
- typing==3.7.4.3
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/simplejwt
| [
"tests/test_jwt.py::test_make_claims",
"tests/test_jwt.py::test_jwt_registered_claims_constructor",
"tests/test_jwt.py::test_jwt_registered_claims",
"tests/test_jwt.py::test_jwt_precedence",
"tests/test_jwt.py::test_jwt_decode",
"tests/test_jwt.py::test_jwt_valid",
"tests/test_jwt.py::test_jwt_valid_current_time",
"tests/test_jwt.py::test_decode_invalid_header"
]
| []
| [
"tests/test_jwt.py::test_get_algorithm_hs256",
"tests/test_jwt.py::test_get_algorithm_hs384",
"tests/test_jwt.py::test_get_algorithm_hs512",
"tests/test_jwt.py::test_get_algorithm_incorrect",
"tests/test_jwt.py::test_cover_all_algorithms",
"tests/test_jwt.py::test_encode",
"tests/test_jwt.py::test_make_precedence",
"tests/test_jwt.py::test_make_leaves_payload_unmodified",
"tests/test_jwt.py::test_decode",
"tests/test_jwt.py::test_decode_invalid_signature",
"tests/test_jwt.py::test_decode_invalid_payload"
]
| []
| MIT License | 2,366 | [
"setup.py",
"README.md",
"simplejwt/jwt.py"
]
| [
"setup.py",
"README.md",
"simplejwt/jwt.py"
]
|
|
evansde77__cirrus-198 | e745a2056779cb2fbc12c02d9fe2f57de6764d14 | 2018-04-05 18:09:48 | e745a2056779cb2fbc12c02d9fe2f57de6764d14 | diff --git a/requirements.txt b/requirements.txt
index f7083ec..b40b9ac 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,6 +14,6 @@ virtualenv-api>=2.1.16
virtualenv
twine>=1.9.1
pluggage>=0.0.4
-dockerstache>=0.0.13
+dockerstache>=0.0.14
requests-toolbelt>=0.8.0
tox>=2.7.0
diff --git a/src/cirrus/docker.py b/src/cirrus/docker.py
index cb491f0..0b74c2d 100644
--- a/src/cirrus/docker.py
+++ b/src/cirrus/docker.py
@@ -218,12 +218,17 @@ def _docker_build(path, tags, base_tag, build_helper):
command.append(path)
LOGGER.info("Executing docker build command: {}".format(' '.join(command)))
- try:
- stdout = subprocess.check_output(command)
- except subprocess.CalledProcessError as ex:
- LOGGER.error(ex.output)
- raise
- LOGGER.info(stdout)
+ p = subprocess.Popen(
+ command,
+ stdout=sys.stdout,
+ stderr=sys.stderr
+ )
+ status = p.wait()
+ if status:
+ msg = "docker build exited non-zero!"
+ LOGGER.error(msg)
+ raise RuntimeError(msg)
+
image = find_image_id(base_tag)
LOGGER.info("Image ID: {}".format(image))
| More readable/streaming output from docker-image build
The `git cirrus docker-image build` is really useful. It would be maybe even more useful if we could get at least one of these:
* more readable output; if there is an error in the output it's very hard to read
* streaming so that we can watch it scroll past the screen rather than wait until the end | evansde77/cirrus | diff --git a/tests/unit/cirrus/docker_test.py b/tests/unit/cirrus/docker_test.py
index a98b684..f6a11e3 100644
--- a/tests/unit/cirrus/docker_test.py
+++ b/tests/unit/cirrus/docker_test.py
@@ -29,6 +29,7 @@ class DockerFunctionTests(unittest.TestCase):
self.mock_popen.return_value = self.mock_popen
self.mock_popen.communicate = mock.Mock()
self.mock_popen.communicate.return_value = ('STDOUT', 'STDERR')
+ self.mock_popen.wait = mock.Mock(return_value=0)
self.opts = mock.Mock()
self.opts.login = False
@@ -65,10 +66,17 @@ class DockerFunctionTests(unittest.TestCase):
def test_docker_build(self):
"""test straight docker build call"""
dckr.docker_build(self.opts, self.config)
- self.failUnless(self.mock_check_output.called)
- self.mock_check_output.assert_has_calls(
+ self.failUnless(self.mock_popen.wait.called)
+ self.mock_popen.assert_has_calls(
mock.call(
- ['docker', 'build', '-t', 'unittesting/unittesting:latest', '-t', 'unittesting/unittesting:1.2.3', 'vm/docker_image']
+ [
+ 'docker', 'build', '-t',
+ 'unittesting/unittesting:latest', '-t',
+ 'unittesting/unittesting:1.2.3',
+ 'vm/docker_image'
+ ],
+ stderr=mock.ANY,
+ stdout=mock.ANY
)
)
@@ -77,15 +85,17 @@ class DockerFunctionTests(unittest.TestCase):
self.opts.build_arg = {"OPTION1": "VALUE1"}
self.opts.no_cache = False
dckr.docker_build(self.opts, self.config)
- self.failUnless(self.mock_check_output.called)
- self.mock_check_output.assert_has_calls(
+ self.failUnless(self.mock_popen.wait.called)
+ self.mock_popen.assert_has_calls(
mock.call(
[
'docker', 'build',
'-t', 'unittesting/unittesting:latest',
'-t', 'unittesting/unittesting:1.2.3',
'--build-arg', 'OPTION1=VALUE1',
- 'vm/docker_image']
+ 'vm/docker_image'],
+ stderr=mock.ANY,
+ stdout=mock.ANY
)
)
@@ -94,8 +104,8 @@ class DockerFunctionTests(unittest.TestCase):
self.opts.build_arg = {"OPTION1": "VALUE1"}
self.opts.no_cache = True
dckr.docker_build(self.opts, self.config)
- self.failUnless(self.mock_check_output.called)
- self.mock_check_output.assert_has_calls(
+ self.failUnless(self.mock_popen.wait.called)
+ self.mock_popen.assert_has_calls(
mock.call(
[
'docker', 'build',
@@ -103,15 +113,17 @@ class DockerFunctionTests(unittest.TestCase):
'-t', 'unittesting/unittesting:1.2.3',
'--no-cache',
'--build-arg', 'OPTION1=VALUE1',
- 'vm/docker_image']
+ 'vm/docker_image'],
+ stderr=mock.ANY,
+ stdout=mock.ANY
)
)
def test_docker_build_addl_repos(self):
self.config['docker']['additional_repos'] = "repo1:8080, repo2:8080 "
dckr.docker_build(self.opts, self.config)
- self.failUnless(self.mock_check_output.called)
- self.mock_check_output.assert_has_calls(
+ self.failUnless(self.mock_popen.wait.called)
+ self.mock_popen.assert_has_calls(
mock.call(
[
'docker', 'build',
@@ -121,7 +133,9 @@ class DockerFunctionTests(unittest.TestCase):
'-t', 'repo1:8080/unittesting:latest',
'-t', 'repo2:8080/unittesting:1.2.3',
'-t', 'repo2:8080/unittesting:latest',
- 'vm/docker_image']
+ 'vm/docker_image'],
+ stderr=mock.ANY,
+ stdout=mock.ANY
)
)
@@ -146,9 +160,16 @@ class DockerFunctionTests(unittest.TestCase):
output='vm/docker_image', context=None, defaults=None, input='template', extend_context=mock.ANY
)
)
- self.mock_check_output.assert_has_calls(
+ self.mock_popen.assert_has_calls(
mock.call(
- ['docker', 'build', '-t', 'unittesting/unittesting:latest', '-t', 'unittesting/unittesting:1.2.3', 'vm/docker_image']
+ [
+ 'docker', 'build', '-t',
+ 'unittesting/unittesting:latest', '-t',
+ 'unittesting/unittesting:1.2.3',
+ 'vm/docker_image'
+ ],
+ stderr=mock.ANY,
+ stdout=mock.ANY
)
)
@@ -167,9 +188,20 @@ class DockerFunctionTests(unittest.TestCase):
self.failUnless(self.mock_check_output.called)
self.mock_check_output.assert_has_calls(
[
- mock.call(['docker', 'login', '-u', 'steve', '-p', 'st3v3R0X', 'unittesting']),
- mock.call(['docker', 'build', '-t', 'unittesting/unittesting:latest', '-t', 'unittesting/unittesting:1.2.3', 'vm/docker_image'])
- ]
+ mock.call(
+ ['docker', 'login', '-u', 'steve', '-p', 'st3v3R0X', 'unittesting'],
+ )
+ ])
+ self.mock_popen.assert_has_calls(
+ mock.call(
+ [
+ 'docker', 'build', '-t',
+ 'unittesting/unittesting:latest', '-t',
+ 'unittesting/unittesting:1.2.3', 'vm/docker_image'
+ ],
+ stderr=mock.ANY,
+ stdout=mock.ANY
+ )
)
def test_docker_push(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | arrow==1.2.3
astroid==2.11.7
attrs==22.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
chevron==0.14.0
-e git+https://github.com/evansde77/cirrus.git@e745a2056779cb2fbc12c02d9fe2f57de6764d14#egg=cirrus_cli
colorama==0.4.5
cryptography==40.0.2
dill==0.3.4
distlib==0.3.9
dockerstache==0.0.17
docutils==0.18.1
filelock==3.4.1
gitdb==4.0.9
GitPython==3.1.18
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
isort==5.10.1
jeepney==0.7.1
keyring==23.4.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.0.1
nose==1.3.7
packaging==21.3
pep8==1.7.1
pkginfo==1.10.0
platformdirs==2.4.0
pluggage==0.0.4
pluggy==1.0.0
py==1.11.0
PyChef==0.2.3
pycparser==2.21
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
rfc3986==1.5.0
SecretStorage==3.3.3
six==1.17.0
smmap==5.0.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
tqdm==4.64.1
twine==3.8.0
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
virtualenv-api==2.1.18
webencodings==0.5.1
wrapt==1.16.0
zipp==3.6.0
| name: cirrus
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- arrow==1.2.3
- astroid==2.11.7
- attrs==22.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- chevron==0.14.0
- colorama==0.4.5
- cryptography==40.0.2
- dill==0.3.4
- distlib==0.3.9
- dockerstache==0.0.17
- docutils==0.18.1
- filelock==3.4.1
- gitdb==4.0.9
- gitpython==3.1.18
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- isort==5.10.1
- jeepney==0.7.1
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.0.1
- nose==1.3.7
- packaging==21.3
- pep8==1.7.1
- pkginfo==1.10.0
- platformdirs==2.4.0
- pluggage==0.0.4
- pluggy==1.0.0
- py==1.11.0
- pychef==0.2.3
- pycparser==2.21
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- six==1.17.0
- smmap==5.0.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- tqdm==4.64.1
- twine==3.8.0
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- virtualenv-api==2.1.18
- webencodings==0.5.1
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/cirrus
| [
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build_addl_repos",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build_args",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build_login",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build_no_cache",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build_template"
]
| []
| [
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_build_tag_opts",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_connection",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_connection_error",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_connection_success",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_push",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_push_addl",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_push_latest",
"tests/unit/cirrus/docker_test.py::DockerFunctionTests::test_docker_push_login",
"tests/unit/cirrus/docker_test.py::DockerUtilTest::test_get_docker_version",
"tests/unit/cirrus/docker_test.py::DockerUtilTest::test_get_docker_version_error",
"tests/unit/cirrus/docker_test.py::DockerUtilTest::test_is_docker_version_installed",
"tests/unit/cirrus/docker_test.py::DockerUtilTest::test_match_docker_version_error"
]
| []
| Apache License 2.0 | 2,367 | [
"src/cirrus/docker.py",
"requirements.txt"
]
| [
"src/cirrus/docker.py",
"requirements.txt"
]
|
|
andreroggeri__pynubank-14 | b315dd9b34064d16cc18fe91c4f96102d2a1444a | 2018-04-06 03:41:46 | f75f8543d662591817b6a4658724d3b98a8cb0d2 | coveralls:
[](https://coveralls.io/builds/16376687)
Coverage remained the same at 100.0% when pulling **dd960eea1cfb0ddf803605c24d8ffe897f5665d7 on janjitsu:master** into **b315dd9b34064d16cc18fe91c4f96102d2a1444a on andreroggeri:master**.
janjitsu: Você tocou num ponto interessante @andreroggeri ... na minha aplicação eu criei um cash das responses por que acabei ficando bloqueando também... E se fizessemos esse cache no `pynubank`? claro que precisariamos ver uma forma mais inteligente de invalidá-lo. Mas pra mim faz sentido. Muita coisa não vai mudar e eu ainda não entendi muito bem qual a regra que bloqueia a máquina... Se soubessemos poderíamos antecipar o bloqueio na aplicação.
andreroggeri: @janjitsu criei a #15 para discutirmos isso ! 👍 | diff --git a/.gitignore b/.gitignore
index 00099ec..8a3fedb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,6 +45,7 @@ nosetests.xml
coverage.xml
*.cover
.hypothesis/
+.pytest_cache/
# Translations
*.mo
diff --git a/README.md b/README.md
index b0185e1..f7db56b 100644
--- a/README.md
+++ b/README.md
@@ -17,13 +17,20 @@ Disponível via pip
from pynubank import Nubank
# Utilize o CPF sem pontos ou traços
-nu = Nubank('123456789', 'senha')
+nu = Nubank('123456789', 'senha')
# Lista de dicionários contendo todas as transações de seu cartão de crédito
card_statements = nu.get_card_statements()
# Soma de todas as compras
-print(sum([t['amount'] for t in card_statements]))
+print(sum([t['amount'] for t in card_statements]))
+
+# Lista de dicionários contendo todas as faturas do seu cartão de crédito
+bills = nu.get_bills()
+
+# Retorna um dicionário contendo os detalhes de uma fatura retornada por
+get_bills()
+bill_details = nu.get_bill_details(bills[1])
```
### NuConta
@@ -31,16 +38,16 @@ print(sum([t['amount'] for t in card_statements]))
from pynubank import Nubank
# Utilize o CPF sem pontos ou traços
-nu = Nubank('123456789', 'senha')
+nu = Nubank('123456789', 'senha')
# Lista de dicionários contendo todas as transações de seu cartão de crédito
-account_statements = nu.get_account_statements()
+account_statements = nu.get_account_statements()
# Soma de todas as transações na NuConta
# Observacão: As transações de saída não possuem o valor negativo, então deve-se olhar a propiedade "__typename".
# TransferInEvent = Entrada
# TransferOutEvent = Saída
-print(sum([t['amount'] for t in account_statements]))
+print(sum([t['amount'] for t in account_statements]))
# Saldo atual
print(nu.get_account_balance())
@@ -59,7 +66,7 @@ print(nu.get_account_balance())
>>> df = pd.DataFrame(transactions, columns=['time', 'amount'])
>>> df['time'] = pd.to_datetime(df['time'])
>>> df.groupby([df.time.dt.year, df.time.dt.month]).sum() # Agrupado por Ano/Mês
-Year Month Amount
+Year Month Amount
2016 6 20000
7 20000
8 20000
@@ -67,14 +74,14 @@ Year Month Amount
10 20000
11 40000
12 40000
-
+
2017 1 100000
2 20000
3 30000
4 35000
5 12000
6 22000
-
+
>>> df.groupby([df.title]).sum() # Agrupado por categoria
title amount
casa 13000
@@ -105,4 +112,4 @@ $ pytest
## Contribuindo
-Envie sua PR para melhorar esse projeto ! 😋
\ No newline at end of file
+Envie sua PR para melhorar esse projeto ! 😋
diff --git a/pynubank/nubank.py b/pynubank/nubank.py
index ae31690..a429e4e 100644
--- a/pynubank/nubank.py
+++ b/pynubank/nubank.py
@@ -64,8 +64,12 @@ class Nubank:
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
- def get_card_bills(self):
+ def get_bills(self):
request = requests.get(self.bills_url, headers=self.headers)
+ return json.loads(request.content.decode('utf-8'))['bills']
+
+ def get_bill_details(self, bill):
+ request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_account_feed(self):
| Adicionar endpoint para detalhes da fatura
Olá! Gostaria de obter os detalhes da fatura. Até já implementei isso, porém tenho dúvidas sobre a melhor forma de implementação, uma forma simples seria:
```
def get_bill_details(self, bill):
request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
```
`bill` seria um elemento do array retornado por `get_card_bills()` mas talvez fica estranho passar para a função elementos de um resultado de uma response externa, talvez o resultado poderia se tornar um array de elementos definidos na aplicação, algo como:
```
class Bill
(...)
```
Gostaria de umas dicas pois não tenho muita experiência com python profissionalmente, quais as melhores práticas, etc talvez links de projetos interessantes para seguir. #ajudaluciano | andreroggeri/pynubank | diff --git a/tests/test_nubank_client.py b/tests/test_nubank_client.py
index 787a858..1c1108a 100644
--- a/tests/test_nubank_client.py
+++ b/tests/test_nubank_client.py
@@ -231,6 +231,89 @@ def bills_return():
]
}
[email protected]
+def bill_details_return():
+ return {
+ 'bill': {
+ '_links': {
+ 'barcode': {
+ 'href': 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz/boleto/barcode'
+ },
+ 'boleto_email': {
+ 'href': 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz/boleto/email'
+ },
+ 'invoice_email': {
+ 'href': 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz/invoice/email'
+ },
+ 'self': {
+ 'href': 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz'
+ }
+ },
+ 'account_id': 'abcde-fghi-jklmn-opqrst-uvxz',
+ 'auto_debit_failed': False,
+ 'barcode': '',
+ 'id': 'abcde-fghi-jklmn-opqrst-uvxz',
+ 'line_items': [
+ {
+ 'amount': 2390,
+ 'category': 'Eletrônicos',
+ 'charges': 1,
+ 'href': 'nuapp://transaction/abcde-fghi-jklmn-opqrst-uvxz',
+ 'id': 'abcde-fghi-jklmn-opqrst-uvxz',
+ 'index': 0,
+ 'post_date': '2015-09-09',
+ 'title': 'Mercadopago Mlivre'
+ },
+ {
+ 'amount': 5490,
+ 'category': 'Eletrônicos',
+ 'charges': 1,
+ 'href': 'nuapp://transaction/abcde-fghi-jklmn-opqrst-uvxz',
+ 'id': 'abcde-fghi-jklmn-opqrst-uvxz',
+ 'index': 0,
+ 'post_date': '2015-09-09',
+ 'title': 'Mercadopago Mlivre'
+ }
+ ],
+ 'linha_digitavel': '',
+ 'payment_method': 'boleto',
+ 'state': 'overdue',
+ 'status': 'paid',
+ 'summary': {
+ 'adjustments': '0',
+ 'close_date': '2015-09-25',
+ 'due_date': '2015-10-10',
+ 'effective_due_date': '2015-10-13',
+ 'expenses': '78.8000',
+ 'fees': '0',
+ 'interest': 0,
+ 'interest_charge': '0',
+ 'interest_rate': '0.0775',
+ 'interest_reversal': '0',
+ 'international_tax': '0',
+ 'late_fee': '0.02',
+ 'late_interest_rate': '0.0875',
+ 'minimum_payment': 7005,
+ 'open_date': '2015-07-23',
+ 'paid': 7880,
+ 'past_balance': 0,
+ 'payments': '0',
+ 'precise_minimum_payment': '70.054500',
+ 'precise_total_balance': '78.8000',
+ 'previous_bill_balance': '0',
+ 'tax': '0',
+ 'total_accrued': '0',
+ 'total_balance': 7880,
+ 'total_credits': '0',
+ 'total_cumulative': 7880,
+ 'total_financed': '0',
+ 'total_international': '0',
+ 'total_national': '78.8000',
+ 'total_payments': '0'
+ }
+ }
+ }
+
@pytest.fixture
def account_balance_return():
return {'data': {'viewer': {'savingsAccount': {'currentSavingsBalance': {'netAmount': 127.33}}}}}
@@ -318,7 +401,7 @@ def test_get_card_feed(monkeypatch, authentication_return, events_return):
assert events[0]['href'] == 'nuapp://transaction/abcde-fghi-jklmn-opqrst-uvxz'
assert events[0]['_links']['self']['href'] == 'https://prod-s0-webapp-proxy.nubank.com.br/api/proxy/_links_123'
-def test_get_card_bills(monkeypatch, authentication_return, bills_return):
+def test_get_bills(monkeypatch, authentication_return, bills_return):
response = create_fake_response(authentication_return)
monkeypatch.setattr('requests.post', MagicMock(return_value=response))
nubank_client = Nubank('12345678909', '12345678')
@@ -326,11 +409,8 @@ def test_get_card_bills(monkeypatch, authentication_return, bills_return):
response = create_fake_response(bills_return)
monkeypatch.setattr('requests.get', MagicMock(return_value=response))
- bills_response = nubank_client.get_card_bills()
- assert bills_response['_links']['future']['href'] == 'https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/future'
- assert bills_response['_links']['open']['href'] == 'https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open'
+ bills = nubank_client.get_bills()
- bills = bills_response['bills']
assert len(bills) == 3
assert bills[2]['_links']['self']['href'] == "https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz"
assert bills[2]['href'] == 'nuapp://bill/abcde-fghi-jklmn-opqrst-uvxz'
@@ -368,6 +448,70 @@ def test_get_card_bills(monkeypatch, authentication_return, bills_return):
assert summary["total_national"] == "364.32893934"
assert summary["total_payments"] == "-960.47"
+def test_get_bill_details(monkeypatch, authentication_return, bill_details_return):
+ response = create_fake_response(authentication_return)
+ monkeypatch.setattr('requests.post', MagicMock(return_value=response))
+ nubank_client = Nubank('12345678909', '12345678')
+
+ response = create_fake_response(bill_details_return)
+ monkeypatch.setattr('requests.get', MagicMock(return_value=response))
+
+ bill_mock = {'_links':{'self':{'href':'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz'}}}
+ bill_response = nubank_client.get_bill_details(bill_mock)
+
+ bill = bill_response['bill']
+
+ assert bill['_links']['barcode']['href'] == 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz/boleto/barcode'
+ assert bill['_links']['boleto_email']['href'] == 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz/boleto/email'
+ assert bill['_links']['invoice_email']['href'] == 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz/invoice/email'
+ assert bill['_links']['self']['href'] == 'https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz'
+ assert bill['account_id'] == 'abcde-fghi-jklmn-opqrst-uvxz'
+ assert bill['auto_debit_failed'] == False
+ assert bill['barcode'] == ''
+ assert bill['id'] == 'abcde-fghi-jklmn-opqrst-uvxz'
+ assert bill['line_items'][0]['amount'] == 2390
+ assert bill['line_items'][0]['category'] == 'Eletrônicos'
+ assert bill['line_items'][0]['charges'] == 1
+ assert bill['line_items'][0]['href'] == 'nuapp://transaction/abcde-fghi-jklmn-opqrst-uvxz'
+ assert bill['line_items'][0]['id'] == 'abcde-fghi-jklmn-opqrst-uvxz'
+ assert bill['line_items'][0]['index'] == 0
+ assert bill['line_items'][0]['post_date'] == '2015-09-09'
+ assert bill['line_items'][0]['title'] == 'Mercadopago Mlivre'
+ assert bill['linha_digitavel'] == ''
+ assert bill['payment_method'] == 'boleto'
+ assert bill['state'] == 'overdue'
+ assert bill['status'] == 'paid'
+ assert bill['summary']['adjustments'] == '0'
+ assert bill['summary']['close_date'] == '2015-09-25'
+ assert bill['summary']['due_date'] == '2015-10-10'
+ assert bill['summary']['effective_due_date'] == '2015-10-13'
+ assert bill['summary']['expenses'] == '78.8000'
+ assert bill['summary']['fees'] == '0'
+ assert bill['summary']['interest'] == 0
+ assert bill['summary']['interest_charge'] == '0'
+ assert bill['summary']['interest_rate'] == '0.0775'
+ assert bill['summary']['interest_reversal'] == '0'
+ assert bill['summary']['international_tax'] == '0'
+ assert bill['summary']['late_fee'] == '0.02'
+ assert bill['summary']['late_interest_rate'] == '0.0875'
+ assert bill['summary']['minimum_payment'] == 7005
+ assert bill['summary']['open_date'] == '2015-07-23'
+ assert bill['summary']['paid'] == 7880
+ assert bill['summary']['past_balance'] == 0
+ assert bill['summary']['payments'] == '0'
+ assert bill['summary']['precise_minimum_payment'] == '70.054500'
+ assert bill['summary']['precise_total_balance'] == '78.8000'
+ assert bill['summary']['previous_bill_balance'] == '0'
+ assert bill['summary']['tax'] == '0'
+ assert bill['summary']['total_accrued'] == '0'
+ assert bill['summary']['total_balance'] == 7880
+ assert bill['summary']['total_credits'] == '0'
+ assert bill['summary']['total_cumulative'] == 7880
+ assert bill['summary']['total_financed'] == '0'
+ assert bill['summary']['total_international'] == '0'
+ assert bill['summary']['total_national'] == '78.8000'
+ assert bill['summary']['total_payments'] == '0'
+
def test_get_card_statements(monkeypatch, authentication_return, events_return):
response = create_fake_response(authentication_return)
monkeypatch.setattr('requests.post', MagicMock(return_value=response))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
chardet==3.0.4
coverage==6.2
idna==2.5
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/andreroggeri/pynubank.git@b315dd9b34064d16cc18fe91c4f96102d2a1444a#egg=pynubank
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.18.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.21.1
zipp==3.6.0
| name: pynubank
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- chardet==3.0.4
- coverage==6.2
- idna==2.5
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.18.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.21.1
- zipp==3.6.0
prefix: /opt/conda/envs/pynubank
| [
"tests/test_nubank_client.py::test_get_bills",
"tests/test_nubank_client.py::test_get_bill_details"
]
| []
| [
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[100]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[101]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[102]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[103]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[201]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[202]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[203]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[204]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[205]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[206]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[207]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[208]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[226]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[300]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[301]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[302]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[303]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[304]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[305]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[306]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[307]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[308]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[400]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[401]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[402]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[403]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[404]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[405]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[406]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[407]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[408]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[409]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[410]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[411]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[412]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[413]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[414]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[415]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[416]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[417]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[418]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[420]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[421]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[422]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[423]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[424]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[426]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[428]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[429]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[431]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[440]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[444]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[449]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[450]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[451]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[495]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[496]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[497]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[498]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[499]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[500]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[501]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[502]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[503]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[504]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[505]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[506]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[507]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[508]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[509]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[510]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[511]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[520]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[521]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[522]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[523]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[524]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[525]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[526]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[527]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[530]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[598]",
"tests/test_nubank_client.py::test_authentication_succeeds",
"tests/test_nubank_client.py::test_get_card_feed",
"tests/test_nubank_client.py::test_get_card_statements",
"tests/test_nubank_client.py::test_get_account_balance",
"tests/test_nubank_client.py::test_get_account_feed",
"tests/test_nubank_client.py::test_get_account_statements",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[100]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[101]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[102]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[103]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[201]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[202]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[203]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[204]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[205]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[206]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[207]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[208]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[226]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[300]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[301]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[302]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[303]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[304]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[305]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[306]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[307]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[308]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[400]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[401]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[402]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[403]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[404]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[405]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[406]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[407]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[408]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[409]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[410]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[411]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[412]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[413]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[414]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[415]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[416]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[417]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[418]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[420]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[421]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[422]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[423]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[424]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[426]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[428]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[429]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[431]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[440]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[444]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[449]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[450]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[451]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[495]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[496]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[497]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[498]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[499]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[500]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[501]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[502]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[503]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[504]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[505]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[506]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[507]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[508]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[509]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[510]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[511]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[520]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[521]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[522]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[523]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[524]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[525]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[526]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[527]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[530]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[598]"
]
| []
| MIT License | 2,368 | [
".gitignore",
"README.md",
"pynubank/nubank.py"
]
| [
".gitignore",
"README.md",
"pynubank/nubank.py"
]
|
elastic__rally-461 | f245f685dcd625ac4d7f0fa151a885a4366f85a6 | 2018-04-06 11:06:12 | a5408e0d0d07b271b509df8057a7c73303604c10 | danielmitterdorfer: Note: There is no need to adapt our default tracks because this has already been done in https://github.com/elastic/rally-tracks/pull/41. | diff --git a/docs/command_line_reference.rst b/docs/command_line_reference.rst
index dd1855d5..afecc539 100644
--- a/docs/command_line_reference.rst
+++ b/docs/command_line_reference.rst
@@ -82,16 +82,15 @@ Consider the following track snippet showing a single challenge::
{
"name": "index-only",
- "index-settings": {
- "index.number_of_replicas": {{ replica_count|default(0) }},
- "index.number_of_shards": {{ shard_count|default(5) }},
- },
"schedule": [
- {
- "operation": "bulk-index",
- "warmup-time-period": 120,
- "clients": 8
- }
+ {
+ "operation": {
+ "operation-type": "bulk",
+ "bulk-size": {{ bulk_size|default(5000) }}
+ },
+ "warmup-time-period": 120,
+ "clients": {{ clients|default(8) }}
+ }
]
}
@@ -99,28 +98,28 @@ Rally tracks can use the Jinja templating language and the construct ``{{ some_v
We can see that it defines two variables:
-* ``replica_count`` with a default value of 0
-* ``shard_count`` with a default value of 5
+* ``bulk_size`` with a default value of 5000
+* ``clients`` with a default value of 8
When we run this track, we can override these defaults:
-* ``--track-params="replica_count:1,shard_count:3"`` will set the number of replicas to 1 and the number of shards to 3.
-* ``--track-params="replica_count:1"`` will just set the number of replicas to 1 and just keep the default value of 5 shards.
+* ``--track-params="bulk_size:2000,clients:16"`` will set the bulk size to 2000 and the number of clients for bulk indexing to 16.
+* ``--track-params="bulk_size:8000"`` will just set the bulk size to 8000 and keep the default value of 8 clients.
* ``--track-params="params.json"`` will read the track parameters from a JSON file (defined below)
Example JSON file::
{
- "replica_count": 1,
- "shard_count": 3
+ "bulk_size": 2000,
+ "clients": 16
}
All track parameters are recorded for each metrics record in the metrics store. Also, when you run ``esrally list races``, it will show all track parameters::
- Race Timestamp Track Track Parameters Challenge Car User Tag
- ---------------- ------- ------------------------------ ------------------- -------- ---------
- 20160518T122341Z pmc replica_count=1 append-no-conflicts defaults
- 20160518T112341Z pmc replica_count=1,shard_count=3 append-no-conflicts defaults
+ Race Timestamp Track Track Parameters Challenge Car User Tag
+ ---------------- ------- ------------------------- ------------------- -------- ---------
+ 20160518T122341Z pmc bulk_size=8000 append-no-conflicts defaults
+ 20160518T112341Z pmc bulk_size=2000,clients=16 append-no-conflicts defaults
Note that the default values are not recorded or shown (Rally does not know about them).
diff --git a/docs/metrics.rst b/docs/metrics.rst
index 9f952e04..d6ba4a61 100644
--- a/docs/metrics.rst
+++ b/docs/metrics.rst
@@ -149,4 +149,6 @@ Rally stores the following metrics:
* ``indexing_throttle_time``: Total time that indexing has been throttled as reported by the indices stats API. Note that this is not Wall clock time.
* ``refresh_total_time``: Total time used for index refresh as reported by the indices stats API. Note that this is not Wall clock time.
* ``flush_total_time``: Total time used for index flush as reported by the indices stats API. Note that this is not Wall clock time.
-* ``final_index_size_bytes``: Final resulting index size after the benchmark.
+* ``final_index_size_bytes``: Final resulting index size on the file system after all nodes have been shutdown at the end of the benchmark. It includes all files in the nodes' data directories (actual index files and translog).
+* ``store_size_in_bytes``: The size in bytes of the index (excluding the translog) as reported by the indices stats API.
+* ``translog_size_in_bytes``: The size in bytes of the translog as reported by the indices stats API.
diff --git a/docs/summary_report.rst b/docs/summary_report.rst
index e1edb140..46e9c16e 100644
--- a/docs/summary_report.rst
+++ b/docs/summary_report.rst
@@ -81,9 +81,21 @@ Total Old Gen GC
Index size
----------
-* **Definition**: Final resulting index size after the benchmark.
+* **Definition**: Final resulting index size on the file system after all nodes have been shutdown at the end of the benchmark. It includes all files in the nodes' data directories (actual index files and translog).
* **Corresponding metrics key**: ``final_index_size_bytes``
+Store size
+----------
+
+* **Definition**: The size in bytes of the index (excluding the translog) as reported by the indices stats API.
+* **Corresponding metrics key**: ``store_size_in_bytes``
+
+Translog size
+-------------
+
+* **Definition**: The size in bytes of the translog as reported by the indices stats API.
+* **Corresponding metrics key**: ``translog_size_in_bytes``
+
Totally written
---------------
diff --git a/docs/track.rst b/docs/track.rst
index 7af77b92..0bea3c76 100644
--- a/docs/track.rst
+++ b/docs/track.rst
@@ -665,7 +665,6 @@ Each challenge consists of the following properties:
* ``name`` (mandatory): A descriptive name of the challenge. Should not contain spaces in order to simplify handling on the command line for users.
* ``description`` (optional): A human readable description of the challenge.
* ``default`` (optional): If true, Rally selects this challenge by default if the user did not specify a challenge on the command line. If your track only defines one challenge, it is implicitly selected as default, otherwise you need define ``"default": true`` on exactly one challenge.
-* ``index-settings`` (optional): Defines the index settings of the benchmark candidate when an index is created.
* ``schedule`` (mandatory): Defines the concrete execution order of operations. It is described in more detail below.
.. note::
diff --git a/esrally/client.py b/esrally/client.py
index f81d432b..23677f52 100644
--- a/esrally/client.py
+++ b/esrally/client.py
@@ -12,7 +12,12 @@ class EsClientFactory:
Abstracts how the Elasticsearch client is created. Intended for testing.
"""
def __init__(self, hosts, client_options):
- logger.info("Creating ES client connected to %s with options [%s]" % (hosts, client_options))
+ masked_client_options = dict(client_options)
+ if "basic_auth_password" in masked_client_options:
+ masked_client_options["basic_auth_password"] = "*****"
+ if "http_auth" in masked_client_options:
+ masked_client_options["http_auth"] = (client_options["http_auth"][0], "*****")
+ logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)
self.hosts = hosts
self.client_options = client_options
diff --git a/esrally/mechanic/telemetry.py b/esrally/mechanic/telemetry.py
index 6d8ae9c0..27bcdd82 100644
--- a/esrally/mechanic/telemetry.py
+++ b/esrally/mechanic/telemetry.py
@@ -708,7 +708,7 @@ class IndexStats(InternalTelemetryDevice):
# the pipeline "benchmark-only" where we don't have control over the cluster and the user might not have restarted
# the cluster so we can at least tell them.
if self.first_time:
- index_times = self.index_times(self.primaries_index_stats())
+ index_times = self.index_times(self.index_stats()["primaries"])
for k, v in index_times.items():
if v > 0:
console.warn("%s is %d ms indicating that the cluster is not in a defined clean state. Recorded index time "
@@ -718,8 +718,9 @@ class IndexStats(InternalTelemetryDevice):
def on_benchmark_stop(self):
import json
logger.info("Gathering indices stats for all primaries on benchmark stop.")
- p = self.primaries_index_stats()
- logger.info("Returned indices stats:\n%s" % json.dumps(p, indent=2))
+ index_stats = self.index_stats()
+ logger.info("Returned indices stats:\n%s" % json.dumps(index_stats, indent=2))
+ p = index_stats["primaries"]
# actually this is add_count
self.add_metrics(self.extract_value(p, ["segments", "count"]), "segments_count")
self.add_metrics(self.extract_value(p, ["segments", "memory_in_bytes"]), "segments_memory_in_bytes", "byte")
@@ -733,12 +734,14 @@ class IndexStats(InternalTelemetryDevice):
self.add_metrics(self.extract_value(p, ["segments", "terms_memory_in_bytes"]), "segments_terms_memory_in_bytes", "byte")
self.add_metrics(self.extract_value(p, ["segments", "norms_memory_in_bytes"]), "segments_norms_memory_in_bytes", "byte")
self.add_metrics(self.extract_value(p, ["segments", "points_memory_in_bytes"]), "segments_points_memory_in_bytes", "byte")
+ self.add_metrics(self.extract_value(index_stats, ["total", "store", "size_in_bytes"]), "store_size_in_bytes", "byte")
+ self.add_metrics(self.extract_value(index_stats, ["total", "translog", "size_in_bytes"]), "translog_size_in_bytes", "byte")
- def primaries_index_stats(self):
+ def index_stats(self):
# noinspection PyBroadException
try:
stats = self.client.indices.stats(metric="_all", level="shards")
- return stats["_all"]["primaries"]
+ return stats["_all"]
except BaseException:
logger.exception("Could not retrieve index stats.")
return {}
diff --git a/esrally/reporter.py b/esrally/reporter.py
index ee0b997a..c1bcfdca 100644
--- a/esrally/reporter.py
+++ b/esrally/reporter.py
@@ -168,9 +168,13 @@ class StatsCalculator:
result.memory_points = self.median("segments_points_memory_in_bytes")
result.memory_stored_fields = self.median("segments_stored_fields_memory_in_bytes")
- # This metric will only be written for the last iteration (as it can only be determined after the cluster has been shut down)
logger.debug("Gathering disk metrics.")
+ # This metric will only be written for the last iteration (as it can only be determined after the cluster has been shut down)
result.index_size = self.sum("final_index_size_bytes")
+ # we need to use the median here because these two are captured with the indices stats API and thus once per lap. If we'd
+ # sum up the values we'd get wrong results for benchmarks that ran for multiple laps.
+ result.store_size = self.median("store_size_in_bytes")
+ result.translog_size = self.median("translog_size_in_bytes")
result.bytes_written = self.sum("disk_io_write_bytes")
# convert to int, fraction counts are senseless
@@ -264,6 +268,8 @@ class Stats:
self.memory_stored_fields = self.v(d, "memory_stored_fields")
self.index_size = self.v(d, "index_size")
+ self.store_size = self.v(d, "store_size")
+ self.translog_size = self.v(d, "translog_size")
self.bytes_written = self.v(d, "bytes_written")
self.segment_count = self.v(d, "segment_count")
@@ -497,6 +503,8 @@ class SummaryReporter:
def report_disk_usage(self, stats):
return self.join(
+ self.line("Store size", "", stats.store_size, "GB", convert.bytes_to_gb),
+ self.line("Translog size", "", stats.translog_size, "GB", convert.bytes_to_gb),
self.line("Index size", "", stats.index_size, "GB", convert.bytes_to_gb),
self.line("Totally written", "", stats.bytes_written, "GB", convert.bytes_to_gb)
)
@@ -688,6 +696,10 @@ class ComparisonReporter:
def report_disk_usage(self, baseline_stats, contender_stats):
return self.join(
+ self.line("Store size", baseline_stats.store_size, contender_stats.store_size, "", "GB",
+ treat_increase_as_improvement=False, formatter=convert.bytes_to_gb),
+ self.line("Translog size", baseline_stats.translog_size, contender_stats.translog_size, "", "GB",
+ treat_increase_as_improvement=False, formatter=convert.bytes_to_gb),
self.line("Index size", baseline_stats.index_size, contender_stats.index_size, "", "GB",
treat_increase_as_improvement=False, formatter=convert.bytes_to_gb),
self.line("Totally written", baseline_stats.bytes_written, contender_stats.bytes_written, "", "GB",
diff --git a/esrally/resources/track-schema.json b/esrally/resources/track-schema.json
index f148778f..8e9c3829 100644
--- a/esrally/resources/track-schema.json
+++ b/esrally/resources/track-schema.json
@@ -29,10 +29,6 @@
"type": "object",
"description": "Defines the cluster settings of the benchmark candidate."
},
- "index-settings": {
- "type": "object",
- "description": "Defines the index settings of the benchmark candidate when an index is created."
- },
"schedule": {
"type": "array",
"minItems": 1,
diff --git a/esrally/track/loader.py b/esrally/track/loader.py
index 5adf6d71..d04c516d 100644
--- a/esrally/track/loader.py
+++ b/esrally/track/loader.py
@@ -829,14 +829,8 @@ class TrackSpecificationReader:
meta_data = self._r(challenge_spec, "meta", error_ctx=name, mandatory=False)
# if we only have one challenge it is treated as default challenge, no matter what the user has specified
default = number_of_challenges == 1 or self._r(challenge_spec, "default", error_ctx=name, mandatory=False)
- # TODO #381: Remove this setting
- index_settings = self._r(challenge_spec, "index-settings", error_ctx=name, mandatory=False)
cluster_settings = self._r(challenge_spec, "cluster-settings", error_ctx=name, mandatory=False)
- if index_settings and self.name not in DEFAULT_TRACKS:
- console.warn("Challenge [%s] in track [%s] defines the [index-settings] property which will be removed soon. For details "
- "please see the migration guide in the docs." % (name, self.name))
-
if default and default_challenge is not None:
self._error("Both '%s' and '%s' are defined as default challenges. Please define only one of them as default."
% (default_challenge.name, name))
@@ -867,7 +861,6 @@ class TrackSpecificationReader:
meta_data=meta_data,
description=description,
user_info=user_info,
- index_settings=index_settings,
cluster_settings=cluster_settings,
default=default,
schedule=schedule)
diff --git a/esrally/track/track.py b/esrally/track/track.py
index 2bd67231..1b9dde50 100644
--- a/esrally/track/track.py
+++ b/esrally/track/track.py
@@ -349,7 +349,6 @@ class Challenge:
name,
description=None,
user_info=None,
- index_settings=None,
cluster_settings=None,
default=False,
meta_data=None,
@@ -358,7 +357,6 @@ class Challenge:
self.meta_data = meta_data if meta_data else {}
self.description = description
self.user_info = user_info
- self.index_settings = index_settings if index_settings else {}
self.cluster_settings = cluster_settings if cluster_settings else {}
self.default = default
self.schedule = schedule if schedule else []
@@ -379,13 +377,13 @@ class Challenge:
return ", ".join(r)
def __hash__(self):
- return hash(self.name) ^ hash(self.description) ^ hash(self.index_settings) ^ hash(self.cluster_settings) ^ hash(self.default) ^ \
+ return hash(self.name) ^ hash(self.description) ^ hash(self.cluster_settings) ^ hash(self.default) ^ \
hash(self.meta_data) ^ hash(self.schedule)
def __eq__(self, othr):
return (isinstance(othr, type(self)) and
- (self.name, self.description, self.index_settings, self.cluster_settings, self.default, self.meta_data, self.schedule) ==
- (othr.name, othr.description, othr.index_settings, othr.cluster_settings, othr.default, othr.meta_data, othr.schedule))
+ (self.name, self.description, self.cluster_settings, self.default, self.meta_data, self.schedule) ==
+ (othr.name, othr.description, othr.cluster_settings, othr.default, othr.meta_data, othr.schedule))
@unique
diff --git a/run.sh b/run.sh
index 6fdd7311..5c952382 100755
--- a/run.sh
+++ b/run.sh
@@ -18,9 +18,9 @@ install_esrally_with_setuptools () {
fi
if [[ ${IN_VIRTUALENV} == 0 ]]; then
- python3 setup.py -q develop --user
+ python3 setup.py -q develop --user --upgrade
else
- python3 setup.py -q develop
+ python3 setup.py -q develop --upgrade
fi
}
| Remove 'index-settings' property
With Rally 0.9.0 we introduced a new "create-index" operation which allows defining a "settings" property, making the dedicated "index-settings" property obsolete. We will therefore remove it. | elastic/rally | diff --git a/tests/mechanic/telemetry_test.py b/tests/mechanic/telemetry_test.py
index 28a2aae5..58f6e1eb 100644
--- a/tests/mechanic/telemetry_test.py
+++ b/tests/mechanic/telemetry_test.py
@@ -764,6 +764,17 @@ class IndexStatsTests(TestCase):
"flush": {
"total_time_in_millis": 100
}
+ },
+ "total": {
+ "store": {
+ "size_in_bytes": 2113867510
+ },
+ "translog": {
+ "operations": 6840000,
+ "size_in_bytes": 2647984713,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 430
+ }
}
}
})
@@ -784,7 +795,8 @@ class IndexStatsTests(TestCase):
mock.call("segments_stored_fields_memory_in_bytes", 1024, "byte"),
mock.call("segments_terms_memory_in_bytes", 256, "byte"),
# we don't have norms, so nothing should have been called
- mock.call("segments_points_memory_in_bytes", 512, "byte"),
+ mock.call("store_size_in_bytes", 2113867510, "byte"),
+ mock.call("translog_size_in_bytes", 2647984713, "byte"),
], any_order=True)
@mock.patch("esrally.metrics.EsMetricsStore.put_value_cluster_level")
diff --git a/tests/metrics_test.py b/tests/metrics_test.py
index 1c676b4a..ea1224ca 100644
--- a/tests/metrics_test.py
+++ b/tests/metrics_test.py
@@ -668,7 +668,7 @@ class EsResultsStoreTests(TestCase):
t = track.Track(name="unittest-track",
indices=[track.Index(name="tests", types=["test-type"])],
- challenges=[track.Challenge(name="index", default=True, index_settings=None, schedule=schedule)])
+ challenges=[track.Challenge(name="index", default=True, schedule=schedule)])
c = cluster.Cluster([], [], None)
c.distribution_version = "5.0.0"
diff --git a/tests/track/loader_test.py b/tests/track/loader_test.py
index 85f8bb31..7afea9a7 100644
--- a/tests/track/loader_test.py
+++ b/tests/track/loader_test.py
@@ -691,7 +691,6 @@ class TrackPostProcessingTests(TestCase):
{
"name": "default-challenge",
"description": "Default challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -764,7 +763,6 @@ class TrackPostProcessingTests(TestCase):
{
"name": "default-challenge",
"description": "Default challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -1024,7 +1022,6 @@ class TrackSpecificationReaderTests(TestCase):
"challenges": [
{
"name": "default-challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -1148,7 +1145,6 @@ class TrackSpecificationReaderTests(TestCase):
"challenges": [
{
"name": "default-challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -1327,9 +1323,6 @@ class TrackSpecificationReaderTests(TestCase):
"mixed": True,
"max-clients": 8
},
- "index-settings": {
- "index.number_of_replicas": 2
- },
"schedule": [
{
"clients": 8,
@@ -1414,8 +1407,6 @@ class TrackSpecificationReaderTests(TestCase):
self.assertEqual(1, len(resulting_track.challenges))
self.assertEqual("default-challenge", resulting_track.challenges[0].name)
self.assertEqual("Default challenge", resulting_track.challenges[0].description)
- self.assertEqual(1, len(resulting_track.challenges[0].index_settings))
- self.assertEqual(2, resulting_track.challenges[0].index_settings["index.number_of_replicas"])
self.assertEqual({"mixed": True, "max-clients": 8}, resulting_track.challenges[0].meta_data)
self.assertEqual({"append": True}, resulting_track.challenges[0].schedule[0].operation.meta_data)
self.assertEqual({"operation-index": 0}, resulting_track.challenges[0].schedule[0].meta_data)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 11
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"tox",
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
elasticsearch==6.0.0
-e git+https://github.com/elastic/rally.git@f245f685dcd625ac4d7f0fa151a885a4366f85a6#egg=esrally
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
six==1.17.0
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- elasticsearch==6.0.0
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- platformdirs==2.4.0
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- six==1.17.0
- tabulate==0.8.1
- thespian==3.9.2
- tox==3.28.0
- urllib3==1.22
- virtualenv==20.17.1
prefix: /opt/conda/envs/rally
| [
"tests/mechanic/telemetry_test.py::IndexStatsTests::test_stores_available_index_stats"
]
| []
| [
"tests/mechanic/telemetry_test.py::TelemetryTests::test_merges_options_set_by_different_devices",
"tests/mechanic/telemetry_test.py::StartupTimeTests::test_store_calculated_metrics",
"tests/mechanic/telemetry_test.py::MergePartsDeviceTests::test_store_calculated_metrics",
"tests/mechanic/telemetry_test.py::MergePartsDeviceTests::test_store_nothing_if_no_metrics_present",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_java_9_or_above_custom_recording_template",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_java_9_or_above_default_recording_template",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_pre_java_9_custom_recording_template",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_pre_java_9_default_recording_template",
"tests/mechanic/telemetry_test.py::GcTests::test_sets_options_for_java_9_or_above",
"tests/mechanic/telemetry_test.py::GcTests::test_sets_options_for_pre_java_9",
"tests/mechanic/telemetry_test.py::ClusterEnvironmentInfoTests::test_stores_cluster_level_metrics_on_attach",
"tests/mechanic/telemetry_test.py::NodeEnvironmentInfoTests::test_stores_node_level_metrics_on_attach",
"tests/mechanic/telemetry_test.py::ExternalEnvironmentInfoTests::test_fallback_when_host_not_available",
"tests/mechanic/telemetry_test.py::ExternalEnvironmentInfoTests::test_stores_all_node_metrics_on_attach",
"tests/mechanic/telemetry_test.py::ClusterMetaDataInfoTests::test_enriches_cluster_nodes_for_elasticsearch_1_x",
"tests/mechanic/telemetry_test.py::ClusterMetaDataInfoTests::test_enriches_cluster_nodes_for_elasticsearch_after_1_x",
"tests/mechanic/telemetry_test.py::NodeStatsTests::test_stores_only_diff_of_gc_times",
"tests/mechanic/telemetry_test.py::IndexStatsTests::test_index_stats_are_per_lap",
"tests/mechanic/telemetry_test.py::IndexSizeTests::test_stores_index_size_for_data_paths",
"tests/mechanic/telemetry_test.py::IndexSizeTests::test_stores_nothing_if_no_data_path",
"tests/metrics_test.py::ExtractUserTagsTests::test_extracts_proper_user_tags",
"tests/metrics_test.py::ExtractUserTagsTests::test_missing_comma_raises_error",
"tests/metrics_test.py::ExtractUserTagsTests::test_missing_value_raises_error",
"tests/metrics_test.py::ExtractUserTagsTests::test_no_tags_returns_empty_dict",
"tests/metrics_test.py::EsClientTests::test_fails_after_too_many_timeouts",
"tests/metrics_test.py::EsClientTests::test_raises_rally_error_on_unknown_problems",
"tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_authentication_problems",
"tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_authorization_problems",
"tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_connection_problems",
"tests/metrics_test.py::EsClientTests::test_retries_on_timeouts",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_additional_unknown_key",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_explicit_one",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_explicit_zero",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_implicit_one",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_implicit_zero",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_mixed",
"tests/metrics_test.py::EsMetricsTests::test_get_median",
"tests/metrics_test.py::EsMetricsTests::test_get_value",
"tests/metrics_test.py::EsMetricsTests::test_put_value_with_explicit_timestamps",
"tests/metrics_test.py::EsMetricsTests::test_put_value_with_meta_info",
"tests/metrics_test.py::EsMetricsTests::test_put_value_without_meta_info",
"tests/metrics_test.py::EsRaceStoreTests::test_store_race",
"tests/metrics_test.py::EsResultsStoreTests::test_store_results",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_externalize_and_bulk_add",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_by_sample_type",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_mixed",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_zero_without_samples",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_median",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_percentile",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_value",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_meta_data_per_document",
"tests/metrics_test.py::FileRaceStoreTests::test_store_race",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory_without_track",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file_but_not_json",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_named_pipe",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_non_existing_path",
"tests/track/loader_test.py::GitRepositoryTests::test_track_from_existing_repo",
"tests/track/loader_test.py::TrackPreparationTests::test_decompresses_if_archive_available",
"tests/track/loader_test.py::TrackPreparationTests::test_does_nothing_if_document_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_download_document_archive_if_no_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_download_document_file_if_no_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_decompresses_compressed_docs",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_does_nothing_if_no_document_files",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_error_compressed_docs_wrong_size",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_if_document_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_uncompressed_docs_wrong_size",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_file_missing",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_wrong_file_size",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_offline",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_no_test_mode_file",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_on_connection_problems",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_error_if_compressed_does_not_contain_expected_document_file",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_error_on_wrong_uncompressed_file_size",
"tests/track/loader_test.py::TemplateRenderTests::test_render_simple_template",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_external_variables",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_globbing",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_variables",
"tests/track/loader_test.py::TrackPostProcessingTests::test_post_processes_track_spec",
"tests/track/loader_test.py::TrackPathTests::test_sets_absolute_path",
"tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_empty_included_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_mixed_included_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_filters_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_rejects_invalid_syntax",
"tests/track/loader_test.py::TrackFilterTests::test_rejects_unknown_filter_type",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_at_least_one_default_challenge",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_can_read_track_info",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_description_is_optional",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_document_count_mandatory_if_file_present",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_exactly_one_default_challenge",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_inline_operations",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_not_more_than_one_default_challenge_possible",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_multiple_tasks_match",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_no_task_matches",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_clients_does_not_propagate",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_values",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_challenge_and_challenges_are_defined",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_explicit_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_implicit_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_missing_challenge_or_challenges",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_unique_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification_with_index_template",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_iterations_and_measurement",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_time_period_and_iterations",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_selects_sole_challenge_implicitly_as_default",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_interval",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_throughput",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_unique_challenge_names"
]
| []
| Apache License 2.0 | 2,369 | [
"docs/command_line_reference.rst",
"docs/metrics.rst",
"esrally/client.py",
"esrally/track/track.py",
"esrally/resources/track-schema.json",
"docs/track.rst",
"run.sh",
"esrally/reporter.py",
"esrally/mechanic/telemetry.py",
"docs/summary_report.rst",
"esrally/track/loader.py"
]
| [
"docs/command_line_reference.rst",
"docs/metrics.rst",
"esrally/client.py",
"esrally/track/track.py",
"esrally/resources/track-schema.json",
"docs/track.rst",
"run.sh",
"esrally/reporter.py",
"esrally/mechanic/telemetry.py",
"docs/summary_report.rst",
"esrally/track/loader.py"
]
|
elastic__rally-462 | f245f685dcd625ac4d7f0fa151a885a4366f85a6 | 2018-04-06 13:36:46 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/docs/command_line_reference.rst b/docs/command_line_reference.rst
index dd1855d5..afecc539 100644
--- a/docs/command_line_reference.rst
+++ b/docs/command_line_reference.rst
@@ -82,16 +82,15 @@ Consider the following track snippet showing a single challenge::
{
"name": "index-only",
- "index-settings": {
- "index.number_of_replicas": {{ replica_count|default(0) }},
- "index.number_of_shards": {{ shard_count|default(5) }},
- },
"schedule": [
- {
- "operation": "bulk-index",
- "warmup-time-period": 120,
- "clients": 8
- }
+ {
+ "operation": {
+ "operation-type": "bulk",
+ "bulk-size": {{ bulk_size|default(5000) }}
+ },
+ "warmup-time-period": 120,
+ "clients": {{ clients|default(8) }}
+ }
]
}
@@ -99,28 +98,28 @@ Rally tracks can use the Jinja templating language and the construct ``{{ some_v
We can see that it defines two variables:
-* ``replica_count`` with a default value of 0
-* ``shard_count`` with a default value of 5
+* ``bulk_size`` with a default value of 5000
+* ``clients`` with a default value of 8
When we run this track, we can override these defaults:
-* ``--track-params="replica_count:1,shard_count:3"`` will set the number of replicas to 1 and the number of shards to 3.
-* ``--track-params="replica_count:1"`` will just set the number of replicas to 1 and just keep the default value of 5 shards.
+* ``--track-params="bulk_size:2000,clients:16"`` will set the bulk size to 2000 and the number of clients for bulk indexing to 16.
+* ``--track-params="bulk_size:8000"`` will just set the bulk size to 8000 and keep the default value of 8 clients.
* ``--track-params="params.json"`` will read the track parameters from a JSON file (defined below)
Example JSON file::
{
- "replica_count": 1,
- "shard_count": 3
+ "bulk_size": 2000,
+ "clients": 16
}
All track parameters are recorded for each metrics record in the metrics store. Also, when you run ``esrally list races``, it will show all track parameters::
- Race Timestamp Track Track Parameters Challenge Car User Tag
- ---------------- ------- ------------------------------ ------------------- -------- ---------
- 20160518T122341Z pmc replica_count=1 append-no-conflicts defaults
- 20160518T112341Z pmc replica_count=1,shard_count=3 append-no-conflicts defaults
+ Race Timestamp Track Track Parameters Challenge Car User Tag
+ ---------------- ------- ------------------------- ------------------- -------- ---------
+ 20160518T122341Z pmc bulk_size=8000 append-no-conflicts defaults
+ 20160518T112341Z pmc bulk_size=2000,clients=16 append-no-conflicts defaults
Note that the default values are not recorded or shown (Rally does not know about them).
diff --git a/docs/metrics.rst b/docs/metrics.rst
index 9f952e04..d6ba4a61 100644
--- a/docs/metrics.rst
+++ b/docs/metrics.rst
@@ -149,4 +149,6 @@ Rally stores the following metrics:
* ``indexing_throttle_time``: Total time that indexing has been throttled as reported by the indices stats API. Note that this is not Wall clock time.
* ``refresh_total_time``: Total time used for index refresh as reported by the indices stats API. Note that this is not Wall clock time.
* ``flush_total_time``: Total time used for index flush as reported by the indices stats API. Note that this is not Wall clock time.
-* ``final_index_size_bytes``: Final resulting index size after the benchmark.
+* ``final_index_size_bytes``: Final resulting index size on the file system after all nodes have been shut down at the end of the benchmark. It includes all files in the nodes' data directories (actual index files and translog).
+* ``store_size_in_bytes``: The size in bytes of the index (excluding the translog) as reported by the indices stats API.
+* ``translog_size_in_bytes``: The size in bytes of the translog as reported by the indices stats API.
diff --git a/docs/summary_report.rst b/docs/summary_report.rst
index e1edb140..46e9c16e 100644
--- a/docs/summary_report.rst
+++ b/docs/summary_report.rst
@@ -81,9 +81,21 @@ Total Old Gen GC
Index size
----------
-* **Definition**: Final resulting index size after the benchmark.
+* **Definition**: Final resulting index size on the file system after all nodes have been shut down at the end of the benchmark. It includes all files in the nodes' data directories (actual index files and translog).
* **Corresponding metrics key**: ``final_index_size_bytes``
+Store size
+----------
+
+* **Definition**: The size in bytes of the index (excluding the translog) as reported by the indices stats API.
+* **Corresponding metrics key**: ``store_size_in_bytes``
+
+Translog size
+-------------
+
+* **Definition**: The size in bytes of the translog as reported by the indices stats API.
+* **Corresponding metrics key**: ``translog_size_in_bytes``
+
Totally written
---------------
diff --git a/docs/track.rst b/docs/track.rst
index 7af77b92..0bea3c76 100644
--- a/docs/track.rst
+++ b/docs/track.rst
@@ -665,7 +665,6 @@ Each challenge consists of the following properties:
* ``name`` (mandatory): A descriptive name of the challenge. Should not contain spaces in order to simplify handling on the command line for users.
* ``description`` (optional): A human readable description of the challenge.
* ``default`` (optional): If true, Rally selects this challenge by default if the user did not specify a challenge on the command line. If your track only defines one challenge, it is implicitly selected as default, otherwise you need define ``"default": true`` on exactly one challenge.
-* ``index-settings`` (optional): Defines the index settings of the benchmark candidate when an index is created.
* ``schedule`` (mandatory): Defines the concrete execution order of operations. It is described in more detail below.
.. note::
diff --git a/esrally/client.py b/esrally/client.py
index f81d432b..8da59ff8 100644
--- a/esrally/client.py
+++ b/esrally/client.py
@@ -12,22 +12,47 @@ class EsClientFactory:
Abstracts how the Elasticsearch client is created. Intended for testing.
"""
def __init__(self, hosts, client_options):
- logger.info("Creating ES client connected to %s with options [%s]" % (hosts, client_options))
+ masked_client_options = dict(client_options)
+ if "basic_auth_password" in masked_client_options:
+ masked_client_options["basic_auth_password"] = "*****"
+ if "http_auth" in masked_client_options:
+ masked_client_options["http_auth"] = (client_options["http_auth"][0], "*****")
+ logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)
self.hosts = hosts
self.client_options = client_options
+ self.ssl_context = None
+
+ # we're using an SSL context now and it is not allowed to have use_ssl present in client options anymore
+ if client_options.pop("use_ssl", False):
+ import ssl
+ from elasticsearch.connection import create_ssl_context
+ logger.info("SSL support: on")
+ client_options["scheme"] = "https"
+
+ self.ssl_context = create_ssl_context(cafile=client_options.pop("ca_certs", certifi.where()))
+
+ if not client_options.pop("verify_certs", True):
+ logger.info("SSL certificate verification: off")
+ self.ssl_context.check_hostname = False
+ self.ssl_context.verify_mode = ssl.CERT_NONE
+
+ logger.warning("User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
+ "benchmark. Disabling urllib warnings now to avoid a logging storm. "
+ "See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details.")
+ # disable: "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
+ # advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
+ urllib3.disable_warnings()
+ else:
+ logger.info("SSL certificate verification: on")
+ else:
+ logger.info("SSL support: off")
+ client_options["scheme"] = "http"
- if self._is_set(client_options, "use_ssl") and self._is_set(client_options, "verify_certs") and "ca_certs" not in client_options:
- self.client_options["ca_certs"] = certifi.where()
- elif self._is_set(client_options, "use_ssl") and not self._is_set(client_options, "verify_certs"):
- logger.warning("User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a benchmark. "
- "Disabling urllib warnings now to avoid a logging storm. "
- "See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details.")
- # disable: "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
- # advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
- urllib3.disable_warnings()
if self._is_set(client_options, "basic_auth_user") and self._is_set(client_options, "basic_auth_password"):
- # Maybe we should remove these keys from the dict?
- self.client_options["http_auth"] = (client_options["basic_auth_user"], client_options["basic_auth_password"])
+ logger.info("HTTP basic authentication: on")
+ self.client_options["http_auth"] = (client_options.pop("basic_auth_user"), client_options.pop("basic_auth_password"))
+ else:
+ logger.info("HTTP basic authentication: off")
def _is_set(self, client_opts, k):
try:
@@ -55,8 +80,12 @@ class EsClientFactory:
def __init__(self, compressed=False, **kwargs):
super(ConfigurableHttpConnection, self).__init__(**kwargs)
if compressed:
+ logger.info("HTTP compression: on")
self.headers.update(urllib3.make_headers(accept_encoding=True))
self.headers.update({"Content-Encoding": "gzip"})
+ else:
+ logger.info("HTTP compression: off")
self.pool = PoolWrap(self.pool, **kwargs)
- return elasticsearch.Elasticsearch(hosts=self.hosts, connection_class=ConfigurableHttpConnection, **self.client_options)
+ return elasticsearch.Elasticsearch(hosts=self.hosts, connection_class=ConfigurableHttpConnection,
+ ssl_context=self.ssl_context, **self.client_options)
diff --git a/esrally/mechanic/telemetry.py b/esrally/mechanic/telemetry.py
index 6d8ae9c0..27bcdd82 100644
--- a/esrally/mechanic/telemetry.py
+++ b/esrally/mechanic/telemetry.py
@@ -708,7 +708,7 @@ class IndexStats(InternalTelemetryDevice):
# the pipeline "benchmark-only" where we don't have control over the cluster and the user might not have restarted
# the cluster so we can at least tell them.
if self.first_time:
- index_times = self.index_times(self.primaries_index_stats())
+ index_times = self.index_times(self.index_stats()["primaries"])
for k, v in index_times.items():
if v > 0:
console.warn("%s is %d ms indicating that the cluster is not in a defined clean state. Recorded index time "
@@ -718,8 +718,9 @@ class IndexStats(InternalTelemetryDevice):
def on_benchmark_stop(self):
import json
logger.info("Gathering indices stats for all primaries on benchmark stop.")
- p = self.primaries_index_stats()
- logger.info("Returned indices stats:\n%s" % json.dumps(p, indent=2))
+ index_stats = self.index_stats()
+ logger.info("Returned indices stats:\n%s" % json.dumps(index_stats, indent=2))
+ p = index_stats["primaries"]
# actually this is add_count
self.add_metrics(self.extract_value(p, ["segments", "count"]), "segments_count")
self.add_metrics(self.extract_value(p, ["segments", "memory_in_bytes"]), "segments_memory_in_bytes", "byte")
@@ -733,12 +734,14 @@ class IndexStats(InternalTelemetryDevice):
self.add_metrics(self.extract_value(p, ["segments", "terms_memory_in_bytes"]), "segments_terms_memory_in_bytes", "byte")
self.add_metrics(self.extract_value(p, ["segments", "norms_memory_in_bytes"]), "segments_norms_memory_in_bytes", "byte")
self.add_metrics(self.extract_value(p, ["segments", "points_memory_in_bytes"]), "segments_points_memory_in_bytes", "byte")
+ self.add_metrics(self.extract_value(index_stats, ["total", "store", "size_in_bytes"]), "store_size_in_bytes", "byte")
+ self.add_metrics(self.extract_value(index_stats, ["total", "translog", "size_in_bytes"]), "translog_size_in_bytes", "byte")
- def primaries_index_stats(self):
+ def index_stats(self):
# noinspection PyBroadException
try:
stats = self.client.indices.stats(metric="_all", level="shards")
- return stats["_all"]["primaries"]
+ return stats["_all"]
except BaseException:
logger.exception("Could not retrieve index stats.")
return {}
diff --git a/esrally/metrics.py b/esrally/metrics.py
index 1e284dd7..44ce00c6 100644
--- a/esrally/metrics.py
+++ b/esrally/metrics.py
@@ -132,18 +132,24 @@ class EsClientFactory:
password = self._config.opts("reporting", "datastore.password")
verify = self._config.opts("reporting", "datastore.ssl.verification_mode", default_value="full", mandatory=False) != "none"
ca_path = self._config.opts("reporting", "datastore.ssl.certificate_authorities", default_value=None, mandatory=False)
- if ca_path is None and verify:
- ca_path = certifi.where()
+ from esrally import client
+
+ # Instead of duplicating code, we're just adapting the metrics store specific properties to match the regular client options.
+ client_options = {
+ "use_ssl": secure,
+ "verify_certs": verify,
+ "timeout": 120
+ }
+ if ca_path:
+ client_options["ca_certs"] = ca_path
if user and password:
- auth = (user, password)
- else:
- auth = None
- logger.info("Creating connection to metrics store at %s:%s" % (host, port))
- import elasticsearch
- self._client = elasticsearch.Elasticsearch(hosts=[{"host": host, "port": port}],
- use_ssl=secure, http_auth=auth, verify_certs=verify, ca_certs=ca_path,
- timeout=120, request_timeout=120)
+ client_options["basic_auth_user"] = user
+ client_options["basic_auth_password"] = password
+
+ logger.info("Creating connection to metrics store at %s:%s", host, port)
+ factory = client.EsClientFactory(hosts=[{"host": host, "port": port}], client_options=client_options)
+ self._client = factory.create()
def create(self):
return EsClient(self._client)
diff --git a/esrally/reporter.py b/esrally/reporter.py
index ee0b997a..c1bcfdca 100644
--- a/esrally/reporter.py
+++ b/esrally/reporter.py
@@ -168,9 +168,13 @@ class StatsCalculator:
result.memory_points = self.median("segments_points_memory_in_bytes")
result.memory_stored_fields = self.median("segments_stored_fields_memory_in_bytes")
- # This metric will only be written for the last iteration (as it can only be determined after the cluster has been shut down)
logger.debug("Gathering disk metrics.")
+ # This metric will only be written for the last iteration (as it can only be determined after the cluster has been shut down)
result.index_size = self.sum("final_index_size_bytes")
+ # we need to use the median here because these two are captured with the indices stats API and thus once per lap. If we'd
+ # sum up the values we'd get wrong results for benchmarks that ran for multiple laps.
+ result.store_size = self.median("store_size_in_bytes")
+ result.translog_size = self.median("translog_size_in_bytes")
result.bytes_written = self.sum("disk_io_write_bytes")
# convert to int, fraction counts are senseless
@@ -264,6 +268,8 @@ class Stats:
self.memory_stored_fields = self.v(d, "memory_stored_fields")
self.index_size = self.v(d, "index_size")
+ self.store_size = self.v(d, "store_size")
+ self.translog_size = self.v(d, "translog_size")
self.bytes_written = self.v(d, "bytes_written")
self.segment_count = self.v(d, "segment_count")
@@ -497,6 +503,8 @@ class SummaryReporter:
def report_disk_usage(self, stats):
return self.join(
+ self.line("Store size", "", stats.store_size, "GB", convert.bytes_to_gb),
+ self.line("Translog size", "", stats.translog_size, "GB", convert.bytes_to_gb),
self.line("Index size", "", stats.index_size, "GB", convert.bytes_to_gb),
self.line("Totally written", "", stats.bytes_written, "GB", convert.bytes_to_gb)
)
@@ -688,6 +696,10 @@ class ComparisonReporter:
def report_disk_usage(self, baseline_stats, contender_stats):
return self.join(
+ self.line("Store size", baseline_stats.store_size, contender_stats.store_size, "", "GB",
+ treat_increase_as_improvement=False, formatter=convert.bytes_to_gb),
+ self.line("Translog size", baseline_stats.translog_size, contender_stats.translog_size, "", "GB",
+ treat_increase_as_improvement=False, formatter=convert.bytes_to_gb),
self.line("Index size", baseline_stats.index_size, contender_stats.index_size, "", "GB",
treat_increase_as_improvement=False, formatter=convert.bytes_to_gb),
self.line("Totally written", baseline_stats.bytes_written, contender_stats.bytes_written, "", "GB",
diff --git a/esrally/resources/track-schema.json b/esrally/resources/track-schema.json
index f148778f..8e9c3829 100644
--- a/esrally/resources/track-schema.json
+++ b/esrally/resources/track-schema.json
@@ -29,10 +29,6 @@
"type": "object",
"description": "Defines the cluster settings of the benchmark candidate."
},
- "index-settings": {
- "type": "object",
- "description": "Defines the index settings of the benchmark candidate when an index is created."
- },
"schedule": {
"type": "array",
"minItems": 1,
diff --git a/esrally/track/loader.py b/esrally/track/loader.py
index 5adf6d71..d04c516d 100644
--- a/esrally/track/loader.py
+++ b/esrally/track/loader.py
@@ -829,14 +829,8 @@ class TrackSpecificationReader:
meta_data = self._r(challenge_spec, "meta", error_ctx=name, mandatory=False)
# if we only have one challenge it is treated as default challenge, no matter what the user has specified
default = number_of_challenges == 1 or self._r(challenge_spec, "default", error_ctx=name, mandatory=False)
- # TODO #381: Remove this setting
- index_settings = self._r(challenge_spec, "index-settings", error_ctx=name, mandatory=False)
cluster_settings = self._r(challenge_spec, "cluster-settings", error_ctx=name, mandatory=False)
- if index_settings and self.name not in DEFAULT_TRACKS:
- console.warn("Challenge [%s] in track [%s] defines the [index-settings] property which will be removed soon. For details "
- "please see the migration guide in the docs." % (name, self.name))
-
if default and default_challenge is not None:
self._error("Both '%s' and '%s' are defined as default challenges. Please define only one of them as default."
% (default_challenge.name, name))
@@ -867,7 +861,6 @@ class TrackSpecificationReader:
meta_data=meta_data,
description=description,
user_info=user_info,
- index_settings=index_settings,
cluster_settings=cluster_settings,
default=default,
schedule=schedule)
diff --git a/esrally/track/track.py b/esrally/track/track.py
index 2bd67231..1b9dde50 100644
--- a/esrally/track/track.py
+++ b/esrally/track/track.py
@@ -349,7 +349,6 @@ class Challenge:
name,
description=None,
user_info=None,
- index_settings=None,
cluster_settings=None,
default=False,
meta_data=None,
@@ -358,7 +357,6 @@ class Challenge:
self.meta_data = meta_data if meta_data else {}
self.description = description
self.user_info = user_info
- self.index_settings = index_settings if index_settings else {}
self.cluster_settings = cluster_settings if cluster_settings else {}
self.default = default
self.schedule = schedule if schedule else []
@@ -379,13 +377,13 @@ class Challenge:
return ", ".join(r)
def __hash__(self):
- return hash(self.name) ^ hash(self.description) ^ hash(self.index_settings) ^ hash(self.cluster_settings) ^ hash(self.default) ^ \
+ return hash(self.name) ^ hash(self.description) ^ hash(self.cluster_settings) ^ hash(self.default) ^ \
hash(self.meta_data) ^ hash(self.schedule)
def __eq__(self, othr):
return (isinstance(othr, type(self)) and
- (self.name, self.description, self.index_settings, self.cluster_settings, self.default, self.meta_data, self.schedule) ==
- (othr.name, othr.description, othr.index_settings, othr.cluster_settings, othr.default, othr.meta_data, othr.schedule))
+ (self.name, self.description, self.cluster_settings, self.default, self.meta_data, self.schedule) ==
+ (othr.name, othr.description, othr.cluster_settings, othr.default, othr.meta_data, othr.schedule))
@unique
diff --git a/run.sh b/run.sh
index 6fdd7311..5c952382 100755
--- a/run.sh
+++ b/run.sh
@@ -18,9 +18,9 @@ install_esrally_with_setuptools () {
fi
if [[ ${IN_VIRTUALENV} == 0 ]]; then
- python3 setup.py -q develop --user
+ python3 setup.py -q develop --user --upgrade
else
- python3 setup.py -q develop
+ python3 setup.py -q develop --upgrade
fi
}
diff --git a/setup.py b/setup.py
index 9ad27054..0accae0e 100644
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@ long_description = str_from_file("README.rst")
#
################################################################################################
install_requires = [
- "elasticsearch==6.0.0",
+ "elasticsearch==6.2.0",
"psutil==5.4.0",
"py-cpuinfo==3.2.0",
"tabulate==0.8.1",
| Upgrade Elasticsearch client to 6.2.0
The Elasticsearch 6.1.1 client adds the ability to set custom HTTP headers which is necessary for some API calls (e.g. `msearch`, see also the [respective Discuss thread](https://discuss.elastic.co/t/msearch-capability/110702/11)).
When we upgrade, we should also reimplement SSL handling as described in https://github.com/elastic/elasticsearch-py/issues/712#issuecomment-357750570. | elastic/rally | diff --git a/tests/mechanic/telemetry_test.py b/tests/mechanic/telemetry_test.py
index 28a2aae5..58f6e1eb 100644
--- a/tests/mechanic/telemetry_test.py
+++ b/tests/mechanic/telemetry_test.py
@@ -764,6 +764,17 @@ class IndexStatsTests(TestCase):
"flush": {
"total_time_in_millis": 100
}
+ },
+ "total": {
+ "store": {
+ "size_in_bytes": 2113867510
+ },
+ "translog": {
+ "operations": 6840000,
+ "size_in_bytes": 2647984713,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 430
+ }
}
}
})
@@ -784,7 +795,8 @@ class IndexStatsTests(TestCase):
mock.call("segments_stored_fields_memory_in_bytes", 1024, "byte"),
mock.call("segments_terms_memory_in_bytes", 256, "byte"),
# we don't have norms, so nothing should have been called
- mock.call("segments_points_memory_in_bytes", 512, "byte"),
+ mock.call("store_size_in_bytes", 2113867510, "byte"),
+ mock.call("translog_size_in_bytes", 2647984713, "byte"),
], any_order=True)
@mock.patch("esrally.metrics.EsMetricsStore.put_value_cluster_level")
diff --git a/tests/metrics_test.py b/tests/metrics_test.py
index 1c676b4a..ea1224ca 100644
--- a/tests/metrics_test.py
+++ b/tests/metrics_test.py
@@ -668,7 +668,7 @@ class EsResultsStoreTests(TestCase):
t = track.Track(name="unittest-track",
indices=[track.Index(name="tests", types=["test-type"])],
- challenges=[track.Challenge(name="index", default=True, index_settings=None, schedule=schedule)])
+ challenges=[track.Challenge(name="index", default=True, schedule=schedule)])
c = cluster.Cluster([], [], None)
c.distribution_version = "5.0.0"
diff --git a/tests/track/loader_test.py b/tests/track/loader_test.py
index 85f8bb31..7afea9a7 100644
--- a/tests/track/loader_test.py
+++ b/tests/track/loader_test.py
@@ -691,7 +691,6 @@ class TrackPostProcessingTests(TestCase):
{
"name": "default-challenge",
"description": "Default challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -764,7 +763,6 @@ class TrackPostProcessingTests(TestCase):
{
"name": "default-challenge",
"description": "Default challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -1024,7 +1022,6 @@ class TrackSpecificationReaderTests(TestCase):
"challenges": [
{
"name": "default-challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -1148,7 +1145,6 @@ class TrackSpecificationReaderTests(TestCase):
"challenges": [
{
"name": "default-challenge",
- "index-settings": {},
"schedule": [
{
"clients": 8,
@@ -1327,9 +1323,6 @@ class TrackSpecificationReaderTests(TestCase):
"mixed": True,
"max-clients": 8
},
- "index-settings": {
- "index.number_of_replicas": 2
- },
"schedule": [
{
"clients": 8,
@@ -1414,8 +1407,6 @@ class TrackSpecificationReaderTests(TestCase):
self.assertEqual(1, len(resulting_track.challenges))
self.assertEqual("default-challenge", resulting_track.challenges[0].name)
self.assertEqual("Default challenge", resulting_track.challenges[0].description)
- self.assertEqual(1, len(resulting_track.challenges[0].index_settings))
- self.assertEqual(2, resulting_track.challenges[0].index_settings["index.number_of_replicas"])
self.assertEqual({"mixed": True, "max-clients": 8}, resulting_track.challenges[0].meta_data)
self.assertEqual({"append": True}, resulting_track.challenges[0].schedule[0].operation.meta_data)
self.assertEqual({"operation-index": 0}, resulting_track.challenges[0].schedule[0].meta_data)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 13
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"tox",
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
elasticsearch==6.0.0
-e git+https://github.com/elastic/rally.git@f245f685dcd625ac4d7f0fa151a885a4366f85a6#egg=esrally
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
six==1.17.0
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- elasticsearch==6.0.0
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- platformdirs==2.4.0
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- six==1.17.0
- tabulate==0.8.1
- thespian==3.9.2
- tox==3.28.0
- urllib3==1.22
- virtualenv==20.17.1
prefix: /opt/conda/envs/rally
| [
"tests/mechanic/telemetry_test.py::IndexStatsTests::test_stores_available_index_stats"
]
| []
| [
"tests/mechanic/telemetry_test.py::TelemetryTests::test_merges_options_set_by_different_devices",
"tests/mechanic/telemetry_test.py::StartupTimeTests::test_store_calculated_metrics",
"tests/mechanic/telemetry_test.py::MergePartsDeviceTests::test_store_calculated_metrics",
"tests/mechanic/telemetry_test.py::MergePartsDeviceTests::test_store_nothing_if_no_metrics_present",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_java_9_or_above_custom_recording_template",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_java_9_or_above_default_recording_template",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_pre_java_9_custom_recording_template",
"tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_pre_java_9_default_recording_template",
"tests/mechanic/telemetry_test.py::GcTests::test_sets_options_for_java_9_or_above",
"tests/mechanic/telemetry_test.py::GcTests::test_sets_options_for_pre_java_9",
"tests/mechanic/telemetry_test.py::ClusterEnvironmentInfoTests::test_stores_cluster_level_metrics_on_attach",
"tests/mechanic/telemetry_test.py::NodeEnvironmentInfoTests::test_stores_node_level_metrics_on_attach",
"tests/mechanic/telemetry_test.py::ExternalEnvironmentInfoTests::test_fallback_when_host_not_available",
"tests/mechanic/telemetry_test.py::ExternalEnvironmentInfoTests::test_stores_all_node_metrics_on_attach",
"tests/mechanic/telemetry_test.py::ClusterMetaDataInfoTests::test_enriches_cluster_nodes_for_elasticsearch_1_x",
"tests/mechanic/telemetry_test.py::ClusterMetaDataInfoTests::test_enriches_cluster_nodes_for_elasticsearch_after_1_x",
"tests/mechanic/telemetry_test.py::NodeStatsTests::test_stores_only_diff_of_gc_times",
"tests/mechanic/telemetry_test.py::IndexStatsTests::test_index_stats_are_per_lap",
"tests/mechanic/telemetry_test.py::IndexSizeTests::test_stores_index_size_for_data_paths",
"tests/mechanic/telemetry_test.py::IndexSizeTests::test_stores_nothing_if_no_data_path",
"tests/metrics_test.py::ExtractUserTagsTests::test_extracts_proper_user_tags",
"tests/metrics_test.py::ExtractUserTagsTests::test_missing_comma_raises_error",
"tests/metrics_test.py::ExtractUserTagsTests::test_missing_value_raises_error",
"tests/metrics_test.py::ExtractUserTagsTests::test_no_tags_returns_empty_dict",
"tests/metrics_test.py::EsClientTests::test_fails_after_too_many_timeouts",
"tests/metrics_test.py::EsClientTests::test_raises_rally_error_on_unknown_problems",
"tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_authentication_problems",
"tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_authorization_problems",
"tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_connection_problems",
"tests/metrics_test.py::EsClientTests::test_retries_on_timeouts",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_additional_unknown_key",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_explicit_one",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_explicit_zero",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_implicit_one",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_implicit_zero",
"tests/metrics_test.py::EsMetricsTests::test_get_error_rate_mixed",
"tests/metrics_test.py::EsMetricsTests::test_get_median",
"tests/metrics_test.py::EsMetricsTests::test_get_value",
"tests/metrics_test.py::EsMetricsTests::test_put_value_with_explicit_timestamps",
"tests/metrics_test.py::EsMetricsTests::test_put_value_with_meta_info",
"tests/metrics_test.py::EsMetricsTests::test_put_value_without_meta_info",
"tests/metrics_test.py::EsRaceStoreTests::test_store_race",
"tests/metrics_test.py::EsResultsStoreTests::test_store_results",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_externalize_and_bulk_add",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_by_sample_type",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_mixed",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_zero_without_samples",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_median",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_percentile",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_value",
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_meta_data_per_document",
"tests/metrics_test.py::FileRaceStoreTests::test_store_race",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory_without_track",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file_but_not_json",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_named_pipe",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_non_existing_path",
"tests/track/loader_test.py::GitRepositoryTests::test_track_from_existing_repo",
"tests/track/loader_test.py::TrackPreparationTests::test_decompresses_if_archive_available",
"tests/track/loader_test.py::TrackPreparationTests::test_does_nothing_if_document_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_download_document_archive_if_no_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_download_document_file_if_no_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_decompresses_compressed_docs",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_does_nothing_if_no_document_files",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_error_compressed_docs_wrong_size",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_if_document_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_uncompressed_docs_wrong_size",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_file_missing",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_wrong_file_size",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_offline",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_no_test_mode_file",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_on_connection_problems",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_error_if_compressed_does_not_contain_expected_document_file",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_error_on_wrong_uncompressed_file_size",
"tests/track/loader_test.py::TemplateRenderTests::test_render_simple_template",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_external_variables",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_globbing",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_variables",
"tests/track/loader_test.py::TrackPostProcessingTests::test_post_processes_track_spec",
"tests/track/loader_test.py::TrackPathTests::test_sets_absolute_path",
"tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_empty_included_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_mixed_included_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_filters_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_rejects_invalid_syntax",
"tests/track/loader_test.py::TrackFilterTests::test_rejects_unknown_filter_type",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_at_least_one_default_challenge",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_can_read_track_info",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_description_is_optional",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_document_count_mandatory_if_file_present",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_exactly_one_default_challenge",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_inline_operations",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_not_more_than_one_default_challenge_possible",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_multiple_tasks_match",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_no_task_matches",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_clients_does_not_propagate",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_values",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_challenge_and_challenges_are_defined",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_explicit_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_implicit_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_missing_challenge_or_challenges",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_unique_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification_with_index_template",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_iterations_and_measurement",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_time_period_and_iterations",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_selects_sole_challenge_implicitly_as_default",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_interval",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_throughput",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_unique_challenge_names"
]
| []
| Apache License 2.0 | 2,370 | [
"docs/command_line_reference.rst",
"docs/metrics.rst",
"esrally/metrics.py",
"setup.py",
"esrally/client.py",
"esrally/track/track.py",
"esrally/resources/track-schema.json",
"docs/track.rst",
"run.sh",
"esrally/reporter.py",
"esrally/mechanic/telemetry.py",
"docs/summary_report.rst",
"esrally/track/loader.py"
]
| [
"docs/command_line_reference.rst",
"docs/metrics.rst",
"esrally/metrics.py",
"setup.py",
"esrally/client.py",
"esrally/track/track.py",
"esrally/resources/track-schema.json",
"docs/track.rst",
"run.sh",
"esrally/reporter.py",
"esrally/mechanic/telemetry.py",
"docs/summary_report.rst",
"esrally/track/loader.py"
]
|
|
Azure__WALinuxAgent-1105 | fb7d6c51dac236538a8c9eb8e752159d5e3f54b8 | 2018-04-06 15:03:47 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py
index 60d42fd2..de07ea93 100644
--- a/azurelinuxagent/pa/provision/cloudinit.py
+++ b/azurelinuxagent/pa/provision/cloudinit.py
@@ -64,7 +64,7 @@ class CloudInitProvisionHandler(ProvisionHandler):
logger.info("Finished provisioning")
self.report_ready(thumbprint)
- self.report_event("Provisioning with cloud-init succeeded",
+ self.report_event("Provisioning with cloud-init succeeded ({0})".format(self._get_uptime_seconds()),
is_success=True,
duration=elapsed_milliseconds(utc_start))
diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py
index 5d07fdf4..5df572cb 100644
--- a/azurelinuxagent/pa/provision/default.py
+++ b/azurelinuxagent/pa/provision/default.py
@@ -88,7 +88,7 @@ class ProvisionHandler(object):
self.write_provisioned()
- self.report_event("Provisioning succeeded",
+ self.report_event("Provisioning succeeded ({0})".format(self._get_uptime_seconds()),
is_success=True,
duration=elapsed_milliseconds(utc_start))
@@ -125,6 +125,15 @@ class ProvisionHandler(object):
continue
return is_running == is_expected
+ @staticmethod
+ def _get_uptime_seconds():
+ try:
+ with open('/proc/uptime') as fh:
+ uptime, _ = fh.readline().split()
+ return uptime
+ except:
+ return 0
+
def reg_ssh_host_key(self):
keypair_type = conf.get_ssh_host_keypair_type()
if conf.get_regenerate_ssh_host_key():
| Track Boot Time in Provision Event
To better understand and break down the provision process please include the boot time in the provision event, or emit a boot event with an appropriate duration. | Azure/WALinuxAgent | diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py
index 1004547b..52098f2f 100644
--- a/tests/pa/test_provision.py
+++ b/tests/pa/test_provision.py
@@ -146,8 +146,12 @@ class TestProvision(AgentTestCase):
ph.run()
- call1 = call("Provisioning succeeded", duration=ANY, is_success=True)
- ph.report_event.assert_has_calls([call1])
+ self.assertEqual(1, ph.report_event.call_count)
+ positional_args, kw_args = ph.report_event.call_args
+ # [call('Provisioning succeeded (146473.68)', duration=65, is_success=True)]
+ self.assertTrue(re.match(r'Provisioning succeeded \(\d+\.\d+\)', positional_args[0]) is not None)
+ self.assertTrue(isinstance(kw_args['duration'], int))
+ self.assertTrue(kw_args['is_success'])
@distros()
@patch(
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pyasn1"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
distro==1.9.0
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pytest==7.1.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
-e git+https://github.com/Azure/WALinuxAgent.git@fb7d6c51dac236538a8c9eb8e752159d5e3f54b8#egg=WALinuxAgent
zipp @ file:///croot/zipp_1672387121353/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distro==1.9.0
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_success"
]
| []
| [
"tests/pa/test_provision.py::TestProvision::test_customdata",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned",
"tests/pa/test_provision.py::TestProvision::test_provision",
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail",
"tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled"
]
| []
| Apache License 2.0 | 2,371 | [
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/pa/provision/cloudinit.py"
]
| [
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/pa/provision/cloudinit.py"
]
|
|
NeurodataWithoutBorders__pynwb-439 | 0db2fc1528a7eb0204911fba88f54f07cd70bc94 | 2018-04-06 15:39:09 | f749097718cf344f4c95de7771a1ef523f26762f | diff --git a/src/pynwb/form/spec/namespace.py b/src/pynwb/form/spec/namespace.py
index b152adef..bf7388ad 100644
--- a/src/pynwb/form/spec/namespace.py
+++ b/src/pynwb/form/spec/namespace.py
@@ -337,6 +337,40 @@ class NamespaceCatalog(object):
for subspec_dict in it:
self.__resolve_includes(subspec_dict, catalog)
+ def __load_namespace(self, namespace, reader, types_key, resolve=True):
+ ns_name = namespace['name']
+ if ns_name in self.__namespaces:
+ raise KeyError("namespace '%s' already exists" % ns_name)
+ catalog = SpecCatalog()
+ included_types = dict()
+ for s in namespace['schema']:
+ if 'source' in s:
+ # read specs from file
+ dtypes = None
+ if types_key in s:
+ dtypes = set(s[types_key])
+ self.__load_spec_file(reader, s['source'], catalog, dtypes=dtypes, resolve=resolve)
+ self.__included_sources.setdefault(ns_name, list()).append(s['source'])
+ elif 'namespace' in s:
+ # load specs from namespace
+ try:
+ inc_ns = self.get_namespace(s['namespace'])
+ except KeyError:
+ raise ValueError("Could not load namespace '%s'" % s['namespace'])
+ if types_key in s:
+ types = s[types_key]
+ else:
+ types = inc_ns.get_registered_types()
+ for ndt in types:
+ spec = inc_ns.get_spec(ndt)
+ spec_file = inc_ns.catalog.get_spec_source_file(ndt)
+ catalog.register_spec(spec, spec_file)
+ included_types[s['namespace']] = tuple(types)
+ # construct namespace
+ self.add_namespace(ns_name,
+ self.__spec_namespace_cls.build_namespace(catalog=catalog, **namespace))
+ return included_types
+
@docval({'name': 'namespace_path', 'type': str, 'doc': 'the path to the file containing the namespaces(s) to load'},
{'name': 'resolve',
'type': bool,
@@ -361,35 +395,11 @@ class NamespaceCatalog(object):
return ret
namespaces = reader.read_namespace(namespace_path)
types_key = self.__spec_namespace_cls.types_key()
+ for ns in namespaces:
+ if ns['name'] in self.__namespaces:
+ raise KeyError("namespace '%s' already exists" % ns['name'])
# now load specs into namespace
for ns in namespaces:
- catalog = SpecCatalog()
- included_types = dict()
- for s in ns['schema']:
- if 'source' in s:
- # read specs from file
- dtypes = None
- if types_key in s:
- dtypes = set(s[types_key])
- self.__load_spec_file(reader, s['source'], catalog, dtypes=dtypes, resolve=resolve)
- self.__included_sources.setdefault(ns['name'], list()).append(s['source'])
- elif 'namespace' in s:
- # load specs from namespace
- try:
- inc_ns = self.get_namespace(s['namespace'])
- except KeyError:
- raise ValueError("Could not load namespace '%s'" % s['namespace'])
- if types_key in s:
- types = s[types_key]
- else:
- types = inc_ns.get_registered_types()
- for ndt in types:
- spec = inc_ns.get_spec(ndt)
- spec_file = inc_ns.catalog.get_spec_source_file(ndt)
- catalog.register_spec(spec, spec_file)
- included_types[s['namespace']] = tuple(types)
- ret[ns['name']] = included_types
- # construct namespace
- self.add_namespace(ns['name'], self.__spec_namespace_cls.build_namespace(catalog=catalog, **ns))
+ ret[ns['name']] = self.__load_namespace(ns, reader, types_key, resolve=resolve)
self.__included_specs[namespace_path] = ret
return ret
diff --git a/src/pynwb/form/spec/write.py b/src/pynwb/form/spec/write.py
index 850a697b..d477c282 100644
--- a/src/pynwb/form/spec/write.py
+++ b/src/pynwb/form/spec/write.py
@@ -8,6 +8,7 @@ from abc import ABCMeta, abstractmethod
from .namespace import SpecNamespace
from .spec import GroupSpec, DatasetSpec
+from .catalog import SpecCatalog
from ..utils import docval, getargs, popargs
@@ -59,6 +60,7 @@ class NamespaceBuilder(object):
self.__ns_args = copy.deepcopy(kwargs)
self.__namespaces = OrderedDict()
self.__sources = OrderedDict()
+ self.__catalog = SpecCatalog()
self.__dt_key = ns_cls.types_key()
@docval({'name': 'source', 'type': str, 'doc': 'the path to write the spec to'},
@@ -66,6 +68,7 @@ class NamespaceBuilder(object):
def add_spec(self, **kwargs):
''' Add a Spec to the namespace '''
source, spec = getargs('source', 'spec', kwargs)
+ self.__catalog.auto_register(spec, source)
self.add_source(source)
self.__sources[source].setdefault(self.__dt_key, list()).append(spec)
| NWBNamespaceBuilder does not validate spec it is building/writing.
NWBNamespaceBuilder allows construction of specs that can't be loaded due to redefining types already defined in the spec. The problem isn't discovered until load time.
Example:
from pynwb.spec import NWBGroupSpec, NWBNamespaceBuilder
spec1 = NWBGroupSpec("This is my new group 1",
"Group1",
neurodata_type_inc="NWBDataInterface",
neurodata_type_def="Group1")
spec2 = NWBGroupSpec("This is my new group 2",
"Group2",
groups=[spec1], # oops, this will look like it writes a valid spec, but fails on read
neurodata_type_inc="NWBDataInterface",
neurodata_type_def="Group2")
ext_source = "myext.yaml"
ns_builder = NWBNamespaceBuilder("Example namespace", "example")
ns_builder.add_spec(ext_source, spec1)
ns_builder.add_spec(ext_source, spec2)
ns_path = "example.namespace.yaml"
ns_builder.export(ns_path)
Fails on load:
from pynwb import load_namespaces
namespace_path = 'example.namespace.yaml'
load_namespaces(namespace_path)
Tested in python 3.6 in ubuntu using latest dev commit.
- [X] Have you ensured the feature or change was not already [reported](https://github.com/NeurodataWithoutBorders/pynwb/issues/) ?
- [X] Have you included a brief and descriptive title?
- [X] Have you included a clear description of the problem you are trying to solve?
- [X] Have you included a minimal code snippet that reproduces the issue you are encountering?
| NeurodataWithoutBorders/pynwb | diff --git a/tests/unit/pynwb_tests/test_extension.py b/tests/unit/pynwb_tests/test_extension.py
index 0e6e72ee..b425db55 100644
--- a/tests/unit/pynwb_tests/test_extension.py
+++ b/tests/unit/pynwb_tests/test_extension.py
@@ -72,3 +72,28 @@ class TestCatchDupNS(unittest.TestCase):
load_namespaces(os.path.join(self.tempdir, self.ns_path1))
with self.assertRaises(KeyError):
load_namespaces(os.path.join(self.tempdir, self.ns_path2))
+
+
+class TestCatchDuplicateSpec(unittest.TestCase):
+
+ def setUp(self):
+ self.ext_source = 'fake_extension3.yaml'
+
+ def tearDown(self):
+ pass
+
+ def test_catch_duplicate_spec(self):
+ spec1 = NWBGroupSpec("This is my new group 1",
+ "Group1",
+ neurodata_type_inc="NWBDataInterface",
+ neurodata_type_def="Group1")
+ spec2 = NWBGroupSpec("This is my new group 2",
+ "Group2",
+ groups=[spec1],
+ neurodata_type_inc="NWBDataInterface",
+ neurodata_type_def="Group2")
+ ns_builder = NWBNamespaceBuilder("Example namespace",
+ "pynwb_test_ext")
+ ns_builder.add_spec(self.ext_source, spec1)
+ with self.assertRaises(ValueError):
+ ns_builder.add_spec(self.ext_source, spec2)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2018.1.18
chardet==3.0.4
coverage==6.2
execnet==1.9.0
h5py==2.7.1
idna==2.6
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.14.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/NeurodataWithoutBorders/pynwb.git@0db2fc1528a7eb0204911fba88f54f07cd70bc94#egg=pynwb
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.7.2
requests==2.18.4
ruamel.yaml==0.15.37
six==1.11.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.22
zipp==3.6.0
| name: pynwb
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- certifi==2018.1.18
- chardet==3.0.4
- coverage==6.2
- execnet==1.9.0
- h5py==2.7.1
- idna==2.6
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.14.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.7.2
- requests==2.18.4
- ruamel-yaml==0.15.37
- six==1.11.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.22
- zipp==3.6.0
prefix: /opt/conda/envs/pynwb
| [
"tests/unit/pynwb_tests/test_extension.py::TestCatchDuplicateSpec::test_catch_duplicate_spec"
]
| []
| [
"tests/unit/pynwb_tests/test_extension.py::TestExtension::test_export",
"tests/unit/pynwb_tests/test_extension.py::TestExtension::test_get_class",
"tests/unit/pynwb_tests/test_extension.py::TestExtension::test_load_namespace",
"tests/unit/pynwb_tests/test_extension.py::TestCatchDupNS::test_catch_dup_name"
]
| []
| BSD-3-Clause | 2,372 | [
"src/pynwb/form/spec/write.py",
"src/pynwb/form/spec/namespace.py"
]
| [
"src/pynwb/form/spec/write.py",
"src/pynwb/form/spec/namespace.py"
]
|
|
mkdocs__mkdocs-1459 | d4c5832876accc7e59293852d86810585b97674c | 2018-04-06 17:35:43 | 27f06517db4d8b73b162f2a2af65826ddcc8db54 | diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md
index 9ae74fb1..6f0f3518 100644
--- a/docs/about/release-notes.md
+++ b/docs/about/release-notes.md
@@ -66,6 +66,7 @@ authors should review how [search and themes] interact.
### Other Changes and Additions to Development Version
+* Improve Markdown extension error messages. (#782).
* Drop official support for Python 3.3 and set `tornado>=5.0` (#1427).
* Add support for GitLab edit links (#1435).
* Link to GitHub issues from release notes (#644).
diff --git a/mkdocs/config/config_options.py b/mkdocs/config/config_options.py
index 30da2a2a..a67dd05e 100644
--- a/mkdocs/config/config_options.py
+++ b/mkdocs/config/config_options.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
from collections import Sequence
import os
from collections import namedtuple
+import markdown
from mkdocs import utils, theme, plugins
from mkdocs.config.base import Config, ValidationError
@@ -629,7 +630,16 @@ class MarkdownExtensions(OptionallyRequired):
extensions.append(item)
else:
raise ValidationError('Invalid Markdown Extensions configuration')
- return utils.reduce_list(self.builtins + extensions)
+
+ extensions = utils.reduce_list(self.builtins + extensions)
+
+ # Confirm that Markdown considers extensions to be valid
+ try:
+ markdown.Markdown(extensions=extensions, extension_configs=self.configdata)
+ except Exception as e:
+ raise ValidationError(e.args[0])
+
+ return extensions
def post_validation(self, config, key_name):
config[self.configkey] = self.configdata
diff --git a/requirements/project-min.txt b/requirements/project-min.txt
index 6b0262e0..a60bec24 100644
--- a/requirements/project-min.txt
+++ b/requirements/project-min.txt
@@ -4,3 +4,4 @@ livereload==2.5.1
Markdown==2.5
PyYAML==3.10
tornado==4.1
+mdx_gh_links>=0.2
| Improve error message for missing Markdown extensions
I just had to install that manually.
| mkdocs/mkdocs | diff --git a/mkdocs/tests/cli_tests.py b/mkdocs/tests/cli_tests.py
index 85ab87de..57f12845 100644
--- a/mkdocs/tests/cli_tests.py
+++ b/mkdocs/tests/cli_tests.py
@@ -186,8 +186,9 @@ class CLITests(unittest.TestCase):
logger = logging.getLogger('mkdocs')
self.assertEqual(logger.level, logging.INFO)
+ @mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
- def test_build_clean(self, mock_build):
+ def test_build_clean(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--clean'], catch_exceptions=False)
@@ -198,8 +199,9 @@ class CLITests(unittest.TestCase):
self.assertTrue('dirty' in kwargs)
self.assertFalse(kwargs['dirty'])
+ @mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
- def test_build_dirty(self, mock_build):
+ def test_build_dirty(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--dirty'], catch_exceptions=False)
@@ -296,8 +298,9 @@ class CLITests(unittest.TestCase):
site_dir='custom'
)
+ @mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
- def test_build_verbose(self, mock_build):
+ def test_build_verbose(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--verbose'], catch_exceptions=False)
@@ -307,8 +310,9 @@ class CLITests(unittest.TestCase):
logger = logging.getLogger('mkdocs')
self.assertEqual(logger.level, logging.DEBUG)
+ @mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
- def test_build_quiet(self, mock_build):
+ def test_build_quiet(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--quiet'], catch_exceptions=False)
@@ -352,9 +356,10 @@ class CLITests(unittest.TestCase):
remote_name=None
)
+ @mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
- def test_gh_deploy_clean(self, mock_gh_deploy, mock_build):
+ def test_gh_deploy_clean(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--clean'], catch_exceptions=False)
@@ -366,9 +371,10 @@ class CLITests(unittest.TestCase):
self.assertTrue('dirty' in kwargs)
self.assertFalse(kwargs['dirty'])
+ @mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
- def test_gh_deploy_dirty(self, mock_gh_deploy, mock_build):
+ def test_gh_deploy_dirty(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--dirty'], catch_exceptions=False)
diff --git a/mkdocs/tests/config/config_options_tests.py b/mkdocs/tests/config/config_options_tests.py
index 56b7a724..1d573ba2 100644
--- a/mkdocs/tests/config/config_options_tests.py
+++ b/mkdocs/tests/config/config_options_tests.py
@@ -2,6 +2,7 @@ from __future__ import unicode_literals
import os
import unittest
+from mock import patch
import mkdocs
from mkdocs import utils
@@ -491,7 +492,8 @@ class PrivateTest(unittest.TestCase):
class MarkdownExtensionsTest(unittest.TestCase):
- def test_simple_list(self):
+ @patch('markdown.Markdown')
+ def test_simple_list(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': ['foo', 'bar']
@@ -503,7 +505,8 @@ class MarkdownExtensionsTest(unittest.TestCase):
'mdx_configs': {}
}, config)
- def test_list_dicts(self):
+ @patch('markdown.Markdown')
+ def test_list_dicts(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
@@ -522,7 +525,8 @@ class MarkdownExtensionsTest(unittest.TestCase):
}
}, config)
- def test_mixed_list(self):
+ @patch('markdown.Markdown')
+ def test_mixed_list(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
@@ -539,7 +543,8 @@ class MarkdownExtensionsTest(unittest.TestCase):
}
}, config)
- def test_builtins(self):
+ @patch('markdown.Markdown')
+ def test_builtins(self, mockMd):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': ['foo', 'bar']
@@ -577,7 +582,8 @@ class MarkdownExtensionsTest(unittest.TestCase):
'mdx_configs': {'toc': {'permalink': True}}
}, config)
- def test_configkey(self):
+ @patch('markdown.Markdown')
+ def test_configkey(self, mockMd):
option = config_options.MarkdownExtensions(configkey='bar')
config = {
'markdown_extensions': [
@@ -605,12 +611,14 @@ class MarkdownExtensionsTest(unittest.TestCase):
'mdx_configs': {}
}, config)
- def test_not_list(self):
+ @patch('markdown.Markdown')
+ def test_not_list(self, mockMd):
option = config_options.MarkdownExtensions()
self.assertRaises(config_options.ValidationError,
option.validate, 'not a list')
- def test_invalid_config_option(self):
+ @patch('markdown.Markdown')
+ def test_invalid_config_option(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
@@ -622,7 +630,8 @@ class MarkdownExtensionsTest(unittest.TestCase):
option.validate, config['markdown_extensions']
)
- def test_invalid_config_item(self):
+ @patch('markdown.Markdown')
+ def test_invalid_config_item(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
@@ -634,7 +643,8 @@ class MarkdownExtensionsTest(unittest.TestCase):
option.validate, config['markdown_extensions']
)
- def test_invalid_dict_item(self):
+ @patch('markdown.Markdown')
+ def test_invalid_dict_item(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
@@ -645,3 +655,13 @@ class MarkdownExtensionsTest(unittest.TestCase):
config_options.ValidationError,
option.validate, config['markdown_extensions']
)
+
+ def test_unknown_extension(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': ['unknown']
+ }
+ self.assertRaises(
+ config_options.ValidationError,
+ option.validate, config['markdown_extensions']
+ )
diff --git a/mkdocs/tests/gh_deploy_tests.py b/mkdocs/tests/gh_deploy_tests.py
index b08e28df..d12f98cc 100644
--- a/mkdocs/tests/gh_deploy_tests.py
+++ b/mkdocs/tests/gh_deploy_tests.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import unittest
import mock
-from mkdocs.config import load_config
+from mkdocs.tests.base import load_config
from mkdocs.commands import gh_deploy
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 3
} | 0.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-mock",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
exceptiongroup==1.2.2
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
mdx-gh-links==0.4
-e git+https://github.com/mkdocs/mkdocs.git@d4c5832876accc7e59293852d86810585b97674c#egg=mkdocs
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-mock==3.14.0
PyYAML==6.0.2
tomli==2.2.1
tornado==6.4.2
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- exceptiongroup==1.2.2
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- mdx-gh-links==0.4
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-mock==3.14.0
- pyyaml==6.0.2
- tomli==2.2.1
- tornado==6.4.2
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_unknown_extension"
]
| []
| [
"mkdocs/tests/cli_tests.py::CLITests::test_build_clean",
"mkdocs/tests/cli_tests.py::CLITests::test_build_config_file",
"mkdocs/tests/cli_tests.py::CLITests::test_build_defaults",
"mkdocs/tests/cli_tests.py::CLITests::test_build_dirty",
"mkdocs/tests/cli_tests.py::CLITests::test_build_quiet",
"mkdocs/tests/cli_tests.py::CLITests::test_build_site_dir",
"mkdocs/tests/cli_tests.py::CLITests::test_build_strict",
"mkdocs/tests/cli_tests.py::CLITests::test_build_theme",
"mkdocs/tests/cli_tests.py::CLITests::test_build_theme_dir",
"mkdocs/tests/cli_tests.py::CLITests::test_build_verbose",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_clean",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_config_file",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_defaults",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_dirty",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_force",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_message",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_remote_branch",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_remote_name",
"mkdocs/tests/cli_tests.py::CLITests::test_new",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_config_file",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_default",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_dev_addr",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_dirtyreload",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_livereload",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_no_livereload",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_strict",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_theme",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_theme_dir",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_replace_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required_no_default",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_length",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_multiple_types",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_single_type",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_default_address",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_format",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_missing_port",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_port",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_type",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_named_address",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_valid_IPv6_address",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_valid_address",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid_url",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_valid_url",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_github",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_gitlab",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom_and_empty_edit_uri",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_github",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_gitlab",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_doc_dir_is_config_dir",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_file",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_attribute_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_type_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir_but_required",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_valid_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_common_prefix",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_doc_dir_in_site_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_site_dir_in_docs_dir",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_as_complex_config",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_as_simple_config",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_as_string",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_config_missing_name",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_default",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_invalid_type",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_name_is_none",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_uninstalled_theme_as_config",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_uninstalled_theme_as_string",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_config",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_type",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_old_format",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_dict",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_empty",
"mkdocs/tests/config/config_options_tests.py::PrivateTest::test_defined",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins_config",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_configkey",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_duplicates",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_option",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_dict_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_list_dicts",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_mixed_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_none",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_not_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_simple_list",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_error",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_hostname",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_no_cname",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_current_sha",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_remote_url_enterprise",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_remote_url_http",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_remote_url_ssh",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_is_cwd_git_repo",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_is_cwd_not_git_repo"
]
| []
| BSD 2-Clause "Simplified" License | 2,373 | [
"docs/about/release-notes.md",
"mkdocs/config/config_options.py",
"requirements/project-min.txt"
]
| [
"docs/about/release-notes.md",
"mkdocs/config/config_options.py",
"requirements/project-min.txt"
]
|
|
PlasmaPy__PlasmaPy-346 | 3dad2fc171f3dd72cac4df18dc0581accc8e69d5 | 2018-04-06 19:18:46 | 3dad2fc171f3dd72cac4df18dc0581accc8e69d5 | diff --git a/docs/atomic/particle_class.rst b/docs/atomic/particle_class.rst
index 4de86de4..5f4267c0 100644
--- a/docs/atomic/particle_class.rst
+++ b/docs/atomic/particle_class.rst
@@ -4,7 +4,7 @@ Particle Class
**************
The `~plasmapy.atomic.Particle` class provides an object-oriented
-interface to particle information.
+interface for particle information.
Creating a Particle Instance
============================
@@ -30,7 +30,7 @@ but element names and many aliases are not.
>>> hydride = Particle('H-')
An `int` may be used as the first positional argument to
-`plasmapy.atomic.Particle` to represent an atomic number. For isotopes
+`~plasmapy.atomic.Particle` to represent an atomic number. For isotopes
and ions, the mass number may be represented with the `mass_numb`
keyword and the integer charge may be represented with the `Z` keyword.
@@ -48,8 +48,8 @@ directly are: `proton`, `electron`, `neutron`, `positron`, `deuteron`,
Accessing Particle Properties
=============================
-The properties of each particle may be accessed using attributes of the
-`~plasmapy.atomic.Particle` instance.
+The properties of each particle may be accessed using the attributes of
+the `~plasmapy.atomic.Particle` instance.
>>> proton.atomic_number
1
@@ -71,7 +71,7 @@ SI units.
<Quantity 0.49225958 GeV>
Strings representing particles may be accessed using the `particle`,
-`element`, `isotope`, and `ion` attributes.
+`element`, `isotope`, and `ionic_symbol` attributes.
>>> antimuon.particle
'mu-'
@@ -79,7 +79,7 @@ Strings representing particles may be accessed using the `particle`,
'H'
>>> alpha.isotope
'He-4'
->>> deuteron.ion
+>>> deuteron.ionic_symbol
'D 1+'
.. _particle-class-categories
@@ -139,19 +139,22 @@ True
>>> alpha == 'He-4 1+'
False
-The `is_electron` attribute provides a quick way to check whether or not
-a particle is an electron.
+The `is_electron` and `is_ion` attributes provide a quick way to check
+whether or not a particle is an electron or ion, respectively.
>>> electron.is_electron
True
>>> hydride.is_electron
False
+>>> deuteron.is_ion
+True
-The `element`, `isotope`, and `ion` return `None` when the particle is
-not the respective category. Because non-empty strings evaluate to
-`True` and `None` evaluates to `False` when converted to a `bool`, these
-attributes may be used in conditional statements to test whether or not
-a particle is in one of these categories.
+The `element` and `isotope` attributes return `None` when the particle
+does not correspond to an element or isotope. Because non-empty
+strings evaluate to `True` and `None` evaluates to `False` when
+converted to a `bool`, these attributes may be used in conditional
+statements to test whether or not a particle is in one of these
+categories.
.. code-block:: python
@@ -162,5 +165,17 @@ a particle is in one of these categories.
print(f"{particle} corresponds to element {particle.element}")
if particle.isotope:
print(f"{particle} corresponds to isotope {particle.isotope}")
- if particle.ion:
- print(f"{particle} corresponds to ion {particle.ion}")
+
+.. _particle-class-antiparticles
+
+Returning Antiparticles
+=======================
+
+The antiparticle of an elementary particle or antiparticle may be found
+by using the unary operator `~` to invert a `~plasmapy.atomic.Particle`
+instance.
+
+>>> ~electron
+Particle("e+")
+>>> ~antimuon
+Particle("mu-")
diff --git a/plasmapy/atomic/atomic.py b/plasmapy/atomic/atomic.py
index 514efcf5..e9714297 100644
--- a/plasmapy/atomic/atomic.py
+++ b/plasmapy/atomic/atomic.py
@@ -341,7 +341,7 @@ def ion_mass(particle: Particle, *, Z: int = None, mass_numb: int = None) -> u.Q
# TODO: Remove deprecated functionality elsewhere in the code
- if particle.ion or particle.particle in {'e+'}:
+ if particle.is_ion or particle.particle in {'e+'}:
return particle.mass
elif particle.particle == 'n':
raise InvalidIonError
diff --git a/plasmapy/atomic/parsing.py b/plasmapy/atomic/parsing.py
index 0d302216..4e3a5bed 100644
--- a/plasmapy/atomic/parsing.py
+++ b/plasmapy/atomic/parsing.py
@@ -43,7 +43,8 @@ def _create_alias_dicts(Particles: dict) -> (Dict[str, str], Dict[str, str]):
]
case_insensitive_aliases_for_a_symbol = [
- (['antielectron'], 'e+'),
+ (['antielectron', 'anti_electron'], 'e+'),
+ (['antipositron', 'anti_positron'], 'e-'),
(['muon-'], 'mu-'),
(['muon+'], 'mu+'),
(['tau particle'], 'tau-'),
diff --git a/plasmapy/atomic/particle_class.py b/plasmapy/atomic/particle_class.py
index a32c960b..1dbcf607 100644
--- a/plasmapy/atomic/particle_class.py
+++ b/plasmapy/atomic/particle_class.py
@@ -29,7 +29,12 @@
from .elements import _Elements, _PeriodicTable
from .isotopes import _Isotopes
-from .special_particles import (_Particles, ParticleZoo, _special_ion_masses)
+from .special_particles import (
+ _Particles,
+ ParticleZoo,
+ _special_ion_masses,
+ _antiparticles,
+)
_classification_categories = {
'lepton',
@@ -140,7 +145,8 @@ class Particle:
not available.
`~plasmapy.utils.AtomicError`
- Raised for attempts at converting a Particle object to a `bool`.
+ Raised for attempts at converting a
+ `~plasmapy.atomic.Particle` object to a `bool`.
Examples
--------
@@ -154,34 +160,43 @@ class Particle:
>>> positron = Particle('positron')
>>> hydrogen = Particle(1) # atomic number
- The `particle` attribute returns the particle's symbol in
- the standard form.
+ The `particle` attribute returns the particle's symbol in the
+ standard form.
>>> positron.particle
'e+'
- The `element`, `isotope`, and `ion` attributes return the symbols
- for each of these different types of particles.
+ The `atomic_symbol`, `isotope_symbol`, and `ionic_symbol` attributes
+ return the symbols for each of these different types of particles.
>>> proton.element
'H'
>>> alpha.isotope
'He-4'
- >>> deuteron.ion
+ >>> deuteron.ionic_symbol
'D 1+'
+ The `ionic_symbol` attribute works for neutral atoms if charge
+ information is available.
+
+ >>> deuterium = Particle("D", Z=0)
+ >>> deuterium.ionic_symbol
+ 'D 0+'
+
If the particle doesn't belong to one of those categories, then
these attributes return `None`.
>>> positron.element is None
True
- These attributes may therefore be used to test whether or not a
- particle is an element, isotope, or ion.
+ The attributes of a `~plasmapy.atomic.Particle` instance may be used
+ to test whether or not a particle is an element, isotope, or ion.
- >>> True if Particle('e-').element else False
+ >>> True if positron.element else False
False
- >>> True if Particle('alpha').ion else False
+ >>> True if deuterium.isotope else False
+ True
+ >>> True if Particle('alpha').is_ion else False
True
Many of the attributes return physical properties of a particle.
@@ -207,6 +222,17 @@ class Particle:
>>> alpha.neutron_number
2
+ If a `~plasmapy.atomic.Particle` instance represents an elementary
+ particle, then the unary `~` (invert) operator may be used to return
+ the particle's antiparticle.
+
+ >>> ~electron
+ Particle("e+")
+ >>> ~proton
+ Particle("p-")
+ >>> ~positron
+ Particle("e-")
+
The `~plasmapy.atomic.particle_class.Particle.categories` attribute
and `~plasmapy.atomic.particle_class.Particle.is_category` method
may be used to find and test particle membership in categories.
@@ -304,7 +330,7 @@ def __init__(self, argument: Union[str, int], mass_numb: int = None, Z: int = No
categories.add('element')
if isotope:
categories.add('isotope')
- if ion:
+ if self.element and self._attributes['integer charge']:
categories.add('ion')
# Element properties
@@ -415,7 +441,7 @@ def __eq__(self, other) -> bool:
except InvalidParticleError as exc:
raise InvalidParticleError(
f"{other} is not a particle and cannot be "
- f"compared to {self}.")
+ f"compared to {self}.") from exc
if not isinstance(other, self.__class__):
raise TypeError(
@@ -480,9 +506,18 @@ def __bool__(self):
"""
raise AtomicError("The truthiness of a Particle object is not defined.")
+ def __invert__(self):
+ """
+ Return the corresponding antiparticle, or raise an
+ `~plasmapy.utils.AtomicError` if the particle is not an
+ elementary particle.
+ """
+ return self.antiparticle
+
@property
- def particle(self) -> str:
- """Return the particle's symbol.
+ def particle(self) -> Optional[str]:
+ """
+ Return the particle's symbol.
Examples
--------
@@ -493,6 +528,34 @@ def particle(self) -> str:
"""
return self._attributes['particle']
+ @property
+ def antiparticle(self):
+ """
+ Return the corresponding antiparticle, or raise an
+ `~plasmapy.utils.AtomicError` if the particle is not an
+ elementary particle.
+
+ This attribute may be accessed by using the unary operator `~`
+ acting on a `~plasma.atomic.Particle` instance.
+
+ Examples
+ --------
+ >>> electron = Particle('e-')
+ >>> electron.antiparticle
+ Particle("e+")
+
+ >>> antineutron = Particle('antineutron')
+ >>> ~antineutron
+ Particle("n")
+
+ """
+ if self.particle in _antiparticles.keys():
+ return Particle(_antiparticles[self.particle])
+ else:
+ raise AtomicError(
+ "The unary operator can only be used for elementary "
+ "particles and antiparticles.")
+
@property
def element(self) -> Optional[str]:
"""
@@ -524,16 +587,19 @@ def isotope(self) -> Optional[str]:
return self._attributes['isotope']
@property
- def ion(self) -> Optional[str]:
+ def ionic_symbol(self) -> Optional[str]:
"""
- Return the ion symbol if the particle corresponds to an ion,
- and `None` otherwise.
+ Return the ionic symbol if the particle corresponds to an ion or
+ neutral atom, and `None` otherwise.
Examples
--------
>>> deuteron = Particle('deuteron')
- >>> deuteron.ion
+ >>> deuteron.ionic_symbol
'D 1+'
+ >>> hydrogen_atom = Particle('H', Z=0)
+ >>> hydrogen_atom.ionic_symbol
+ 'H 0+'
"""
return self._attributes['ion']
@@ -616,7 +682,7 @@ def standard_atomic_weight(self) -> u.Quantity:
<Quantity 2.65669641e-26 kg>
"""
- if self.isotope or self.ion or not self.element:
+ if self.isotope or self.is_ion or not self.element:
raise InvalidElementError(_category_errmsg(self, 'element'))
if self._attributes['standard atomic weight'] is None: # coveralls: ignore
raise MissingAtomicDataError(
@@ -642,14 +708,14 @@ def nuclide_mass(self) -> u.Quantity:
"""
- if self.particle in ['H-1', 'p+']:
+ if self.isotope == 'H-1':
return const.m_p
- elif self.particle == 'n':
- return const.m_n
- elif self.particle in ['D', 'D 1+']:
+ elif self.isotope == 'D':
return _special_ion_masses['D 1+']
- elif self.particle in ['T', 'T 1+']:
+ elif self.isotope == 'T':
return _special_ion_masses['T 1+']
+ elif self.particle == 'n':
+ return const.m_n
if not self.isotope:
raise InvalidIsotopeError(_category_errmsg(self, 'isotope'))
@@ -703,7 +769,7 @@ def mass(self) -> u.Quantity:
if self._attributes['mass'] is not None:
return self._attributes['mass'].to(u.kg)
- if self.ion:
+ if self.is_ion:
if self.isotope:
base_mass = self._attributes['isotope mass']
@@ -711,7 +777,9 @@ def mass(self) -> u.Quantity:
base_mass = self._attributes['standard atomic weight']
if base_mass is None:
- raise MissingAtomicDataError(f"The mass of ion '{self.ion}' is not available.")
+ raise MissingAtomicDataError(
+ f"The mass of ion '{self.ionic_symbol}' is not available."
+ )
mass = base_mass - self.integer_charge * const.m_e
@@ -821,7 +889,7 @@ def electron_number(self) -> int:
"""
if self.particle == 'e-':
return 1
- elif self.ion:
+ elif self.ionic_symbol:
return self.atomic_number - self.integer_charge
else: # coveralls: ignore
raise InvalidIonError(_category_errmsg(self, 'ion'))
@@ -845,7 +913,7 @@ def isotopic_abundance(self) -> u.Quantity:
"""
from .atomic import common_isotopes
- if not self.isotope or self.ion: # coveralls: ignore
+ if not self.isotope or self.is_ion: # coveralls: ignore
raise InvalidIsotopeError(_category_errmsg(self.particle, 'isotope'))
abundance = self._attributes.get('isotopic abundance', 0.0)
@@ -998,7 +1066,7 @@ def spin(self) -> Union[int, float]:
return self._attributes['spin']
@property
- def periodic_table(self):
+ def periodic_table(self) -> collections.namedtuple:
"""
Return a `~collections.namedtuple` to access category, period,
group, and block information about an element.
@@ -1142,3 +1210,21 @@ def is_electron(self) -> bool:
"""
return self == "e-"
+
+ @property
+ def is_ion(self) -> bool:
+ """
+ Return `True` if the particle is an ion, and `False`
+ otherwise.
+
+ Examples
+ --------
+ >>> Particle('D+').is_ion
+ True
+ >>> Particle('H-1 0+').is_ion
+ False
+ >>> Particle('e+').is_ion
+ False
+
+ """
+ return self.is_category('ion')
diff --git a/plasmapy/atomic/particle_input.py b/plasmapy/atomic/particle_input.py
index a4126426..7e6a3b41 100644
--- a/plasmapy/atomic/particle_input.py
+++ b/plasmapy/atomic/particle_input.py
@@ -301,7 +301,7 @@ def wrapper(*args, **kwargs):
cat_table = [
('element', particle.element, InvalidElementError),
('isotope', particle.isotope, InvalidIsotopeError),
- ('ion', particle.ion, InvalidIonError),
+ ('ion', particle.ionic_symbol, InvalidIonError),
]
for category_name, category_symbol, CategoryError in cat_table:
diff --git a/plasmapy/atomic/special_particles.py b/plasmapy/atomic/special_particles.py
index 785303b9..880d6529 100644
--- a/plasmapy/atomic/special_particles.py
+++ b/plasmapy/atomic/special_particles.py
@@ -287,6 +287,24 @@ def _create_Particles_dict() -> Dict[str, dict]:
'T 1+': 5.007356665e-27 * u.kg,
}
+_antiparticles = {
+ 'p+': 'p-',
+ 'n': 'antineutron',
+ 'e-': 'e+',
+ 'mu-': 'mu+',
+ 'tau-': 'tau+',
+ 'nu_e': 'anti_nu_e',
+ 'nu_mu': 'anti_nu_mu',
+ 'nu_tau': 'anti_nu_tau',
+ 'p-': 'p+',
+ 'antineutron': 'n',
+ 'e+': 'e-',
+ 'mu+': 'mu-',
+ 'tau+': 'tau-',
+ 'anti_nu_e': 'nu_e',
+ 'anti_nu_mu': 'nu_mu',
+ 'anti_nu_tau': 'nu_tau',
+}
if __name__ == "__main__": # coveralls: ignore
from pprint import pprint
diff --git a/plasmapy/atomic/symbols.py b/plasmapy/atomic/symbols.py
index 6963a4a0..fc50af05 100644
--- a/plasmapy/atomic/symbols.py
+++ b/plasmapy/atomic/symbols.py
@@ -204,7 +204,9 @@ def ionic_symbol(particle: Particle, mass_numb: int = None, Z: int = None) -> st
'H-1 0+'
"""
- return particle.ion
+
+ return particle.ionic_symbol
+
@particle_input
| Separate how Particle class treats ions and neutral atoms
Right now the `Particle` class has an attribute `Particle.ion` which returns the ionic symbol if the particle is either an ion or a neutral charged particle (see also #338 which changes `ion_symbol` to `ionic_symbol`). This can lead to confusion since if we treat `Particle.ion` as a `bool` to check whether or not something is an ion, then it will return `True` for neutral atoms where the charge information is given.
One possibility would be to create an `is_ion` attribute for `Particle` that returns a `bool` of whether or not the particle is a charged ion. In that case, we could rename `Particle.ion` as `Particle.ionic_symbol` (and `Particle.isotope` as `Particle.isotope_symbol`, etc.) and also create a `Particle.is_isotope` attribute. We should keep in mind that plasma physicists sometimes worry about electron-positron pair plasmas as we are doing this.
The `categories` and `is_category` attributes of `Particle` need to updated to reflect that `'H 0+'`, for example, is not classified as an `'ion'`. This would also affect how the `@particle_input` decorator treats things (e.g., the `any_of`, `require`, and `exclude` keyword arguments).
I'm also wondering if the `atomic` subpackage should keep separating elements (e.g., `'H'`, `'He'`, etc.) from neutrally charged atoms (e.g., `'H 0+'`, `'He 0+'`, etc.). I'm still inclined to keep them separate, since explicitly putting in the charge is better than implicitly assuming it, but this is worth thinking more about too. | PlasmaPy/PlasmaPy | diff --git a/plasmapy/atomic/tests/test_particle_class.py b/plasmapy/atomic/tests/test_particle_class.py
index 26a4dde5..f3f4a524 100644
--- a/plasmapy/atomic/tests/test_particle_class.py
+++ b/plasmapy/atomic/tests/test_particle_class.py
@@ -21,6 +21,7 @@
from ..atomic import known_isotopes
from ..isotopes import _Isotopes
from ..particle_class import Particle
+from ..special_particles import ParticleZoo
# (arg, kwargs, results_dict
test_Particle_table = [
@@ -29,7 +30,9 @@
{'particle': 'n',
'element': None,
'isotope': None,
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
+ 'is_electron': False,
'integer_charge': 0,
'atomic_number': InvalidElementError,
'mass_number': InvalidIsotopeError,
@@ -46,7 +49,8 @@
'element': 'H',
'element_name': 'hydrogen',
'isotope': 'H-1',
- 'ion': 'p+',
+ 'ionic_symbol': 'p+',
+ 'is_ion': True,
'mass': m_p,
'nuclide_mass': m_p,
'integer_charge': 1,
@@ -78,7 +82,8 @@
'element': None,
'element_name': InvalidElementError,
'isotope': None,
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
'mass': m_p,
'integer_charge': -1,
'spin': 1 / 2,
@@ -97,7 +102,8 @@
'element': None,
'element_name': InvalidElementError,
'isotope': None,
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
'mass': m_e,
'integer_charge': -1,
'spin': 1 / 2,
@@ -118,7 +124,8 @@
{'particle': 'e+',
'element': None,
'isotope': None,
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
'mass': m_e,
'nuclide_mass': InvalidIsotopeError,
'integer_charge': 1,
@@ -130,6 +137,7 @@
'is_category(require="positron")': True,
'is_category(any_of={"positron"})': True,
'is_category(exclude="positron")': False,
+ 'is_category("ion")': False,
'__str__()': 'e+',
'__repr__()': 'Particle("e+")',
'periodic_table.group': InvalidElementError,
@@ -142,7 +150,8 @@
{'particle': 'H',
'element': 'H',
'isotope': None,
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
'charge': ChargeError,
'integer_charge': ChargeError,
'mass_number': InvalidIsotopeError,
@@ -157,12 +166,33 @@
'is_category("proton")': False,
}),
+ ('H-1 0+', {}, {
+ 'particle': 'H-1 0+',
+ 'element': 'H',
+ 'isotope': 'H-1',
+ 'ionic_symbol': 'H-1 0+',
+ 'is_ion': False,
+ 'charge': 0 * u.C,
+ 'integer_charge': 0,
+ 'mass_number': 1,
+ 'baryon_number': 1,
+ 'lepton_number': 0,
+ 'half_life': np.inf * u.s,
+ 'nuclide_mass': m_p,
+ 'is_category("charged")': False,
+ 'is_category("uncharged")': True,
+ 'is_category("ion")': False,
+ 'is_category("nonmetal")': True,
+ 'is_category("proton")': False,
+ }),
+
('D+', {},
{'particle': 'D 1+',
'element': 'H',
'element_name': 'hydrogen',
'isotope': 'D',
- 'ion': 'D 1+',
+ 'ionic_symbol': 'D 1+',
+ 'is_ion': True,
'integer_charge': 1,
'atomic_number': 1,
'mass_number': 2,
@@ -180,7 +210,8 @@
{'particle': 'T 1+',
'element': 'H',
'isotope': 'T',
- 'ion': 'T 1+',
+ 'ionic_symbol': 'T 1+',
+ 'is_ion': True,
'integer_charge': 1,
'atomic_number': 1,
'mass_number': 3,
@@ -197,7 +228,9 @@
'element': 'Fe',
'element_name': 'iron',
'isotope': 'Fe-56',
- 'ion': 'Fe-56 17+',
+ 'ionic_symbol': 'Fe-56 17+',
+ 'is_electron': False,
+ 'is_ion': True,
'integer_charge': 17,
'atomic_number': 26,
'mass_number': 56,
@@ -214,7 +247,8 @@
'element': 'He',
'element_name': 'helium',
'isotope': 'He-4',
- 'ion': 'He-4 2+',
+ 'ionic_symbol': 'He-4 2+',
+ 'is_ion': True,
'integer_charge': 2,
'atomic_number': 2,
'mass_number': 4,
@@ -228,7 +262,8 @@
'element': 'Li',
'element_name': 'lithium',
'isotope': 'Li-7',
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
'integer_charge': ChargeError,
'atomic_number': 3,
'mass_number': 7,
@@ -242,7 +277,8 @@
{'particle': 'Cn-276 22+',
'element': 'Cn',
'isotope': 'Cn-276',
- 'ion': 'Cn-276 22+',
+ 'ionic_symbol': 'Cn-276 22+',
+ 'is_ion': True,
'element_name': 'copernicium',
'integer_charge': 22,
'atomic_number': 112,
@@ -255,7 +291,8 @@
{'particle': 'mu-',
'element': None,
'isotope': None,
- 'ion': None,
+ 'ionic_symbol': None,
+ 'is_ion': False,
'integer_charge': -1,
'atomic_number': InvalidElementError,
'mass_number': InvalidIsotopeError,
@@ -274,6 +311,8 @@
'baryon_number': 0,
'lepton_number': 1,
'half_life': np.inf * u.s,
+ 'is_electron': False,
+ 'is_ion': False,
'is_category("fermion")': True,
'is_category("neutrino")': True,
'is_category("boson")': False,
@@ -323,9 +362,9 @@ def test_Particle_class(arg, kwargs, expected_dict):
try:
with pytest.raises(expected):
exec(f"particle.{key}")
- except pytest.fail.Exception as exc_failed_fail:
+ except pytest.fail.Exception:
errmsg += f"\n{call}[{key}] does not raise {expected}."
- except Exception as exc_bad:
+ except Exception:
errmsg += (f"\n{call}[{key}] does not raise {expected} but "
f"raises a different exception.")
@@ -334,10 +373,10 @@ def test_Particle_class(arg, kwargs, expected_dict):
try:
result = eval(f"particle.{key}")
assert result == expected
- except AssertionError as exc_assert:
+ except AssertionError:
errmsg += (f"\n{call}.{key} returns {result} instead "
f"of the expected value of {expected}.")
- except Exception as exc_general:
+ except Exception:
errmsg += f"\n{call}.{key} raises an unexpected exception."
if len(errmsg) > 0:
@@ -449,6 +488,7 @@ def test_Particle_cmp():
('n', 'neutron'),
('p+', 'proton'),
('H-1', 'p+'),
+ ('H-1 0+', 'p+'),
('D', 'D+'),
('T', 'T+'),
('He-4', 'alpha'),
@@ -470,7 +510,8 @@ def test_particle_class_mass_nuclide_mass(isotope: str, ion: str):
Isotope = Particle(isotope)
Ion = Particle(ion)
- if Isotope.particle == Ion.particle and Isotope.particle in ('n', 'p+'):
+ if Isotope.categories & {'isotope', 'baryon'} and Ion.categories & {'ion', 'baryon'}:
+
particle = Isotope.particle
assert Isotope.nuclide_mass == Ion.mass, (
@@ -524,3 +565,90 @@ def test_particle_is_electron(p, is_one):
def test_particle_bool_error():
with pytest.raises(AtomicError):
bool(Particle('e-'))
+
+
+particle_antiparticle_pairs = [
+ ('p+', 'p-'),
+ ('n', 'antineutron'),
+ ('e-', 'e+'),
+ ('mu-', 'mu+'),
+ ('tau-', 'tau+'),
+ ('nu_e', 'anti_nu_e'),
+ ('nu_mu', 'anti_nu_mu'),
+ ('nu_tau', 'anti_nu_tau'),
+]
+
+
[email protected]("particle, antiparticle", particle_antiparticle_pairs)
+def test_particle_inversion(particle, antiparticle):
+ """Test that particles have the correct antiparticles."""
+ assert Particle(particle).antiparticle == Particle(antiparticle), \
+ (f"The antiparticle of {particle} is found to be "
+ f"{~Particle(particle)} instead of {antiparticle}.")
+
+
[email protected]("particle, antiparticle", particle_antiparticle_pairs)
+def test_antiparticle_inversion(particle, antiparticle):
+ """Test that antiparticles have the correct antiparticles."""
+ assert Particle(antiparticle).antiparticle == Particle(particle), \
+ (f"The antiparticle of {antiparticle} is found to be "
+ f"{~Particle(antiparticle)} instead of {particle}.")
+
+
[email protected](params=ParticleZoo.everything)
+def particle(request):
+ return Particle(request.param)
+
+
[email protected]()
+def opposite(particle):
+ try:
+ opposite_particle = ~particle
+ except Exception as exc:
+ raise InvalidParticleError(
+ f"The unary ~ (invert) operator is unable to find the "
+ f"antiparticle of {particle}.") from exc
+ return opposite_particle
+
+
+class Test_antiparticle_properties_inversion:
+ """
+ Test particle and antiparticle inversion and properties for Particle
+ instances.
+ """
+ def test_inverted_inversion(self, particle):
+ """
+ Test that the antiparticle of the antiparticle of a particle is
+ the original particle.
+ """
+ assert particle == ~~particle, \
+ (f"~~{repr(particle)} equals {repr(~~particle)} instead of "
+ f"{repr(particle)}.")
+
+ def test_opposite_charge(self, particle, opposite):
+ """
+ Test that a particle and its antiparticle have the opposite
+ charge.
+ """
+ assert particle.integer_charge == -opposite.integer_charge, \
+ (f"The charges of {particle} and {opposite} are not "
+ f"opposites, as expected of a particle/antiparticle pair.")
+
+ def test_equal_mass(self, particle, opposite):
+ """
+ Test that a particle and its antiparticle have the same mass.
+ """
+ assert particle._attributes['mass'] == opposite._attributes['mass'], \
+ (f"The masses of {particle} and {opposite} are not equal, "
+ f"as expected of a particle/antiparticle pair.")
+
+ def test_antiparticle_attribute_and_operator(self, particle, opposite):
+ """
+ Test that the Particle.antiparticle attribute returns the same
+ value as the unary ~ (invert) operator acting on the same
+ Particle instance.
+ """
+ assert particle.antiparticle == ~particle, \
+ (f"{repr(particle)}.antiparticle returned "
+ f"{particle.antiparticle}, whereas ~{repr(particle)} "
+ f"returned {~particle}.")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 7
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/automated-code-tests.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==1.0.6
astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
coveralls==4.0.1
cycler==0.12.1
Cython==3.0.12
dill==0.3.9
docopt==0.6.2
exceptiongroup==1.2.2
execnet==2.1.1
flake8==7.2.0
fonttools==4.56.0
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
lmfit==1.3.3
matplotlib==3.9.4
mccabe==0.7.0
mpmath==1.3.0
numpy==1.26.4
packaging==24.2
pillow==11.1.0
-e git+https://github.com/PlasmaPy/PlasmaPy.git@3dad2fc171f3dd72cac4df18dc0581accc8e69d5#egg=plasmapy
pluggy==1.5.0
pycodestyle==2.13.0
pyerfa==2.0.1.5
pyflakes==3.3.2
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
roman==5.0
scipy==1.13.1
six==1.17.0
tomli==2.2.1
uncertainties==3.2.2
urllib3==2.3.0
zipp==3.21.0
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==1.0.6
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- coveralls==4.0.1
- cycler==0.12.1
- cython==3.0.12
- dill==0.3.9
- docopt==0.6.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- flake8==7.2.0
- fonttools==4.56.0
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- lmfit==1.3.3
- matplotlib==3.9.4
- mccabe==0.7.0
- mpmath==1.3.0
- numpy==1.26.4
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyerfa==2.0.1.5
- pyflakes==3.3.2
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- roman==5.0
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- uncertainties==3.2.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[neutron-kwargs0-expected_dict0]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[p+-kwargs1-expected_dict1]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[p--kwargs2-expected_dict2]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[e--kwargs3-expected_dict3]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[e+-kwargs4-expected_dict4]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H-kwargs5-expected_dict5]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H-1",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[D+-kwargs7-expected_dict7]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[tritium-kwargs8-expected_dict8]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Fe-kwargs9-expected_dict9]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[alpha-kwargs10-expected_dict10]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Cn-276-kwargs12-expected_dict12]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[muon-kwargs13-expected_dict13]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[nu_tau-kwargs14-expected_dict14]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[H-1",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[p+-p-]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[n-antineutron]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[e--e+]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[mu--mu+]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[tau--tau+]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_e-anti_nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_mu-anti_nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_tau-anti_nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[p+-p-]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[n-antineutron]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[e--e+]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[mu--mu+]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[tau--tau+]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_e-anti_nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_mu-anti_nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_tau-anti_nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[mu-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[p+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[p-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[e-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[tau+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[e+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[tau-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[mu+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[antineutron]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[n]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[mu-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[p+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[p-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[e-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[tau+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[e+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[tau-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[mu+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[antineutron]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[n]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[mu-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[p+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[p-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[e-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[tau+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[e+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[tau-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[mu+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[antineutron]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[n]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[mu-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[p+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[p-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[e-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_tau]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[tau+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[e+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_mu]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[tau-]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[mu+]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_e]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[antineutron]",
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[n]"
]
| [
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Li-kwargs11-expected_dict11]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[a-kwargs0--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[d+-kwargs1--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[H-kwargs2--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-818-kwargs3--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-12-kwargs4--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-kwargs5--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-kwargs6--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs7--InvalidParticleError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs8-.atomic_number-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[alpha-kwargs9-.standard_atomic_weight-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Fe-56-kwargs10-.standard_atomic_weight-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs11-.standard_atomic_weight-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[tau--kwargs12-.element_name-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[tau+-kwargs13-.atomic_number-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[neutron-kwargs14-.atomic_number-InvalidElementError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[H-kwargs15-.mass_number-InvalidIsotopeError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[neutron-kwargs16-.mass_number-InvalidIsotopeError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[He-kwargs17-.charge-ChargeError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[He-kwargs18-.integer_charge-ChargeError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Fe-kwargs19-.spin-MissingAtomicDataError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[nu_e-kwargs20-.mass-MissingAtomicDataError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Og-kwargs21-.standard_atomic_weight-MissingAtomicDataError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[arg22-kwargs22--TypeError]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[H-----kwargs0--AtomicWarning]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[alpha-kwargs1--AtomicWarning]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[alpha-kwargs2--AtomicWarning]"
]
| [
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles0]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles1]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles2]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles3]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles4]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles5]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles6]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles7]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles8]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles9]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles10]",
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_cmp",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[n-neutron]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[p+-proton]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[H-1-p+]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[D-D+]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[T-T+]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[He-4-alpha]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[Fe-56-Fe-56",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_half_life_string",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_is_electron[Particle(\"e-\")-True]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_is_electron[Particle(\"p+\")-False]",
"plasmapy/atomic/tests/test_particle_class.py::test_particle_bool_error"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,374 | [
"docs/atomic/particle_class.rst",
"plasmapy/atomic/symbols.py",
"plasmapy/atomic/parsing.py",
"plasmapy/atomic/special_particles.py",
"plasmapy/atomic/atomic.py",
"plasmapy/atomic/particle_input.py",
"plasmapy/atomic/particle_class.py"
]
| [
"docs/atomic/particle_class.rst",
"plasmapy/atomic/symbols.py",
"plasmapy/atomic/parsing.py",
"plasmapy/atomic/special_particles.py",
"plasmapy/atomic/atomic.py",
"plasmapy/atomic/particle_input.py",
"plasmapy/atomic/particle_class.py"
]
|
|
missionpinball__mpf-1137 | 941faa48843f7a67de2e59df89af6e44a7d4edbe | 2018-04-07 14:51:26 | 2c1bb3aa1e25674916bc4e0d17ccb6c3c87bd01b | diff --git a/mpf/core/config_spec.py b/mpf/core/config_spec.py
index ef463d1ef..e47ec2dd7 100644
--- a/mpf/core/config_spec.py
+++ b/mpf/core/config_spec.py
@@ -247,6 +247,14 @@ coils:
platform_settings: single|dict|None
psu: single|machine(psus)|default
platform: single|str|None
+digital_outputs:
+ __valid_in__: machine
+ number: single|str|
+ disable_events: dict|str:ms|None
+ enable_events: dict|str:ms|None
+ platform: single|str|None
+ type: single|enum(light,driver)|
+ light_subtype: single|str|None
dual_wound_coils:
__valid_in__: machine
main_coil: single|machine(coils)|
diff --git a/mpf/devices/digital_output.py b/mpf/devices/digital_output.py
new file mode 100644
index 000000000..8a985d1f0
--- /dev/null
+++ b/mpf/devices/digital_output.py
@@ -0,0 +1,100 @@
+"""A digital output on either a light or driver platform."""
+from functools import partial
+from typing import Union, Tuple
+
+from mpf.core.machine import MachineController
+from mpf.core.platform import DriverConfig
+from mpf.core.system_wide_device import SystemWideDevice
+from mpf.platforms.interfaces.driver_platform_interface import PulseSettings, HoldSettings
+
+MYPY = False
+if MYPY: # noqa
+ from mpf.core.platform import DriverPlatform, LightsPlatform
+ from mpf.platforms.interfaces.driver_platform_interface import DriverPlatformInterface
+ from mpf.platforms.interfaces.light_platform_interface import LightPlatformInterface
+
+
+class DigitalOutput(SystemWideDevice):
+
+ """A digital output on either a light or driver platform."""
+
+ config_section = 'digital_outputs'
+ collection = 'digital_outputs'
+ class_label = 'digital_output'
+
+ def __init__(self, machine: MachineController, name: str) -> None:
+ """Initialise digital output."""
+ self.hw_driver = None # type: Union[DriverPlatformInterface, LightPlatformInterface]
+ self.platform = None # type: Union[DriverPlatform, LightsPlatform]
+ self.type = None # type: str
+ super().__init__(machine, name)
+
+ def _initialize(self):
+ """Initialise the hardware driver for this digital output."""
+ if self.config['type'] == "driver":
+ self._initialize_driver()
+ elif self.config['type'] == "light":
+ self._initialize_light()
+ else:
+ raise AssertionError("Invalid type {}".format(self.config['type']))
+
+ def _initialize_light(self):
+ """Configure a light as digital output."""
+ self.platform = self.machine.get_platform_sections('lights', self.config['platform'])
+ self.type = "light"
+
+ try:
+ self.hw_driver = self.platform.configure_light(self.config['number'], self.config['light_subtype'], {})
+ except AssertionError as e:
+ raise AssertionError("Failed to configure light {} in platform. See error above".format(self.name)) from e
+
+ def _initialize_driver(self):
+ """Configure a driver as digital output."""
+ self.platform = self.machine.get_platform_sections('coils', self.config['platform'])
+ self.type = "driver"
+
+ config = DriverConfig(
+ default_pulse_ms=255,
+ default_pulse_power=1.0,
+ default_hold_power=1.0,
+ default_recycle=False,
+ max_pulse_ms=255,
+ max_pulse_power=1.0,
+ max_hold_power=1.0)
+
+ try:
+ self.hw_driver = self.platform.configure_driver(config, self.config['number'], {})
+ except AssertionError as e:
+ raise AssertionError("Failed to configure driver {} in platform. See error above".format(self.name)) from e
+
+ @staticmethod
+ def _get_state(max_fade_ms: int, state: bool) -> Tuple[float, int]:
+ """Return the current state without any fade."""
+ del max_fade_ms
+ if state:
+ return 1.0, -1
+ else:
+ return 0.0, -1
+
+ def enable(self, **kwargs):
+ """Enable digital output."""
+ del kwargs
+ if self.type == "driver":
+ self.hw_driver.enable(PulseSettings(power=1.0, duration=0),
+ HoldSettings(power=1.0))
+ elif self.type == "light":
+ self.hw_driver.set_fade(partial(self._get_state, state=True))
+ self.platform.light_sync()
+ else:
+ raise AssertionError("Invalid type {}".format(self.type))
+
+ def disable(self, **kwargs):
+ """Disable digital output."""
+ del kwargs
+ if self.type == "driver":
+ self.hw_driver.disable()
+ elif self.type == "light":
+ self.hw_driver.set_fade(partial(self._get_state, state=False))
+ self.platform.light_sync()
+ else:
+ raise AssertionError("Invalid type {}".format(self.type))
diff --git a/mpf/mpfconfig.yaml b/mpf/mpfconfig.yaml
index 9459d6ffe..570718476 100644
--- a/mpf/mpfconfig.yaml
+++ b/mpf/mpfconfig.yaml
@@ -38,6 +38,7 @@ mpf:
device_modules:
- mpf.devices.driver.Driver
+ - mpf.devices.digital_output.DigitalOutput
- mpf.devices.dual_wound_coil.DualWoundCoil
- mpf.devices.switch.Switch
- mpf.devices.light.Light
| Implement digital_outputs
Most platforms drive lights and coils separately because they need slightly different commands. However, lights may be on coil outputs (mostly for incandescent bulbs) and we support that case by mapping lights to a coil output (via the virtual coils platform). However, we got some more special cases:
- Outputs to drive motors can be either on coil outputs (most custom games) or on lights outputs (Ghostbusters on Spike)
- Flipper enable outputs (System 1/80; Lisy platform)
- GI enable outputs (System 1/80; Lisy)
- AC relay (System 11; Snux platform)
Implement a digital_output which can either drive a light or a driver/coil channel on a platform. Maybe some platforms even got additional logic outputs (such as Aux on the P-Roc). | missionpinball/mpf | diff --git a/mpf/tests/machine_files/digital_output/config/config.yaml b/mpf/tests/machine_files/digital_output/config/config.yaml
new file mode 100644
index 000000000..b18f3a515
--- /dev/null
+++ b/mpf/tests/machine_files/digital_output/config/config.yaml
@@ -0,0 +1,10 @@
+#config_version=5
+
+digital_outputs:
+ light_output:
+ number: 1
+ type: light
+ light_subtype: test_subtype
+ driver_output:
+ number: 1
+ type: driver
diff --git a/mpf/tests/machine_files/lisy/config/config.yaml b/mpf/tests/machine_files/lisy/config/config.yaml
index 583f4913e..958e06999 100644
--- a/mpf/tests/machine_files/lisy/config/config.yaml
+++ b/mpf/tests/machine_files/lisy/config/config.yaml
@@ -29,20 +29,18 @@ coils:
c_trough_eject:
number: 103
default_pulse_ms: 3s
+
+digital_outputs:
game_over_relay:
- number: 101
- default_hold_power: 1.0
+ number: 1
+ type: light
+ enable_events: ball_started
+ disable_events: ball_will_end
lights:
test_light:
number: 3
-coil_player:
- ball_started:
- game_over_relay: enable
- ball_will_end:
- game_over_relay: disable
-
segment_displays:
info_display:
number: 0
diff --git a/mpf/tests/test_DigitalOutput.py b/mpf/tests/test_DigitalOutput.py
new file mode 100644
index 000000000..c2cb8a86b
--- /dev/null
+++ b/mpf/tests/test_DigitalOutput.py
@@ -0,0 +1,31 @@
+from mpf.platforms.virtual import VirtualDriver, VirtualLight
+
+from mpf.tests.MpfTestCase import MpfTestCase
+
+
+class TestDigitalOutputs(MpfTestCase):
+
+ def getConfigFile(self):
+ return 'config.yaml'
+
+ def getMachinePath(self):
+ return 'tests/machine_files/digital_output/'
+
+ def test_enable_disable(self):
+ light = self.machine.digital_outputs["light_output"].hw_driver
+ driver = self.machine.digital_outputs["driver_output"].hw_driver
+ self.assertIsInstance(driver, VirtualDriver)
+ self.assertIsInstance(light, VirtualLight)
+ self.assertEqual("1", driver.number)
+ self.assertEqual("test_subtype-1", light.number)
+ self.assertEqual("disabled", driver.state)
+ self.machine.digital_outputs["driver_output"].enable()
+ self.assertEqual("enabled", driver.state)
+ self.machine.digital_outputs["driver_output"].disable()
+ self.assertEqual("disabled", driver.state)
+
+ self.assertEqual(0.0, light.current_brightness)
+ self.machine.digital_outputs["light_output"].enable()
+ self.assertEqual(1.0, light.current_brightness)
+ self.machine.digital_outputs["light_output"].disable()
+ self.assertEqual(0.0, light.current_brightness)
diff --git a/mpf/tests/test_Lisy.py b/mpf/tests/test_Lisy.py
index c756861d4..f5cd6560e 100644
--- a/mpf/tests/test_Lisy.py
+++ b/mpf/tests/test_Lisy.py
@@ -190,7 +190,7 @@ class TestLisy(MpfTestCase):
self._wait_for_processing()
self.assertFalse(self.serialMock.expected_commands)
- # disable flipper (using light 1)
+ # disable light (using light 3)
self.serialMock.expected_commands = {
b'\x0c\x03': None
}
@@ -200,16 +200,17 @@ class TestLisy(MpfTestCase):
# start ball. enable flipper (using light 1)
self.serialMock.expected_commands = {
- b'\x18\x65\x0a': None, # set pulse_ms to 10ms
- b'\x15\x65': None
+ b'\x0b\x01': None
}
self.post_event("ball_started")
self._wait_for_processing()
self.assertFalse(self.serialMock.expected_commands)
+ self.advance_time_and_run()
+
# end ball. disable flipper (using light 1)
self.serialMock.expected_commands = {
- b'\x16\x65': None
+ b'\x0c\x01': None
}
self.post_event("ball_will_end")
self._wait_for_processing()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 0.33 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asciimatics==1.14.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
future==1.0.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/missionpinball/mpf.git@941faa48843f7a67de2e59df89af6e44a7d4edbe#egg=mpf
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyfiglet==0.8.post1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pyserial==3.5
pyserial-asyncio==0.6
pytest==6.2.4
ruamel.base==1.0.0
ruamel.yaml==0.10.23
terminaltables==3.1.10
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing==3.7.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
wcwidth==0.2.13
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mpf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- asciimatics==1.14.0
- future==1.0.0
- pillow==8.4.0
- psutil==7.0.0
- pyfiglet==0.8.post1
- pyserial==3.5
- pyserial-asyncio==0.6
- ruamel-base==1.0.0
- ruamel-yaml==0.10.23
- terminaltables==3.1.10
- typing==3.7.4.3
- wcwidth==0.2.13
prefix: /opt/conda/envs/mpf
| [
"mpf/tests/test_DigitalOutput.py::TestDigitalOutputs::test_enable_disable",
"mpf/tests/test_Lisy.py::TestLisy::test_platform"
]
| []
| []
| []
| MIT License | 2,375 | [
"mpf/devices/digital_output.py",
"mpf/mpfconfig.yaml",
"mpf/core/config_spec.py"
]
| [
"mpf/devices/digital_output.py",
"mpf/mpfconfig.yaml",
"mpf/core/config_spec.py"
]
|
|
grabbles__grabbit-60 | 2d48a95f25c674ae64225d426722ac22b64f278b | 2018-04-07 21:23:47 | 5a588731d1a4a42a6b67f09ede110d7770845ed0 | diff --git a/grabbit/core.py b/grabbit/core.py
index cdb0663..8c3c375 100644
--- a/grabbit/core.py
+++ b/grabbit/core.py
@@ -9,6 +9,7 @@ from os.path import (join, basename, dirname, abspath, split, isabs, exists)
from functools import partial
from copy import deepcopy
import warnings
+from keyword import iskeyword
__all__ = ['File', 'Entity', 'Layout']
@@ -90,7 +91,18 @@ class File(object):
Returns the File as a named tuple. The full path plus all entity
key/value pairs are returned as attributes.
"""
- entities = self.entities
+ keys = list(self.entities.keys())
+ replaced = []
+ for i, k in enumerate(keys):
+ if iskeyword(k):
+ replaced.append(k)
+ keys[i] = '%s_' % k
+ if replaced:
+ safe = ['%s_' % k for k in replaced]
+ warnings.warn("Entity names cannot be reserved keywords when "
+ "representing a File as a namedtuple. Replacing "
+ "entities %s with safe versions %s." % (keys, safe))
+ entities = dict(zip(keys, self.entities.values()))
_File = namedtuple('File', 'filename ' + ' '.join(entities.keys()))
return _File(filename=self.path, **entities)
| Handle entity names that conflict with reserved keywords
In cases where an `Entity` uses a reserved keyword as its name (e.g., `class`), exceptions can occur for some `.get()` queries (e.g., when `return_type='tuple'`, because reserved keywords can't be attributes). We need to find some workaround for this--e.g., setting a different name internally, or adopting a convention of appending underscores, etc. See INCF/pybids#142 for relevant discussion. | grabbles/grabbit | diff --git a/grabbit/tests/test_core.py b/grabbit/tests/test_core.py
index e85cbe8..d13da59 100644
--- a/grabbit/tests/test_core.py
+++ b/grabbit/tests/test_core.py
@@ -5,6 +5,7 @@ from os.path import join
import posixpath as psp
import tempfile
import json
+from copy import copy
DIRNAME = os.path.dirname(__file__)
@@ -64,6 +65,7 @@ class TestFile:
assert f.entities == {}
def test_matches(self, file):
+ file = copy(file)
assert file._matches()
assert file._matches(extensions='nii.gz')
assert not file._matches(extensions=['.txt', '.rtf'])
@@ -79,6 +81,7 @@ class TestFile:
regex_search=True)
def test_named_tuple(self, file):
+ file = copy(file)
file.tags = {'attrA': Tag(None, 'apple'), 'attrB': Tag(None, 'banana')}
tup = file.as_named_tuple()
assert(tup.filename == file.path)
@@ -86,6 +89,15 @@ class TestFile:
assert not hasattr(tup, 'task')
assert tup.attrA == 'apple'
+ def test_named_tuple_with_reserved_name(self, file):
+ file = copy(file)
+ file.tags['class'] = Tag(None, 'invalid')
+ with pytest.warns(UserWarning) as w:
+ res = file.as_named_tuple()
+ assert w[0].message.args[0].startswith('Entity names cannot')
+ assert hasattr(res, 'class_')
+ assert not hasattr(res, 'class')
+
class TestEntity:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"six"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/grabbles/grabbit.git@2d48a95f25c674ae64225d426722ac22b64f278b#egg=grabbit
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: grabbit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/grabbit
| [
"grabbit/tests/test_core.py::TestFile::test_named_tuple_with_reserved_name"
]
| []
| [
"grabbit/tests/test_core.py::TestFile::test_init",
"grabbit/tests/test_core.py::TestFile::test_matches",
"grabbit/tests/test_core.py::TestFile::test_named_tuple",
"grabbit/tests/test_core.py::TestEntity::test_init",
"grabbit/tests/test_core.py::TestEntity::test_matches",
"grabbit/tests/test_core.py::TestEntity::test_unique_and_count",
"grabbit/tests/test_core.py::TestEntity::test_add_file",
"grabbit/tests/test_core.py::TestLayout::test_init[local]",
"grabbit/tests/test_core.py::TestLayout::test_init_with_include_arg[local]",
"grabbit/tests/test_core.py::TestLayout::test_init_with_exclude_arg[local]",
"grabbit/tests/test_core.py::TestLayout::test_absolute_paths[local]",
"grabbit/tests/test_core.py::TestLayout::test_querying[local]",
"grabbit/tests/test_core.py::TestLayout::test_natsort[local]",
"grabbit/tests/test_core.py::TestLayout::test_unique_and_count[local]",
"grabbit/tests/test_core.py::TestLayout::test_get_nearest[local]",
"grabbit/tests/test_core.py::TestLayout::test_index_regex[local]",
"grabbit/tests/test_core.py::TestLayout::test_save_index[local]",
"grabbit/tests/test_core.py::TestLayout::test_load_index[local]",
"grabbit/tests/test_core.py::TestLayout::test_clone[local]",
"grabbit/tests/test_core.py::TestLayout::test_parse_file_entities[local]",
"grabbit/tests/test_core.py::test_merge_layouts[local]",
"grabbit/tests/test_core.py::TestLayout::test_dynamic_getters[/grabbit/grabbit/tests/data/7t_trt-/grabbit/grabbit/tests/specs/test.json]",
"grabbit/tests/test_core.py::TestLayout::test_entity_mapper",
"grabbit/tests/test_core.py::TestLayout::test_excludes",
"grabbit/tests/test_core.py::TestLayout::test_multiple_domains",
"grabbit/tests/test_core.py::TestLayout::test_get_by_domain"
]
| []
| MIT License | 2,376 | [
"grabbit/core.py"
]
| [
"grabbit/core.py"
]
|
|
tornadoweb__tornado-2348 | 54ad63e907f4da96d6bddb84b860672b2dc9845b | 2018-04-07 23:14:37 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | diff --git a/docs/guide/async.rst b/docs/guide/async.rst
index fb350d94..60f8a23b 100644
--- a/docs/guide/async.rst
+++ b/docs/guide/async.rst
@@ -91,7 +91,6 @@ And again with a `.Future` instead of a callback:
.. testcode::
from tornado.concurrent import Future
- from tornado.httpclient import AsyncHTTPClient
def async_fetch_future(url):
http_client = AsyncHTTPClient()
diff --git a/docs/releases.rst b/docs/releases.rst
index 74afb455..6f87edc3 100644
--- a/docs/releases.rst
+++ b/docs/releases.rst
@@ -4,6 +4,7 @@ Release notes
.. toctree::
:maxdepth: 2
+ releases/v5.0.2
releases/v5.0.1
releases/v5.0.0
releases/v4.5.3
diff --git a/docs/releases/v5.0.2.rst b/docs/releases/v5.0.2.rst
new file mode 100644
index 00000000..d16b3a8e
--- /dev/null
+++ b/docs/releases/v5.0.2.rst
@@ -0,0 +1,18 @@
+What's new in Tornado 5.0.1
+===========================
+
+Apr 7, 2018
+-----------
+
+Bug fixes
+~~~~~~~~~
+
+- Fixed a memory leak when `.IOLoop` objects are created and destroyed.
+- If `.AsyncTestCase.get_new_ioloop` returns a reference to a
+ preexisting event loop (typically when it has been overridden to
+ return `.IOLoop.current()`), the test's ``tearDown`` method will not
+ close this loop.
+- Fixed a confusing error message when the synchronous `.HTTPClient`
+ fails to initialize because an event loop is already running.
+- `.PeriodicCallback` no longer executes twice in a row due to
+ backwards clock adjustments.
diff --git a/setup.py b/setup.py
index 239d47c5..1f71d6cd 100644
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,7 @@ http://api.mongodb.org/python/current/installation.html#osx
kwargs = {}
-version = "5.0.1"
+version = "5.0.2"
with open('README.rst') as f:
kwargs['long_description'] = f.read()
diff --git a/tornado/__init__.py b/tornado/__init__.py
index d577e8f2..07df195b 100644
--- a/tornado/__init__.py
+++ b/tornado/__init__.py
@@ -24,5 +24,5 @@ from __future__ import absolute_import, division, print_function
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
-version = "5.0.1"
-version_info = (5, 0, 1, 0)
+version = "5.0.2"
+version_info = (5, 0, 2, 0)
diff --git a/tornado/httpclient.py b/tornado/httpclient.py
index 3547631d..9c438d15 100644
--- a/tornado/httpclient.py
+++ b/tornado/httpclient.py
@@ -54,8 +54,10 @@ from tornado.util import Configurable
class HTTPClient(object):
"""A blocking HTTP client.
- This interface is provided for convenience and testing; most applications
- that are running an IOLoop will want to use `AsyncHTTPClient` instead.
+ This interface is provided to make it easier to share code between
+ synchronous and asynchronous applications. Applications that are
+ running an `.IOLoop` must use `AsyncHTTPClient` instead.
+
Typical usage looks like this::
http_client = httpclient.HTTPClient()
@@ -70,8 +72,19 @@ class HTTPClient(object):
# Other errors are possible, such as IOError.
print("Error: " + str(e))
http_client.close()
+
+ .. versionchanged:: 5.0
+
+ Due to limitations in `asyncio`, it is no longer possible to
+ use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
+ Use `AsyncHTTPClient` instead.
+
"""
def __init__(self, async_client_class=None, **kwargs):
+ # Initialize self._closed at the beginning of the constructor
+ # so that an exception raised here doesn't lead to confusing
+ # failures in __del__.
+ self._closed = True
self._io_loop = IOLoop(make_current=False)
if async_client_class is None:
async_client_class = AsyncHTTPClient
diff --git a/tornado/ioloop.py b/tornado/ioloop.py
index 839e7ee5..48700139 100644
--- a/tornado/ioloop.py
+++ b/tornado/ioloop.py
@@ -47,7 +47,6 @@ import threading
import time
import traceback
import math
-import weakref
from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log
@@ -185,7 +184,7 @@ class IOLoop(Configurable):
_current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
- _ioloop_for_asyncio = weakref.WeakKeyDictionary()
+ _ioloop_for_asyncio = dict()
@classmethod
def configure(cls, impl, **kwargs):
@@ -1214,11 +1213,31 @@ class PeriodicCallback(object):
def _schedule_next(self):
if self._running:
- current_time = self.io_loop.time()
-
- if self._next_timeout <= current_time:
- callback_time_sec = self.callback_time / 1000.0
- self._next_timeout += (math.floor((current_time - self._next_timeout) /
- callback_time_sec) + 1) * callback_time_sec
-
+ self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _update_next(self, current_time):
+ callback_time_sec = self.callback_time / 1000.0
+ if self._next_timeout <= current_time:
+ # The period should be measured from the start of one call
+ # to the start of the next. If one call takes too long,
+ # skip cycles to get back to a multiple of the original
+ # schedule.
+ self._next_timeout += (math.floor((current_time - self._next_timeout) /
+ callback_time_sec) + 1) * callback_time_sec
+ else:
+ # If the clock moved backwards, ensure we advance the next
+ # timeout instead of recomputing the same value again.
+ # This may result in long gaps between callbacks if the
+ # clock jumps backwards by a lot, but the far more common
+ # scenario is a small NTP adjustment that should just be
+ # ignored.
+ #
+ # Note that on some systems if time.time() runs slower
+ # than time.monotonic() (most common on windows), we
+ # effectively experience a small backwards time jump on
+ # every iteration because PeriodicCallback uses
+ # time.time() while asyncio schedules callbacks using
+ # time.monotonic().
+ # https://github.com/tornadoweb/tornado/issues/2333
+ self._next_timeout += callback_time_sec
diff --git a/tornado/platform/asyncio.py b/tornado/platform/asyncio.py
index b2ad9fe6..b6a490af 100644
--- a/tornado/platform/asyncio.py
+++ b/tornado/platform/asyncio.py
@@ -38,6 +38,20 @@ class BaseAsyncIOLoop(IOLoop):
self.readers = set()
self.writers = set()
self.closing = False
+ # If an asyncio loop was closed through an asyncio interface
+ # instead of IOLoop.close(), we'd never hear about it and may
+ # have left a dangling reference in our map. In case an
+ # application (or, more likely, a test suite) creates and
+ # destroys a lot of event loops in this way, check here to
+ # ensure that we don't have a lot of dead loops building up in
+ # the map.
+ #
+ # TODO(bdarnell): consider making self.asyncio_loop a weakref
+ # for AsyncIOMainLoop and make _ioloop_for_asyncio a
+ # WeakKeyDictionary.
+ for loop in list(IOLoop._ioloop_for_asyncio):
+ if loop.is_closed():
+ del IOLoop._ioloop_for_asyncio[loop]
IOLoop._ioloop_for_asyncio[asyncio_loop] = self
super(BaseAsyncIOLoop, self).initialize(**kwargs)
@@ -49,6 +63,7 @@ class BaseAsyncIOLoop(IOLoop):
if all_fds:
self.close_fd(fileobj)
self.asyncio_loop.close()
+ del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
| Release 5.0.2
We've fixed a few regressions since 5.0.1, so it's about time for a new release. I think it should include
- #2326 (memory leak, especially with sync HTTPClient)
- #2327 (testing, improves compatibility with pytest-asyncio)
- #2338 (PeriodicCallback misbehaves when clock moves backwards)
| tornadoweb/tornado | diff --git a/tornado/test/asyncio_test.py b/tornado/test/asyncio_test.py
index 41fda20d..02545882 100644
--- a/tornado/test/asyncio_test.py
+++ b/tornado/test/asyncio_test.py
@@ -122,6 +122,44 @@ class AsyncIOLoopTest(AsyncTestCase):
42)
[email protected](asyncio is None, "asyncio module not present")
+class LeakTest(unittest.TestCase):
+ def setUp(self):
+ # Trigger a cleanup of the mapping so we start with a clean slate.
+ AsyncIOLoop().close()
+ # If we don't clean up after ourselves other tests may fail on
+ # py34.
+ self.orig_policy = asyncio.get_event_loop_policy()
+ asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
+
+ def tearDown(self):
+ asyncio.get_event_loop().close()
+ asyncio.set_event_loop_policy(self.orig_policy)
+
+ def test_ioloop_close_leak(self):
+ orig_count = len(IOLoop._ioloop_for_asyncio)
+ for i in range(10):
+ # Create and close an AsyncIOLoop using Tornado interfaces.
+ loop = AsyncIOLoop()
+ loop.close()
+ new_count = len(IOLoop._ioloop_for_asyncio) - orig_count
+ self.assertEqual(new_count, 0)
+
+ def test_asyncio_close_leak(self):
+ orig_count = len(IOLoop._ioloop_for_asyncio)
+ for i in range(10):
+ # Create and close an AsyncIOMainLoop using asyncio interfaces.
+ loop = asyncio.new_event_loop()
+ loop.call_soon(IOLoop.current)
+ loop.call_soon(loop.stop)
+ loop.run_forever()
+ loop.close()
+ new_count = len(IOLoop._ioloop_for_asyncio) - orig_count
+ # Because the cleanup is run on new loop creation, we have one
+ # dangling entry in the map (but only one).
+ self.assertEqual(new_count, 1)
+
+
@unittest.skipIf(asyncio is None, "asyncio module not present")
class AnyThreadEventLoopPolicyTest(unittest.TestCase):
def setUp(self):
diff --git a/tornado/test/ioloop_test.py b/tornado/test/ioloop_test.py
index 1aa3f1e5..c202b9a0 100644
--- a/tornado/test/ioloop_test.py
+++ b/tornado/test/ioloop_test.py
@@ -785,6 +785,62 @@ class TestPeriodicCallback(unittest.TestCase):
io_loop.close()
+class TestPeriodicCallbackMath(unittest.TestCase):
+ def simulate_calls(self, pc, durations):
+ """Simulate a series of calls to the PeriodicCallback.
+
+ Pass a list of call durations in seconds (negative values
+ work to simulate clock adjustments during the call, or more or
+ less equivalently, between calls). This method returns the
+ times at which each call would be made.
+ """
+ calls = []
+ now = 1000
+ pc._next_timeout = now
+ for d in durations:
+ pc._update_next(now)
+ calls.append(pc._next_timeout)
+ now = pc._next_timeout + d
+ return calls
+
+ def test_basic(self):
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, [0] * 5),
+ [1010, 1020, 1030, 1040, 1050])
+
+ def test_overrun(self):
+ # If a call runs for too long, we skip entire cycles to get
+ # back on schedule.
+ call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
+ expected = [
+ 1010, 1020, 1030, # first 3 calls on schedule
+ 1050, 1070, # next 2 delayed one cycle
+ 1100, 1130, # next 2 delayed 2 cycles
+ 1170, 1210, # next 2 delayed 3 cycles
+ 1220, 1230, # then back on schedule.
+ ]
+
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, call_durations),
+ expected)
+
+ def test_clock_backwards(self):
+ pc = PeriodicCallback(None, 10000)
+ # Backwards jumps are ignored, potentially resulting in a
+ # slightly slow schedule (although we assume that when
+ # time.time() and time.monotonic() are different, time.time()
+ # is getting adjusted by NTP and is therefore more accurate)
+ self.assertEqual(self.simulate_calls(pc, [-2, -1, -3, -2, 0]),
+ [1010, 1020, 1030, 1040, 1050])
+
+ # For big jumps, we should perhaps alter the schedule, but we
+ # don't currently. This trace shows that we run callbacks
+ # every 10s of time.time(), but the first and second calls are
+ # 110s of real time apart because the backwards jump is
+ # ignored.
+ self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]),
+ [1010, 1020, 1030])
+
class TestIOLoopConfiguration(unittest.TestCase):
def run_python(self, *statements):
statements = [
diff --git a/tornado/test/testing_test.py b/tornado/test/testing_test.py
index 796530ff..e1f34f08 100644
--- a/tornado/test/testing_test.py
+++ b/tornado/test/testing_test.py
@@ -11,6 +11,11 @@ import platform
import traceback
import warnings
+try:
+ import asyncio
+except ImportError:
+ asyncio = None
+
@contextlib.contextmanager
def set_environ(name, value):
@@ -310,5 +315,30 @@ class GenTest(AsyncTestCase):
self.finished = True
[email protected](asyncio is None, "asyncio module not present")
+class GetNewIOLoopTest(AsyncTestCase):
+ def get_new_ioloop(self):
+ # Use the current loop instead of creating a new one here.
+ return ioloop.IOLoop.current()
+
+ def setUp(self):
+ # This simulates the effect of an asyncio test harness like
+ # pytest-asyncio.
+ self.orig_loop = asyncio.get_event_loop()
+ self.new_loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(self.new_loop)
+ super(GetNewIOLoopTest, self).setUp()
+
+ def tearDown(self):
+ super(GetNewIOLoopTest, self).tearDown()
+ # AsyncTestCase must not affect the existing asyncio loop.
+ self.assertFalse(asyncio.get_event_loop().is_closed())
+ asyncio.set_event_loop(self.orig_loop)
+ self.new_loop.close()
+
+ def test_loop(self):
+ self.assertIs(self.io_loop.asyncio_loop, self.new_loop)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tornado/testing.py b/tornado/testing.py
index 400dd3e4..19bc5a94 100644
--- a/tornado/testing.py
+++ b/tornado/testing.py
@@ -74,6 +74,12 @@ else:
import unittest # type: ignore
+if asyncio is None:
+ _NON_OWNED_IOLOOPS = ()
+else:
+ import tornado.platform.asyncio
+ _NON_OWNED_IOLOOPS = tornado.platform.asyncio.AsyncIOMainLoop
+
def bind_unused_port(reuse_port=False):
"""Binds a server socket to an available port on localhost.
@@ -216,11 +222,12 @@ class AsyncTestCase(unittest.TestCase):
# Clean up Subprocess, so it can be used again with a new ioloop.
Subprocess.uninitialize()
self.io_loop.clear_current()
- # Try to clean up any file descriptors left open in the ioloop.
- # This avoids leaks, especially when tests are run repeatedly
- # in the same process with autoreload (because curl does not
- # set FD_CLOEXEC on its file descriptors)
- self.io_loop.close(all_fds=True)
+ if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
+ # Try to clean up any file descriptors left open in the ioloop.
+ # This avoids leaks, especially when tests are run repeatedly
+ # in the same process with autoreload (because curl does not
+ # set FD_CLOEXEC on its file descriptors)
+ self.io_loop.close(all_fds=True)
super(AsyncTestCase, self).tearDown()
# In case an exception escaped or the StackContext caught an exception
# when there wasn't a wait() to re-raise it, do so here.
@@ -229,9 +236,15 @@ class AsyncTestCase(unittest.TestCase):
self.__rethrow()
def get_new_ioloop(self):
- """Creates a new `.IOLoop` for this test. May be overridden in
- subclasses for tests that require a specific `.IOLoop` (usually
- the singleton `.IOLoop.instance()`).
+ """Returns the `.IOLoop` to use for this test.
+
+ By default, a new `.IOLoop` is created for each test.
+ Subclasses may override this method to return
+ `.IOLoop.current()` if it is not appropriate to use a new
+ `.IOLoop` in each tests (for example, if there are global
+ singletons using the default `.IOLoop`) or if a per-test event
+ loop is being provided by another system (such as
+ ``pytest-asyncio``).
"""
return IOLoop()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 7
} | 5.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[full]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@54ad63e907f4da96d6bddb84b860672b2dc9845b#egg=tornado
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/tornado
| [
"tornado/test/asyncio_test.py::LeakTest::test_asyncio_close_leak",
"tornado/test/asyncio_test.py::LeakTest::test_ioloop_close_leak",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_basic",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_clock_backwards",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_overrun"
]
| []
| [
"tornado/test/asyncio_test.py::AsyncIOLoopTest::test_asyncio_adapter",
"tornado/test/asyncio_test.py::AsyncIOLoopTest::test_asyncio_callback",
"tornado/test/asyncio_test.py::AsyncIOLoopTest::test_asyncio_future",
"tornado/test/asyncio_test.py::AsyncIOLoopTest::test_asyncio_yield_from",
"tornado/test/asyncio_test.py::AnyThreadEventLoopPolicyTest::test_asyncio_accessor",
"tornado/test/asyncio_test.py::AnyThreadEventLoopPolicyTest::test_tornado_accessor",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_from_signal",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_from_signal_other_thread",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_return_sequence",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_wakeup",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_wakeup_other_thread",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_while_closing",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_timeout_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_timeout_timedelta",
"tornado/test/ioloop_test.py::TestIOLoop::test_call_at_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_call_later_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_close_file_object",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging_future",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging_native_coro",
"tornado/test/ioloop_test.py::TestIOLoop::test_handle_callback_exception",
"tornado/test/ioloop_test.py::TestIOLoop::test_handler_callback_file_object",
"tornado/test/ioloop_test.py::TestIOLoop::test_mixed_fd_fileobj",
"tornado/test/ioloop_test.py::TestIOLoop::test_multiple_add",
"tornado/test/ioloop_test.py::TestIOLoop::test_read_while_writeable",
"tornado/test/ioloop_test.py::TestIOLoop::test_reentrant",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_handler_from_handler",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_after_fire",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_cleanup",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_from_timeout",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_without_add",
"tornado/test/ioloop_test.py::TestIOLoop::test_spawn_callback",
"tornado/test/ioloop_test.py::TestIOLoop::test_timeout_with_arguments",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_default_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_force_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_non_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrentAsync::test_clear_without_current",
"tornado/test/ioloop_test.py::TestIOLoopAddCallback::test_pre_wrap",
"tornado/test/ioloop_test.py::TestIOLoopAddCallback::test_pre_wrap_with_args",
"tornado/test/ioloop_test.py::TestIOLoopAddCallbackFromSignal::test_pre_wrap",
"tornado/test/ioloop_test.py::TestIOLoopAddCallbackFromSignal::test_pre_wrap_with_args",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_add_future_stack_context",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_add_future_threads",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_run_in_executor_gen",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_run_in_executor_native",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_set_default_executor",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_async_exception",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_async_result",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_current",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_native_coroutine",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_sync_exception",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_sync_result",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_timeout",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_asyncio",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_asyncio_main",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_default",
"tornado/test/testing_test.py::AsyncTestCaseTest::test_exception_in_callback",
"tornado/test/testing_test.py::AsyncTestCaseTest::test_multiple_errors",
"tornado/test/testing_test.py::AsyncTestCaseTest::test_subsequent_wait_calls",
"tornado/test/testing_test.py::AsyncTestCaseTest::test_wait_timeout",
"tornado/test/testing_test.py::AsyncHTTPTestCaseTest::test_fetch_full_http_url",
"tornado/test/testing_test.py::AsyncHTTPTestCaseTest::test_fetch_full_https_url",
"tornado/test/testing_test.py::AsyncHTTPTestCaseTest::test_fetch_segment",
"tornado/test/testing_test.py::AsyncTestCaseWrapperTest::test_other_return",
"tornado/test/testing_test.py::AsyncTestCaseWrapperTest::test_undecorated_coroutine",
"tornado/test/testing_test.py::AsyncTestCaseWrapperTest::test_undecorated_generator",
"tornado/test/testing_test.py::AsyncTestCaseWrapperTest::test_undecorated_generator_with_skip",
"tornado/test/testing_test.py::SetUpTearDownTest::test_set_up_tear_down",
"tornado/test/testing_test.py::GenTest::test_async",
"tornado/test/testing_test.py::GenTest::test_native_coroutine",
"tornado/test/testing_test.py::GenTest::test_native_coroutine_timeout",
"tornado/test/testing_test.py::GenTest::test_no_timeout",
"tornado/test/testing_test.py::GenTest::test_no_timeout_environment_variable",
"tornado/test/testing_test.py::GenTest::test_sync",
"tornado/test/testing_test.py::GenTest::test_timeout",
"tornado/test/testing_test.py::GenTest::test_timeout_environment_variable",
"tornado/test/testing_test.py::GenTest::test_with_method_args",
"tornado/test/testing_test.py::GenTest::test_with_method_kwargs",
"tornado/test/testing_test.py::GetNewIOLoopTest::test_loop"
]
| []
| Apache License 2.0 | 2,377 | [
"docs/releases.rst",
"tornado/httpclient.py",
"tornado/__init__.py",
"setup.py",
"tornado/platform/asyncio.py",
"docs/guide/async.rst",
"tornado/ioloop.py",
"docs/releases/v5.0.2.rst"
]
| [
"docs/releases.rst",
"tornado/httpclient.py",
"tornado/__init__.py",
"setup.py",
"tornado/platform/asyncio.py",
"docs/guide/async.rst",
"tornado/ioloop.py",
"docs/releases/v5.0.2.rst"
]
|
|
openfoodfacts__openfoodfacts-python-43 | da39e20197634a0beef2229bba9bd73fa1e3fdf7 | 2018-04-08 16:43:50 | da39e20197634a0beef2229bba9bd73fa1e3fdf7 | diff --git a/openfoodfacts/__init__.py b/openfoodfacts/__init__.py
index a891bf3..ff63f77 100644
--- a/openfoodfacts/__init__.py
+++ b/openfoodfacts/__init__.py
@@ -3,6 +3,7 @@ import sys
from . import facets
from . import utils
from .products import get_product
+from . import openbeautyfacts
openfoodfacts = sys.modules[__name__]
__version__ = '0.0.1'
diff --git a/openfoodfacts/facets.py b/openfoodfacts/facets.py
index cc00cb9..0b2f78e 100644
--- a/openfoodfacts/facets.py
+++ b/openfoodfacts/facets.py
@@ -11,8 +11,8 @@ facets = [
'label',
'languages',
'nutrition_grade',
- 'packagings',
- 'packager_code',
+ 'packaging',
+ 'packaging_codes',
'purchase_places',
'photographer',
'informer',
diff --git a/openfoodfacts/openbeautyfacts.py b/openfoodfacts/openbeautyfacts.py
new file mode 100644
index 0000000..dd2f8c4
--- /dev/null
+++ b/openfoodfacts/openbeautyfacts.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+import requests
+from . import utils
+
+
+def get_product(barcode, locale='world'):
+ """
+ Return information of a given product.
+ """
+ return utils.fetch(utils.build_url(geography=locale,
+ service='api',
+ resource_type='product',
+ parameters=barcode,
+ entity="beauty"))
+
+
+def get_by_facets(query, page=1, locale='world'):
+ """
+ Return products for a set of facets.
+ """
+ path = []
+ keys = query.keys()
+
+ if len(keys) == 0:
+ return []
+
+ else:
+ keys = sorted(keys)
+ for key in keys:
+ path.append(key)
+ path.append(query[key])
+
+ return utils. \
+ fetch(utils.build_url(geography=locale,
+ resource_type=path,
+ parameters=str(page),
+ entity="beauty"))['products']
+
+
+def search(query, page=1, page_size=20,
+ sort_by='unique_scans', locale='world'):
+ """
+ Perform a search using Open Food Facts search engine.
+ """
+ parameters = {'search_terms': query,
+ 'page': page,
+ 'page_size': page_size,
+ 'sort_by': sort_by,
+ 'json': '1'}
+
+ path = utils.build_url(geography=locale,
+ service='cgi',
+ resource_type='search.pl',
+ parameters=parameters,
+ entity="beauty")
+
+ return utils.fetch(path, json_file=False)
diff --git a/openfoodfacts/utils.py b/openfoodfacts/utils.py
index 4601e6d..e8d5f99 100644
--- a/openfoodfacts/utils.py
+++ b/openfoodfacts/utils.py
@@ -5,6 +5,7 @@ import sys
import urllib
API_URL = "https://%s.openfoodfacts.org/"
+OBF_API_URL = "https://%s.openbeautyfacts.org/"
def login_into_OFF():
@@ -62,9 +63,16 @@ def download_data(file_type='mongodb'):
def build_url(geography='world', service=None,
- resource_type=None, parameters=None):
+ resource_type=None, parameters=None, entity="food"):
- geo_url = API_URL % (geography)
+ if entity == "food":
+ geo_url = API_URL % (geography)
+
+ elif entity == "beauty":
+ geo_url = OBF_API_URL % (geography)
+
+ else:
+ raise ValueError("Product not recognized!")
geo_url = geo_url[:-1]
| Make it possible to create an Open Beauty Facts variant | openfoodfacts/openfoodfacts-python | diff --git a/tests/facets_test.py b/tests/facets_test.py
index b54228a..8d4e5c6 100644
--- a/tests/facets_test.py
+++ b/tests/facets_test.py
@@ -10,15 +10,35 @@ class TestFacets(unittest.TestCase):
mock.get('https://world.openfoodfacts.org/traces.json',
text='{"tags":["egg"]}')
res = openfoodfacts.facets.get_traces()
- self.assertEquals(res, ["egg"])
+ self.assertEqual(res, ["egg"])
def test_get_additives(self):
with requests_mock.mock() as mock:
mock.get('https://world.openfoodfacts.org/additives.json',
text='{"tags":["additive"]}')
res = openfoodfacts.facets.get_additives()
- self.assertEquals(res, ["additive"])
+ self.assertEqual(res, ["additive"])
+ def test_get_purchase_places(self):
+ with requests_mock.mock() as mock:
+ mock.get('https://world.openfoodfacts.org/purchase-places.json',
+ text='{"tags":["France"]}')
+ res = openfoodfacts.facets.get_purchase_places()
+ self.assertEqual(res, ["France"])
+
+ def test_get_packaging_codes(self):
+ with requests_mock.mock() as mock:
+ mock.get('https://world.openfoodfacts.org/packager-codes.json',
+ text='{"tags":["FABRICANTE-Y-ENVASADOR"]}')
+ res = openfoodfacts.facets.get_packaging_codes()
+ self.assertEqual(res, ["FABRICANTE-Y-ENVASADOR"])
+
+ def test_get_entry_dates(self):
+ with requests_mock.mock() as mock:
+ mock.get('https://world.openfoodfacts.org/entry-dates.json',
+ text='{"tags":["2017"]}')
+ res = openfoodfacts.facets.get_entry_dates()
+ self.assertEqual(res, ["2017"])
if __name__ == '__main__':
unittest.main()
diff --git a/tests/products_test.py b/tests/products_test.py
index a2c3ab9..1bf702c 100644
--- a/tests/products_test.py
+++ b/tests/products_test.py
@@ -13,32 +13,32 @@ class TestProducts(unittest.TestCase):
'https://world.openfoodfacts.org/api/v0/product/1223435.json',
text='{"name":"product_test"}')
res = openfoodfacts.get_product('1223435')
- self.assertEquals(res, {'name': 'product_test'})
+ self.assertEqual(res, {'name': 'product_test'})
def test_get_by_trace(self):
with requests_mock.mock() as mock:
mock.get('https://world.openfoodfacts.org/trace/egg/1.json',
text='{"products":["omelet"]}')
res = openfoodfacts.products.get_by_trace('egg')
- self.assertEquals(res, ["omelet"])
+ self.assertEqual(res, ["omelet"])
def test_get_by_trace_pagination(self):
with requests_mock.mock() as mock:
mock.get('https://world.openfoodfacts.org/trace/egg/2.json',
text='{"products":["omelet"]}')
res = openfoodfacts.products.get_by_trace('egg', 2)
- self.assertEquals(res, ["omelet"])
+ self.assertEqual(res, ["omelet"])
def test_get_by_country(self):
with requests_mock.mock() as mock:
mock.get('https://world.openfoodfacts.org/country/france/1.json',
text='{"products":["omelet"]}')
res = openfoodfacts.products.get_by_country('france')
- self.assertEquals(res, ["omelet"])
+ self.assertEqual(res, ["omelet"])
def test_get_by_country_and_trace(self):
res = openfoodfacts.products.get_by_facets({})
- self.assertEquals(res, [])
+ self.assertEqual(res, [])
with requests_mock.mock() as mock:
mock.get(
@@ -47,7 +47,7 @@ class TestProducts(unittest.TestCase):
text='{"products":["omelet"]}')
res = openfoodfacts.products.get_by_facets(
{'trace': 'egg', 'country': 'france'})
- self.assertEquals(res, ["omelet"])
+ self.assertEqual(res, ["omelet"])
def test_search(self):
with requests_mock.mock() as mock:
@@ -57,7 +57,7 @@ class TestProducts(unittest.TestCase):
'1&page_size=20&sort_by=unique_scans',
text='{"products":["kinder bueno"], "count": 1}')
res = openfoodfacts.products.search('kinder bueno')
- self.assertEquals(res["products"], ["kinder bueno"])
+ self.assertEqual(res["products"], ["kinder bueno"])
mock.get(
'https://world.openfoodfacts.org/cgi/search.pl?' +
'search_terms=banania&json=1&page=' +
@@ -65,7 +65,30 @@ class TestProducts(unittest.TestCase):
text='{"products":["banania", "banania big"], "count": 2}')
res = openfoodfacts.products.search(
'banania', page=2, page_size=10)
- self.assertEquals(res["products"], ["banania", "banania big"])
+ self.assertEqual(res["products"], ["banania", "banania big"])
+
+ def test_advanced_search(self):
+ with requests_mock.mock() as mock:
+ mock.get(
+ 'https://world.openfoodfacts.org/cgi/search.pl?' +
+ 'search_terms=coke&tagtype_0=packaging&' +
+ 'tag_contains_0=contains&tag_0=plastic&' +
+ 'nutriment_0=energy&nutriment_compare_0=gt&' +
+ 'nutriment_value_0=0&sort_by=unique_scans&' +
+ 'page_size=20',
+ text= '{"products":["Diet Coke"], "count": 1}')
+ res = openfoodfacts.products.advanced_search({
+ "search_terms":"coke",
+ "tagtype_0":"packaging",
+ "tag_contains_0":"contains",
+ "tag_0":"plastic",
+ "nutriment_0":"energy",
+ "nutriment_compare_0":"gt",
+ "nutriment_value_0":"0",
+ "sort_by":"unique_scans",
+ "page_size":"20"
+ })
+ self.assertEqual(res["products"],["Diet Coke"])
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/openfoodfacts/openfoodfacts-python.git@da39e20197634a0beef2229bba9bd73fa1e3fdf7#egg=openfoodfacts
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.11.1
requests-mock==1.12.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: openfoodfacts-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.11.1
- requests-mock==1.12.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/openfoodfacts-python
| [
"tests/facets_test.py::TestFacets::test_get_packaging_codes"
]
| []
| [
"tests/facets_test.py::TestFacets::test_get_additives",
"tests/facets_test.py::TestFacets::test_get_entry_dates",
"tests/facets_test.py::TestFacets::test_get_purchase_places",
"tests/facets_test.py::TestFacets::test_get_traces",
"tests/products_test.py::TestProducts::test_advanced_search",
"tests/products_test.py::TestProducts::test_get_by_country",
"tests/products_test.py::TestProducts::test_get_by_country_and_trace",
"tests/products_test.py::TestProducts::test_get_by_trace",
"tests/products_test.py::TestProducts::test_get_by_trace_pagination",
"tests/products_test.py::TestProducts::test_get_product",
"tests/products_test.py::TestProducts::test_search"
]
| []
| MIT License | 2,378 | [
"openfoodfacts/__init__.py",
"openfoodfacts/facets.py",
"openfoodfacts/openbeautyfacts.py",
"openfoodfacts/utils.py"
]
| [
"openfoodfacts/__init__.py",
"openfoodfacts/facets.py",
"openfoodfacts/openbeautyfacts.py",
"openfoodfacts/utils.py"
]
|
|
ionelmc__python-holdup-2 | 01e29e2ee1348043caa29684f0b841dae95990ec | 2018-04-08 18:41:24 | 01e29e2ee1348043caa29684f0b841dae95990ec | diff --git a/.gitignore b/.gitignore
index a74475a..66ad3dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -59,6 +59,8 @@ docs/_build
.env
.cache
.pytest
+.pytest_cache
.bootstrap
.appveyor.token
*.bak
+.venv/
diff --git a/AUTHORS.rst b/AUTHORS.rst
index d01607a..3268092 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -3,3 +3,4 @@ Authors
=======
* Ionel Cristian Mărieș - https://blog.ionelmc.ro
+* Mithun Ayachit
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 93aaf2c..b6682f3 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,10 @@
Changelog
=========
+1.7.0 ()
+------------------
+
+* Added support for skipping SSL certificate verification for HTTPS services
1.6.0 (2018-03-22)
------------------
diff --git a/README.rst b/README.rst
index 7c59423..fc1cbb3 100644
--- a/README.rst
+++ b/README.rst
@@ -72,7 +72,7 @@ Installation
Documentation
=============
-Usage: ``holdup [-h] [-t SECONDS] [-T SECONDS] [-i SECONDS] [-n] service [service ...] [-- command [arg [arg ...]]]``
+Usage: ``holdup [-h] [-t SECONDS] [-T SECONDS] [-i SECONDS] [-n] [--insecure] service [service ...] [-- command [arg [arg ...]]]``
Wait for services to be ready and optionally exec command.
@@ -81,7 +81,7 @@ Positional arguments:
A service to wait for. Supported protocols:
"tcp://host:port/", "path:///path/to/something",
"unix:///path/to/domain.sock", "eval://expr",
- "http://urn", "http://urn" (status 200 expected). Join
+ "http://urn", "http://urn", "https+insecure//urn" (status 200 expected). Join
protocols with a comma to make holdup exit at the
first passing one, eg: tcp://host:1,host:2 or
tcp://host:1,tcp://host:2 are equivalent and mean "any
@@ -100,6 +100,7 @@ Optional arguments:
How often to check. Default: 0.2
-n, --no-abort Ignore failed services. This makes `holdup` return 0
exit code regardless of services actually responding.
+ --insecure Skip SSL Certificate verification for HTTPS services.
Suggested use
-------------
@@ -108,8 +109,8 @@ Assuming you always want the container to wait add this in your ``Dockerfile``::
COPY entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]
- CMD ["/bin/bash"]
-
+ CMD ["/bin/bash"]
+
Then in ``entrypoint.sh`` you could have::
#!/bin/sh
@@ -122,6 +123,19 @@ Then in ``entrypoint.sh`` you could have::
The only disadvantage is that you might occasionally need to use ``docker run --entrypoint=''`` to avoid running holdup. No biggie.
+Insecure HTTPS Service Checks
+-------------------------------
+
+You may choose to skip SSL validation when waiting for an HTTPS service (for e.g., when using an IP Address). This can be done using either of the following methods::
+
+ # Specifying a https+insecure protocol
+ holdup https+insecure://10.1.2.3/
+
+ # Specifying the --insecure` option
+ holdup --insecure https://10.1.2.3/
+
+Skipping SSL Certificate verification requires a minimum of Python-2.7.9 or Python-3.4.3.
+
Development
===========
diff --git a/src/holdup/cli.py b/src/holdup/cli.py
index f5417bd..9615dd7 100644
--- a/src/holdup/cli.py
+++ b/src/holdup/cli.py
@@ -50,13 +50,13 @@ except ImportError:
class Check(object):
error = None
- def is_passing(self, timeout, verbose):
+ def is_passing(self, options):
try:
- self.run(timeout, verbose)
+ self.run(options)
except Exception as exc:
self.error = exc
else:
- if verbose:
+ if options.verbose:
print('holdup: Passed check: %r' % self)
return True
@@ -72,9 +72,9 @@ class TcpCheck(Check):
self.host = host
self.port = port
- def run(self, timeout, _):
+ def run(self, options):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.settimeout(timeout)
+ sock.settimeout(options.check_timeout)
with closing(sock):
sock.connect((self.host, self.port))
@@ -84,14 +84,49 @@ class TcpCheck(Check):
class HttpCheck(Check):
def __init__(self, url):
- self.url = url
-
- def run(self, timeout, _):
- if hasattr(ssl, 'create_default_context') and 'context' in getargspec(urlopen).args:
- kwargs = {'context': ssl.create_default_context()}
+ self.do_insecure = False
+ proto, urn = url.split('://', 1)
+ if proto == 'https+insecure':
+ self.do_insecure = True
+ proto = 'https'
+ self.url = '{}://{}'.format(proto, urn)
+
+ def can_create_default_context(self):
+ """
+ Check if the current python version supports
+ * ssl.create_default_context()
+ * 'context' kwargs for urlopen
+ Supported Python versions are:
+ * >2.7.9
+ * >3.4.3
+ """
+ if hasattr(ssl, 'create_default_context'):
+ urlopen_argspec = getargspec(urlopen)
+ urlopen_args = urlopen_argspec.args
+ if hasattr(urlopen_argspec, 'kwonlyargs'):
+ urlopen_args.extend(urlopen_argspec.kwonlyargs)
+ if 'context' in urlopen_args:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def run(self, options):
+ kwargs = {}
+ do_insecure = self.do_insecure
+ if options.insecure:
+ do_insecure = True
+ if self.can_create_default_context():
+ ssl_ctx = ssl.create_default_context()
+ if do_insecure:
+ ssl_ctx.check_hostname = False
+ ssl_ctx.verify_mode = ssl.CERT_NONE
+ kwargs = {'context': ssl_ctx}
else:
- kwargs = {}
- with closing(urlopen(self.url, timeout=timeout, **kwargs)) as req:
+ if do_insecure:
+ raise Exception("Insecure HTTPS is not supported with the current version of python")
+ with closing(urlopen(self.url, timeout=options.check_timeout, **kwargs)) as req:
status = req.getcode()
if status != 200:
raise Exception("Expected status code 200, got: %r." % status)
@@ -104,9 +139,9 @@ class UnixCheck(Check):
def __init__(self, path):
self.path = path
- def run(self, timeout, _verbose):
+ def run(self, options):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.settimeout(timeout)
+ sock.settimeout(options.check_timeout)
with closing(sock):
sock.connect(self.path)
@@ -118,7 +153,7 @@ class PathCheck(Check):
def __init__(self, path):
self.path = path
- def run(self, _timeout, _verbose):
+ def run(self, _):
os.stat(self.path)
if not os.access(self.path, os.R_OK):
raise Exception("Failed access(%r, 'R_OK') test." % self.path)
@@ -146,7 +181,7 @@ class EvalCheck(Check):
raise argparse.ArgumentTypeError('Invalid service spec %r. Import error: %s' % (expr, exc))
self.ns[node.id] = sys.modules[node.id]
- def run(self, _timeout, _verbose):
+ def run(self, _):
result = eval(self.expr, dict(self.ns), dict(self.ns))
if not result:
raise Exception("Failed to evaluate %r. Result %r is falsey." % (self.expr, result))
@@ -159,10 +194,10 @@ class AnyCheck(Check):
def __init__(self, checks):
self.checks = checks
- def run(self, timeout, verbose):
+ def run(self, options):
errors = []
for check in self.checks:
- if check.is_passing(timeout, verbose):
+ if check.is_passing(options):
return
else:
errors.append(check)
@@ -206,7 +241,7 @@ def parse_value(value, proto):
return UnixCheck(value)
elif proto == 'path':
return PathCheck(value)
- elif proto in ('http', 'https'):
+ elif proto in ('http', 'https', 'https+insecure'):
return HttpCheck('%s://%s' % (proto, value))
elif proto == 'eval':
return EvalCheck(value)
@@ -222,7 +257,7 @@ parser.add_argument('service', nargs=argparse.ONE_OR_MORE, type=parse_service,
help='A service to wait for. '
'Supported protocols: "tcp://host:port/", "path:///path/to/something", '
'"unix:///path/to/domain.sock", "eval://expr", '
- '"http://urn", "http://urn" (status 200 expected). '
+ '"http://urn", "http://urn", "https+insecure//urn" (status 200 expected). '
'Join protocols with a comma to make holdup exit at the first '
'passing one, eg: tcp://host:1,host:2 or tcp://host:1,tcp://host:2 are equivalent and mean '
'"any that pass".')
@@ -239,6 +274,8 @@ parser.add_argument('-v', '--verbose', action='store_true',
parser.add_argument('-n', '--no-abort', action='store_true',
help='Ignore failed services. '
'This makes `holdup` return 0 exit code regardless of services actually responding.')
+parser.add_argument('--insecure', action='store_true',
+ help='Disable SSL Certificate verification for HTTPS services')
def main():
@@ -270,7 +307,7 @@ def main():
at_least_once = True
while at_least_once or pending and time() - start < options.timeout:
lapse = time()
- pending = [check for check in pending if not check.is_passing(options.check_timeout, options.verbose)]
+ pending = [check for check in pending if not check.is_passing(options)]
sleep(max(0, options.interval - time() + lapse))
at_least_once = False
| Add support for Insecure HTTPS
The current `HttpCheck` against a https service defaults to requiring SSL Certificate Validation. In certain cases, we would like to perform a check against an IP address instead of a hostname (for e.g., `holdup https://10.1.2.3`). In this case the certificate validation will always fail due to a hostname mismatch.
Can we introduce an option to ignore certificate validation? For e.g., `holdup --insecure https://10.1.2.3`
I have been able to implement this change [here](https://github.com/mithun/python-holdup/commit/da1701f74881de6dd4fa561e601ca9a2737dd7df). However, my change will break support for older pythons.
I'm willing to submit a PR if you can provide suggestions on how backwards compatibility can be maintained. | ionelmc/python-holdup | diff --git a/tests/test_holdup.py b/tests/test_holdup.py
index 106531e..7b69e57 100644
--- a/tests/test_holdup.py
+++ b/tests/test_holdup.py
@@ -1,11 +1,35 @@
import os
import socket
+import ssl
+import sys
import threading
import pytest
+try:
+ from inspect import getfullargspec as getargspec
+except ImportError:
+ from inspect import getargspec
+
+try:
+ from urllib.request import urlopen
+except ImportError:
+ from urllib2 import urlopen
+
pytest_plugins = 'pytester',
+def skip_http_insecure_test():
+ if hasattr(ssl, 'create_default_context'):
+ urlopen_argspec = getargspec(urlopen)
+ urlopen_args = urlopen_argspec.args
+ if hasattr(urlopen_argspec, 'kwonlyargs'):
+ urlopen_args.extend(urlopen_argspec.kwonlyargs)
+ if 'context' in urlopen_args:
+ return False
+ else:
+ return True
+ else:
+ return True
@pytest.fixture(params=[[], ['--', 'python', '-c', 'print("success !")']])
def extra(request):
@@ -47,6 +71,7 @@ def test_http(testdir, extra, status, proto):
result = testdir.run(
'holdup',
'-T', '5',
+ '-t', '5.1',
'%s://httpbin.org/status/%s' % (proto, status),
*extra
)
@@ -57,6 +82,27 @@ def test_http(testdir, extra, status, proto):
result.stderr.fnmatch_lines(['*HTTP Error 404*'])
[email protected](skip_http_insecure_test(),reason="requires ssl.create_default_context")
+def test_http_insecure_with_option(testdir):
+ result = testdir.run(
+ 'holdup',
+ '-t', '2',
+ '--insecure',
+ 'https://self-signed.badssl.com/',
+ )
+ assert result.ret == 0
+
+
[email protected](skip_http_insecure_test(),reason="requires ssl.create_default_context")
+def test_http_insecure_with_proto(testdir):
+ result = testdir.run(
+ 'holdup',
+ '-t', '2',
+ 'https+insecure://self-signed.badssl.com/',
+ )
+ assert result.ret == 0
+
+
def test_any(testdir, extra):
tcp = socket.socket()
tcp.bind(('127.0.0.1', 0))
@@ -123,9 +169,9 @@ def test_any_failed(testdir):
result.stderr.fnmatch_lines([
'holdup: Failed service checks: any(tcp://localhost:%s,path:///doesnt/exist,unix:///doesnt/exist) '
'(Nothing succeeded: '
- 'tcp://localhost:%s ([[]Errno 111[]]*), '
- 'path:///doesnt/exist ([[]Errno 2[]]*), '
- 'unix:///doesnt/exist ([[]Errno 2[]]*). Aborting!' % (port, port)
+ 'tcp://localhost:%s (*), '
+ 'path:///doesnt/exist (*), '
+ 'unix:///doesnt/exist (*). Aborting!' % (port, port)
])
@@ -141,11 +187,12 @@ def test_no_abort(testdir, extra):
*extra
)
result.stderr.fnmatch_lines([
- 'holdup: Failed checks: tcp://localhost:0 ([[]Errno 111[]]*), '
- 'path:///doesnt/exist ([[]Errno 2[]]*), unix:///doesnt/exist ([[]Errno 2[]]*)'
+ 'holdup: Failed checks: tcp://localhost:0 (*), '
+ 'path:///doesnt/exist (*), unix:///doesnt/exist (*)'
])
[email protected](os.path.exists('/.dockerenv'),reason="chmod(0) does not work in docker")
def test_not_readable(testdir, extra):
foobar = testdir.maketxtfile(foobar='')
foobar.chmod(0)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 5
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-travis-fold",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
-e git+https://github.com/ionelmc/python-holdup.git@01e29e2ee1348043caa29684f0b841dae95990ec#egg=holdup
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pytest-travis-fold==1.3.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: python-holdup
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- pytest-cov==4.0.0
- pytest-travis-fold==1.3.0
- tomli==1.2.3
prefix: /opt/conda/envs/python-holdup
| [
"tests/test_holdup.py::test_http_insecure_with_option",
"tests/test_holdup.py::test_http_insecure_with_proto"
]
| [
"tests/test_holdup.py::test_any[extra1]",
"tests/test_holdup.py::test_no_abort[extra1]",
"tests/test_holdup.py::test_not_readable[extra0]",
"tests/test_holdup.py::test_not_readable[extra1]",
"tests/test_holdup.py::test_eval_distutils[extra0]",
"tests/test_holdup.py::test_eval_distutils[extra1]"
]
| [
"tests/test_holdup.py::test_normal[extra0]",
"tests/test_holdup.py::test_normal[extra1]",
"tests/test_holdup.py::test_http[extra0-http-200]",
"tests/test_holdup.py::test_http[extra0-http-404]",
"tests/test_holdup.py::test_http[extra0-https-200]",
"tests/test_holdup.py::test_http[extra0-https-404]",
"tests/test_holdup.py::test_http[extra1-http-200]",
"tests/test_holdup.py::test_http[extra1-http-404]",
"tests/test_holdup.py::test_http[extra1-https-200]",
"tests/test_holdup.py::test_http[extra1-https-404]",
"tests/test_holdup.py::test_any[extra0]",
"tests/test_holdup.py::test_any_same_proto[extra0]",
"tests/test_holdup.py::test_any_same_proto[extra1]",
"tests/test_holdup.py::test_any_failed",
"tests/test_holdup.py::test_no_abort[extra0]",
"tests/test_holdup.py::test_bad_timeout",
"tests/test_holdup.py::test_eval_bad_import",
"tests/test_holdup.py::test_eval_bad_expr",
"tests/test_holdup.py::test_eval_falsey",
"tests/test_holdup.py::test_eval_comma[extra0]",
"tests/test_holdup.py::test_eval_comma[extra1]",
"tests/test_holdup.py::test_eval_comma_anycheck[extra0]",
"tests/test_holdup.py::test_eval_comma_anycheck[extra1]"
]
| []
| BSD 2-Clause "Simplified" License | 2,379 | [
"README.rst",
"AUTHORS.rst",
".gitignore",
"CHANGELOG.rst",
"src/holdup/cli.py"
]
| [
"README.rst",
"AUTHORS.rst",
".gitignore",
"CHANGELOG.rst",
"src/holdup/cli.py"
]
|
|
flexxui__pscript-17 | 59d373350fb63db08db39d6527b4186c2490cc27 | 2018-04-09 13:51:22 | 59d373350fb63db08db39d6527b4186c2490cc27 | diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..6bd7633
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,5 @@
+include LICENSE README.md
+
+global-exclude .git*
+global-exclude *.pyo
+global-exclude *.pyc
diff --git a/pscript/__init__.py b/pscript/__init__.py
index 5fb654a..ec73544 100644
--- a/pscript/__init__.py
+++ b/pscript/__init__.py
@@ -186,7 +186,7 @@ Supported Python conveniences:
round, int, float, str, bool, abs, divmod, all, any, enumerate, zip,
reversed, sorted, filter, map.
* all methods of list, dict and str are supported (except a few string
- methods: encode, format, format_map, isdecimal, isdigit, isprintable, maketrans)
+ methods: encode, format_map, isprintable, maketrans).
* the default return value of a function is ``None``/``null`` instead
of ``undefined``.
* list concatenation using the plus operator, and list/str repeating
diff --git a/pscript/commonast.py b/pscript/commonast.py
index 6210183..dcdaf7a 100644
--- a/pscript/commonast.py
+++ b/pscript/commonast.py
@@ -223,6 +223,30 @@ class Str(Node):
"""
__slots__ = 'value',
+class FormattedValue(Node):
+ """ Node representing a single formatting field in an f-string. If the
+ string contains a single formatting field and nothing else the node can be
+ isolated, otherwise it appears in JoinedStr.
+
+ Attributes:
+ value_node: an expression (can be anything).
+ conversion: a string, '' means no formatting, 's' means !s string
+ formatting, 'r' means !r repr formatting, 'a' means !a ascii
+ formatting.
+ format_node: a JoinedStr node reprensenting the formatting, or None
+ if no format was specified. Both conversion and format_node can be
+ set at the same time.
+ """
+ __slots__ = 'value_node', 'conversion', 'format_node'
+
+class JoinedStr(Node):
+ """ An f-string, comprising a series of FormattedValue and Str nodes.
+
+ Attributes:
+ value_nodes: list of Str and FormattedValue nodes.
+ """
+ __slots__ = 'value_nodes',
+
class Bytes(Node):
"""
Attributes:
@@ -790,21 +814,33 @@ class NativeAstConverter:
return Num(n.n)
def _convert_Str(self, n):
- # Get string modifier char
- line = self._lines[n.lineno-1]
- pre = ''
- if line[n.col_offset] not in '"\'':
- pre += line[n.col_offset]
- if line[n.col_offset + 1] not in '"\'':
- pre += line[n.col_offset + 1]
- # Formatted, bytes?
- if 'f' in pre:
- raise RuntimeError('Cannot do formatted string literals yet: ' +
- line)
- if pyversion < (3, ) and 'b' in pre:
- return Bytes(n.s)
+ # We check the string prefix here. We only really need it in Python 2,
+ # because u is not needed in py3, and b and r are resolved by the lexer,
+ # and f as well (resulting in JoinedStr or FormattedValue).
+ # Note that the col_offset of the node seems 1 off when the string is
+ # a key in a dict :/ (PScript issue #15)
+ if pyversion < (3, ):
+ line = self._lines[n.lineno-1]
+ i = n.col_offset
+ i = i - 1 if (i > 0 and line[i-1] in 'rufb"\'') else i
+ pre = ''
+ if line[i] not in '"\'':
+ pre += line[i]
+ if line[i + 1] not in '"\'':
+ pre += line[i + 1]
+ if 'b' in pre:
+ return Bytes(n.s)
return Str(n.s)
+ def _convert_JoinedStr(self, n):
+ c = self._convert
+ return JoinedStr([c(x) for x in n.values])
+
+ def _convert_FormattedValue(self, n):
+ conversion = '' if n.conversion < 0 else chr(n.conversion)
+ return FormattedValue(self._convert(n.value), conversion,
+ self._convert(n.format_spec))
+
def _convert_Bytes(self, n):
return Bytes(n.s)
diff --git a/pscript/parser0.py b/pscript/parser0.py
index be23d70..f7afe46 100644
--- a/pscript/parser0.py
+++ b/pscript/parser0.py
@@ -299,7 +299,7 @@ class Parser0:
lineno = getattr(node, 'lineno', -1)
if self._pysource:
filename, lineno = self._pysource
- lineno += node.lineno - 1
+ lineno += node.lineno
msg = 'Error processing %s-node' % (node.__class__.__name__)
if classNode:
diff --git a/pscript/parser1.py b/pscript/parser1.py
index 16dbab2..44bd08b 100644
--- a/pscript/parser1.py
+++ b/pscript/parser1.py
@@ -47,12 +47,39 @@ Slicing and subscriping
String formatting
-----------------
-Basic string formatting is supported for "%s", "%f", and "%i".
+String formatting is supported in various forms.
.. pscript_example::
- "value: %f" % val
- "%s: %f" % (name, val)
+ # Old school
+ "value: %g" % val
+ "%s: %0.2f" % (name, val)
+
+ # Modern
+ "value: {:g}".format(val)
+ "{}: {:3.2f}".format(name, val)
+
+ # F-strings (python 3.6+)
+ f"value: {val:g}"
+ f"{name}: {val:3.2f}"
+
+ # This also works
+ t = "value: {:g}"
+ t.format(val)
+
+ # But this does not (because PScript cannot know whether t is str or float)
+ t = "value: %g"
+ t % val
+
+Kinds of formatting that is supported:
+
+* Float, exponential en "general" number formatting.
+* Specifying precision for numbers.
+* Padding of number with "+" or " ".
+* Repr-formatting.
+
+At the moment, PScript does not support advanced features such as string
+padding.
Assignments
@@ -168,11 +195,31 @@ from .parser0 import Parser0, JSError, unify, reprs # noqa
# Define builtin stuff for which we know that it returns a bool or int
_bool_funcs = 'hasattr', 'all', 'any', 'op_contains', 'op_equals', 'truthy'
_bool_meths = ('count', 'isalnum', 'isalpha', 'isidentifier', 'islower',
- 'isnumeric', 'isspace', 'istitle', 'isupper', 'startswith')
+ 'isnumeric', 'isdigit', 'isdecimal', 'isspace', 'istitle',
+ 'isupper', 'startswith')
returning_bool = tuple([stdlib.FUNCTION_PREFIX + x + '(' for x in _bool_funcs] +
[stdlib.METHOD_PREFIX + x + '.' for x in _bool_meths])
+# precompile regexp to help determine whether a string is an identifier
+isidentifier1 = re.compile(r'^\w+$', re.UNICODE)
+
+reserved_names = (
+ 'abstract', 'instanceof', 'boolean', 'enum', 'switch', 'export',
+ 'interface', 'synchronized', 'extends', 'let', 'case', 'throw',
+ 'catch', 'final', 'native', 'throws', 'new', 'transient',
+ 'const', 'package', 'function', 'private', 'typeof', 'debugger', 'goto',
+ 'protected', 'var', 'default', 'public', 'void', 'delete', 'implements',
+ 'volatile', 'do', 'static',
+ # Commented, because are disallowed in Python too.
+ # 'else', 'break', 'finally', 'class', 'for', 'try', 'continue', 'if',
+ # 'return', 'import', 'while', 'in', 'with',
+ # Commented for pragmatic reasons
+ # 'super', 'float', 'this', 'int', 'byte', 'long', 'char', 'short',
+ # 'double', 'null', 'true', 'false',
+ )
+
+
class Parser1(Parser0):
""" Parser that add basic functionality like assignments,
operations, function calls, and indexing.
@@ -186,6 +233,37 @@ class Parser1(Parser0):
def parse_Str(self, node):
return reprs(node.value)
+ def parse_JoinedStr(self, node):
+ parts, value_nodes = [], []
+ for n in node.value_nodes:
+ if isinstance(n, ast.Str):
+ parts.append(n.value)
+ else:
+ assert isinstance(n, ast.FormattedValue)
+ parts.append('{' + self._parse_FormattedValue_fmt(n) + '}')
+ value_nodes.append(n.value_node)
+ thestring = reprs(''.join(parts))
+ return self.use_std_method(thestring, 'format', value_nodes)
+
+ def parse_FormattedValue(self, node): # can als be present standalone
+ thestring = "{" + self._parse_FormattedValue_fmt(node) + "}"
+ return self.use_std_method(thestring, 'format', [node.value_node])
+
+ def _parse_FormattedValue_fmt(self, node):
+ """ Return fmt for a FormattedValue node.
+ """
+ fmt = ''
+ if node.conversion:
+ fmt += '!' + node.conversion
+ if node.format_node and len(node.format_node.value_nodes) > 0:
+ if len(node.format_node.value_nodes) > 1:
+ raise JSError('String formatting only supports singleton format spec.')
+ spec_node = node.format_node.value_nodes[0]
+ if not isinstance(spec_node, ast.Str):
+ raise JSError('String formatting only supports string format spec.')
+ fmt += ':' + spec_node.value
+ return fmt
+
def parse_Bytes(self, node):
raise JSError('No Bytes in JS')
@@ -209,25 +287,33 @@ class Parser1(Parser0):
def parse_Dict(self, node):
# Oh JS; without the outer braces, it would only be an Object if used
# in an assignment ...
+ use_make_dict_func = False
code = ['({']
for key, val in zip(node.key_nodes, node.value_nodes):
if isinstance(key, (ast.Num, ast.NameConstant)):
code += self.parse(key)
- elif isinstance(key, ast.Str):
- if ' ' in key.value:
- raise JSError('Keys in a literal dict cannot contain spaces.')
+ elif (isinstance(key, ast.Str) and isidentifier1.match(key.value) and
+ key.value[0] not in '0123456789'):
code += key.value
else:
- # code += ['['] + self.parse(key) + [']'] # this actually breaks on IE
- raise JSError('Computed dict attributes are not supported on IE :/')
+ use_make_dict_func = True
+ break
code.append(': ')
code += self.parse(val)
code.append(', ')
if node.key_nodes:
code.pop(-1) # skip last comma
code.append('})')
- return code
+ # Do we need to use the fallback?
+ if use_make_dict_func:
+ func_args = []
+ for key, val in zip(node.key_nodes, node.value_nodes):
+ func_args += [unify(self.parse(key)), unify(self.parse(val))]
+ self.use_std_function('create_dict', [])
+ return stdlib.FUNCTION_PREFIX + 'create_dict(' + ', '.join(func_args) + ')'
+ return code
+
def parse_Set(self, node):
raise JSError('No Set in JS')
@@ -246,6 +332,8 @@ class Parser1(Parser0):
def parse_Name(self, node, fullname=None):
# node.ctx can be Load, Store, Del -> can be of use somewhere?
name = node.name
+ if name in reserved_names:
+ raise JSError('Cannot use reserved name %s as a variable name!' % name)
if self.vars.is_known(name):
return self.with_prefix(name)
if self._scope_prefix:
@@ -310,36 +398,41 @@ class Parser1(Parser0):
return [left, op, right]
def _format_string(self, node):
- # Get left end, stripped from the separator
+ # Get value_nodes
+ if isinstance(node.right_node, (ast.Tuple, ast.List)):
+ value_nodes = node.right_node.element_nodes
+ else:
+ value_nodes = [node.right_node]
+
+ # Is the left side a string? If not, exit early
+ # This works, but we cannot know whether the left was a string or number :P
+ # if not isinstance(node.left_node, ast.Str):
+ # thestring = unify(self.parse(node.left_node))
+ # thestring += ".replace(/%([0-9\.\+\-\#]*[srdeEfgGioxXc])/g, '{:$1}')"
+ # return self.use_std_method(thestring, 'format', value_nodes)
+
+ assert isinstance(node.left_node, ast.Str)
left = ''.join(self.parse(node.left_node))
sep, left = left[0], left[1:-1]
- # Get items
- right = node.right_node
- if isinstance(right, (ast.Tuple, ast.List)):
- items = [unify(self.parse(n)) for n in right.element_nodes]
- else:
- items = [unify(self.parse(right))]
+
# Get matches
matches = list(re.finditer(r'%[0-9\.\+\-\#]*[srdeEfgGioxXc]', left))
- if len(matches) != len(items):
+ if len(matches) != len(value_nodes):
raise JSError('In string formatting, number of placeholders '
'does not match number of replacements')
# Format
- code = []
+ parts = []
start = 0
- for i, m in enumerate(matches):
+ for m in matches:
fmt = m.group(0)
- if fmt in ('%s', '%f', '%i', '%d', '%g'):
- code.append(sep + left[start:m.start()] + sep)
- code.append(' + ' + items[i] + ' + ')
- elif fmt == '%r':
- code.append(sep + left[start:m.start()] + sep)
- code.append(' + %s + ' % self.use_std_function('repr', [items[i]]))
- else:
- raise JSError('Unsupported string formatting %r' % fmt)
+ fmt = {'%r': '!r', '%s': ''}.get(fmt, ':' + fmt[1:])
+ # Add the part in front of the match (and after prev match)
+ parts.append(left[start:m.start()])
+ parts.append("{%s}" % fmt)
start = m.end()
- code.append(sep + left[start:] + sep)
- return code
+ parts.append(left[start:])
+ thestring = sep + ''.join(parts) + sep
+ return self.use_std_method(thestring, 'format', value_nodes)
def _wrap_truthy(self, node):
""" Wraps an operation in a truthy call, unless its not necessary. """
diff --git a/pscript/parser2.py b/pscript/parser2.py
index 97257ec..93b1ef5 100644
--- a/pscript/parser2.py
+++ b/pscript/parser2.py
@@ -446,9 +446,11 @@ class Parser2(Parser1):
if (True and isinstance(node.test_node, ast.Call) and
isinstance(node.test_node.func_node, ast.Name) and
node.test_node.func_node.name == 'this_is_js'):
- code = [self.lf('{ /* if this_is_js() */')]
+ code = [self.lf('if ('), 'true', ') ', '{ /* if this_is_js() */']
+ self._indent += 1
for stmt in node.body_nodes:
code += self.parse(stmt)
+ self._indent -= 1
code.append(self.lf('}'))
return code
@@ -499,6 +501,7 @@ class Parser2(Parser1):
elif isinstance(f, ast.Name) and f.name in ('xrange', 'range'):
sure_is_range = [''.join(self.parse(arg)) for arg in
node.iter_node.arg_nodes]
+ iter = 'range' # stub to prevent the parsing of iter_node below
# Otherwise we parse the iter
if iter is None:
@@ -557,7 +560,7 @@ class Parser2(Parser1):
start, end, step = nums[0], nums[1], nums[2]
# Build for-loop in JS
t = 'for ({i} = {start}; {i} < {end}; {i} += {step})'
- if step.lstrip('+-').isnumeric() and float(step) < 0:
+ if step.lstrip('+-').isdecimal() and float(step) < 0:
t = t.replace('<', '>')
assert len(target) == 1
t = t.format(i=target[0], start=start, end=end, step=step) + ' {'
diff --git a/pscript/parser3.py b/pscript/parser3.py
index 0c34e78..f250879 100644
--- a/pscript/parser3.py
+++ b/pscript/parser3.py
@@ -10,8 +10,7 @@ tuple, range, pow, sum, round, int, float, str, bool, abs, divmod, all,
any, enumerate, zip, reversed, sorted, filter, map.
Further all methods for list, dict and str are implemented (except str
-methods: encode, decode, format, format_map, isdecimal, isdigit,
-isprintable, maketrans).
+methods: encode, decode, format_map, isprintable, maketrans).
.. pscript_example::
@@ -415,6 +414,11 @@ class Parser3(Parser2):
else:
raise JSError('Invalid keyword argument for sort: %r' % kw.name)
return self.use_std_method(base, 'sort', [key, reverse])
+
+ def method_format(self, node, base):
+ if node.kwarg_nodes:
+ raise JSError('Method format() does not support keyword args.')
+ return self.use_std_method(base, 'format', node.arg_nodes)
# Add functions and methods to the class, using the stdib functions ...
diff --git a/pscript/stdlib.py b/pscript/stdlib.py
index 0869ce8..df897ea 100644
--- a/pscript/stdlib.py
+++ b/pscript/stdlib.py
@@ -141,6 +141,12 @@ FUNCTIONS['op_instantiate'] = """function (ob, args) { // nargs: 2
}
}"""
+FUNCTIONS['create_dict'] = """function () {
+ var d = {};
+ for (var i=0; i<arguments.length; i+=2) { d[arguments[i]] = arguments[i+1]; }
+ return d;
+}"""
+
FUNCTIONS['merge_dicts'] = """function () {
var res = {};
for (var i=0; i<arguments.length; i++) {
@@ -219,7 +225,7 @@ FUNCTIONS['list'] = """function (x) {
}"""
FUNCTIONS['range'] = """function (start, end, step) {
-var i, res = [];
+ var i, res = [];
var val = start;
var n = (end - start) / step;
for (i=0; i<n; i++) {
@@ -229,6 +235,59 @@ var i, res = [];
return res;
}"""
+FUNCTIONS['format'] = """function (v, fmt) { // nargs: 2
+ fmt = fmt.toLowerCase();
+ var s = String(v);
+ if (fmt.indexOf('!r') >= 0) {
+ try { s = JSON.stringify(v); } catch (e) { s = undefined; }
+ if (typeof s === 'undefined') { s = v._IS_COMPONENT ? v.id : String(v); }
+ }
+ var i0 = fmt.indexOf(':');
+ if (i0 < 0) {
+ } else if (fmt.indexOf('i', i0) > i0) { // integer formatting
+ s = Number.parseInt(v).toFixed(0);
+ } else if (fmt.indexOf('f', i0) > i0) { // float formatting
+ v = Number.parseFloat(v);
+ var spec = fmt.slice(i0+1, fmt.indexOf('f', i0));
+ var decimals = 6;
+ if (spec.indexOf('.') >= 0) {
+ var decimals = Number(spec.slice(spec.indexOf('.')+1));
+ }
+ s = v.toFixed(decimals);
+ } else if (fmt.indexOf('e', i0) > i0) { // exp formatting
+ v = Number.parseFloat(v);
+ var precision = 6;
+ var spec = fmt.slice(i0+1, fmt.indexOf('e', i0));
+ if (spec.indexOf('.') >= 0) {
+ precision = Number(spec.slice(spec.indexOf('.')+1)) || 1;
+ }
+ s = v.toExponential(precision);
+ } else if (fmt.indexOf('g', i0) > i0) { // "general" formatting
+ v = Number.parseFloat(v);
+ var precision = 6;
+ var spec = fmt.slice(i0+1, fmt.indexOf('g', i0));
+ if (spec.indexOf('.') >= 0) {
+ precision = Number(spec.slice(spec.indexOf('.')+1)) || 1;
+ }
+ // Exp or decimal?
+ s = v.toExponential(precision-1);
+ var s1 = s.slice(0, s.indexOf('e')), s2 = s.slice(s.indexOf('e'));
+ if (s2.length == 3) { s2 = 'e' + s2[1] + '0' + s2[2]; }
+ var exp = Number(s2.slice(1));
+ if (exp >= -4 && exp < precision) { s1=v.toPrecision(precision); s2=''; }
+ // Skip trailing zeros and dot
+ var j = s1.length-1;
+ while (j>0 && s1[j] == '0') { j-=1; }
+ s1 = s1.slice(0, j+1);
+ if (s1.endsWith('.')) { s1 = s1.slice(0, s1.length-1); }
+ s = s1 + s2;
+ }
+ if (i0 >= 0 && v > 0) {
+ if (fmt[i0+1] == '+') { s = '+' + s; }
+ if (fmt[i0+1] == ' ') { s = ' ' + s; }
+ }
+ return s;
+}"""
## Normal functions
@@ -558,8 +617,7 @@ METHODS['values'] = """function () { // nargs: 0
## String only
-# ignores: encode, decode, format, format_map, isdecimal, isdigit,
-# isprintable, maketrans
+# ignores: encode, decode, format_map, isprintable, maketrans
# Not a Python method, but a method that we need, and is only ECMA 6
# http://stackoverflow.com/a/5450113/2271927
@@ -615,6 +673,31 @@ METHODS['find'] = """function (x, start, stop) { // nargs: 1 2 3
return -1;
}"""
+METHODS['format'] = """function () {
+ if (this.constructor !== String) return this.KEY.apply(this, arguments);
+ var parts = [], i = 0, i1, i2;
+ var itemnr = -1;
+ while (i < this.length) {
+ // find opening
+ i1 = this.indexOf('{', i);
+ if (i1 < 0 || i1 == this.length-1) { break; }
+ if (this[i1+1] == '{') {parts.push(this.slice(i, i1+1)); i = i1 + 2; continue;}
+ // find closing
+ i2 = this.indexOf('}', i1);
+ if (i2 < 0) { break; }
+ // parse
+ itemnr += 1;
+ var fmt = this.slice(i1+1, i2);
+ var index = fmt.split(':')[0].split('!')[0];
+ index = index? Number(index) : itemnr
+ var s = FUNCTION_PREFIXformat(arguments[index], fmt);
+ parts.push(this.slice(i, i1), s);
+ i = i2 + 1;
+ }
+ parts.push(this.slice(i));
+ return parts.join('');
+}"""
+
METHODS['isalnum'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z0-9]+$/.test(this));
@@ -625,13 +708,6 @@ METHODS['isalpha'] = """function () { // nargs: 0
return Boolean(/^[A-Za-z]+$/.test(this));
}"""
-# METHODS['isdecimal'] = """function () {
-# if (this.constructor !== String) return this.KEY.apply(this, arguments);
-# return Boolean(/^[0-9]+$/.test(this));
-# }"""
-#
-# METHODS['isdigit'] = METHODS['isdecimal']
-
METHODS['isidentifier'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z_][A-Za-z0-9_]*$/.test(this));
@@ -643,11 +719,25 @@ METHODS['islower'] = """function () { // nargs: 0
return low != high && low == this;
}"""
-METHODS['isnumeric'] = """function () { // nargs: 0
+METHODS['isdecimal'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[0-9]+$/.test(this));
}"""
+# The thing about isdecimal, isdigit and isnumeric.
+# https://stackoverflow.com/a/36800319/2271927
+#
+# * isdecimal() (Only Decimal Numbers)
+# * str.isdigit() (Decimals, Subscripts, Superscripts)
+# * isnumeric() (Digits, Vulgar Fractions, Subscripts, Superscripts,
+# Roman Numerals, Currency Numerators)
+#
+# In other words, isdecimal is the most strict. We used to have
+# isnumeric with isdecimal's implementation, so we provide isnumeric
+# and isdigit as aliases for now.
+
+METHODS['isnumeric'] = METHODS['isdigit'] = METHODS['isdecimal']
+
METHODS['isspace'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^\\s+$/.test(this));
| parse error
I have the following dictionary. pscript fails on ñ, 'cause if i replace it with `\xf1` it compiles.
```
langs = {'de': ['De', "Deutsch"], 'en': ['En', "English"],
'es': ['Es', "Español"], 'fr': ['Fr', "Français"],
'ru': ['Ru', "Русский"]}
```
Exception:
`RuntimeError: Cannot do formatted string literals yet: 'es': ['Es', "Español"], 'fr': ['Fr', "Français"],`
pscript thinks that `fr` is a prefix of a string. | flexxui/pscript | diff --git a/pscript/tests/test_parser1.py b/pscript/tests/test_parser1.py
index d9300a7..863abbf 100644
--- a/pscript/tests/test_parser1.py
+++ b/pscript/tests/test_parser1.py
@@ -1,5 +1,7 @@
from pscript.testing import run_tests_if_main, raises
+import sys
+
import pscript
from pscript import JSError, py2js, evaljs, evalpy, Parser
@@ -68,16 +70,66 @@ class TestExpressions:
assert evalpy('True or False') == 'true'
# Bug
assert evalpy('(9-3-3)/3') == '1'
-
- # string formatting
+
+ def test_string_formatting1(self):
+ # string formatting that we already had
assert evalpy('"%s" % "bar"') == 'bar'
assert evalpy('"-%s-" % "bar"') == '-bar-'
assert evalpy('"foo %s foo" % "bar"') == 'foo bar foo'
assert evalpy('"x %i" % 6') == 'x 6'
- assert evalpy('"x %f" % 6') == 'x 6'
- assert evalpy('"%s: %f" % ("value", 6)') == 'value: 6'
+ assert evalpy('"x %g" % 6') == 'x 6'
+ assert evalpy('"%s: %f" % ("value", 6)') == 'value: 6.000000'
assert evalpy('"%r: %r" % ("value", 6)') == '"value": 6'
+ def test_string_formatting2(self):
+
+ py2jslight = lambda x: py2js(x, inline_stdlib=False)
+
+ # Verify that percent-formatting produces same JS as str.format
+ assert py2jslight("'hi %i' % a") == py2jslight("'hi {:i}'.format(a)")
+ assert py2jslight("'hi %i %+i' % (a, b)") == py2jslight("'hi {:i} {:+i}'.format(a, b)")
+ assert py2jslight("'hi %f %1.2f' % (a, b)") == py2jslight("'hi {:f} {:1.2f}'.format(a, b)")
+ assert py2jslight("'hi %s %r' % (a, b)") == py2jslight("'hi {} {!r}'.format(a, b)")
+
+ if sys.version_info < (3, 6):
+ return
+
+ # Verify that f-string formatting produces same JS as str.format - Python 3.6+
+ assert py2jslight("f'hi {a:i}'") == py2jslight("'hi {:i}'.format(a)")
+ assert py2js("f'hi {a:i} {b:+i}'") == py2js("'hi {:i} {:+i}'.format(a, b)")
+ assert py2jslight("f'hi {a:f} {b:1.2f}'") == py2jslight("'hi {:f} {:1.2f}'.format(a, b)")
+ assert py2jslight("f'hi {a} {b!r}'") == py2jslight("'hi {} {!r}'.format(a, b)")
+
+ def test_string_formatting3(self):
+ # Verify fancy formatting (mosly for numbers)
+ # We don't support every kind of fortting that Python does.
+
+ x = 'a = 3.1415926535; b = 7; c = "foo"; d = 314159265.35; e = 0.0031415926535;'
+ # i formatting
+ assert evalpy(x + "'hi {:i}'.format(b)") == 'hi 7'
+ # f formatting
+ assert evalpy(x + "'hi {:i} {:+i} {: i}'.format(b, b, b)") == 'hi 7 +7 7'
+ assert evalpy(x + "'hi {:f} {:1.0f} {:1.2f}'.format(a, a, a)") == 'hi 3.141593 3 3.14'
+ # g formatting, these outputs are (manually) validated with Python
+ assert evalpy(x + "'hi {:g} {:.1g} {:.3g}'.format(a, a, a)") == 'hi 3.14159 3 3.14'
+ assert evalpy(x + "'hi {:g} {:.1g} {:.3g}'.format(d, d, d)") == 'hi 3.14159e+08 3e+08 3.14e+08'
+ assert evalpy(x + "'hi {:g} {:.1g} {:.3g}'.format(e, e, e)") == 'hi 0.00314159 0.003 0.00314'
+ # String and repr formatting
+ assert evalpy(x + "'hi {} {!s} {!r}'.format(c, c, c)") == 'hi foo foo "foo"'
+
+ def test_string_formatting4(self):
+
+ x = 'a = 3; b = 4; '
+
+ # Setting positions in format string
+ assert evalpy(x + "'hi {1:g} {1:+g} {0}'.format(a, b)") == 'hi 4 +4 3'
+
+ # Using a predefined template string for .format()
+ assert evalpy(x + "t = 'hi {} {}'; t.format(a, b)") == 'hi 3 4'
+
+ # Using a predefined template string for % - we cannot do this, unfortunately!
+ # assert evalpy(x + "t = 'hi %i %i'; t % (a, b)") == 'hi 3 4'
+
def test_overloaded_list_ops(self):
assert evalpy('[1, 2] + [3, 4]') == '[ 1, 2, 3, 4 ]'
assert evalpy('[3, 4] + [1, 2]') == '[ 3, 4, 1, 2 ]'
@@ -311,8 +363,18 @@ class TestExpressions:
assert py2js('{"foo": 3, "bar": 4}') == '({foo: 3, bar: 4});'
assert evalpy('a={"foo": 3, "bar": 4};a') == '{ foo: 3, bar: 4 }'
- with raises(JSError):
- assert evalpy('bla="foo";a={bla: 3, bar: 4};a') == '{ foo: 3, bar: 4 }'
+
+ def test_dict_literals(self):
+ # JS has a different way to define dict literals, with limitation
+ # (especially on IE), so we add some magic sause to make it work.
+
+ def tester1():
+ a = 'foo'
+ d = {a: 'bar1', 2: 'bar2', 'sp' + 'am': 'bar3'}
+ print(d.foo, d[2], d.spam)
+
+ js = py2js(tester1)
+ assert evaljs(js + 'tester1()') == 'bar1 bar2 bar3\nnull'
def test_ignore_import_of_compiler(self):
modname = pscript.__name__
diff --git a/pscript/tests/test_parser3.py b/pscript/tests/test_parser3.py
index ee39d85..3dd0ae5 100644
--- a/pscript/tests/test_parser3.py
+++ b/pscript/tests/test_parser3.py
@@ -607,6 +607,20 @@ class TestStrMethods:
assert evalpy('"0a1b2c".isalpha()') == 'false'
assert evalpy('"0a_".isalpha()') == 'false'
+ def test_isdecimal(self):
+ assert evalpy('"".isdecimal()') == 'false'
+ assert evalpy('"012".isdecimal()') == 'true'
+ assert evalpy('"abc".isdecimal()') == 'false'
+ assert evalpy('"0a1b2c".isdecimal()') == 'false'
+ assert evalpy('"0a_".isdecimal()') == 'false'
+
+ def test_isdigit(self):
+ assert evalpy('"".isdigit()') == 'false'
+ assert evalpy('"012".isdigit()') == 'true'
+ assert evalpy('"abc".isdigit()') == 'false'
+ assert evalpy('"0a1b2c".isdigit()') == 'false'
+ assert evalpy('"0a_".isdigit()') == 'false'
+
def test_isnumeric(self):
assert evalpy('"".isnumeric()') == 'false'
assert evalpy('"012".isnumeric()') == 'true'
@@ -780,10 +794,15 @@ class TestStrMethods:
code = "table = {'a':'x', 'b':'y', 'c': None}\n"
assert evalpy(code + "'abcde'.translate(table)") == "xyde"
+ def test_format(self):
+ # Covered more extensively in test_parser1(), but we need the method
+ # to pass the test below ...
+ assert evalpy("'{:+0.2f}'.format(1.23456)") == "+1.23"
+
def test_that_all_str_methods_are_tested(self):
tested = set([x.split('_')[1] for x in dir(self) if x.startswith('test_')])
needed = set([x for x in dir(str) if not x.startswith('_')])
- ignore = 'encode decode format format_map isdecimal isdigit isprintable maketrans'
+ ignore = 'encode decode format_map isprintable maketrans'
needed = needed.difference(ignore.split(' '))
not_tested = needed.difference(tested)
diff --git a/pscript/tests/test_stdlib.py b/pscript/tests/test_stdlib.py
index c8aeac9..9d4222e 100644
--- a/pscript/tests/test_stdlib.py
+++ b/pscript/tests/test_stdlib.py
@@ -46,9 +46,9 @@ def test_stdlib_has_all_dict_methods():
def test_stdlib_has_all_str_methods():
method_names = [m for m in dir(str) if not m.startswith('_')]
if sys.version_info[0] == 2:
- ignore = 'encode decode format isdigit'
+ ignore = 'encode decode'
else:
- ignore = 'encode format format_map isdecimal isdigit isprintable maketrans'
+ ignore = 'encode format_map isprintable maketrans'
for name in ignore.split(' '):
method_names.remove(name)
for method_name in method_names:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 7
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
-e git+https://github.com/flexxui/pscript.git@59d373350fb63db08db39d6527b4186c2490cc27#egg=pscript
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pscript
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- pytest-cov==4.0.0
- tomli==1.2.3
prefix: /opt/conda/envs/pscript
| [
"pscript/tests/test_parser1.py::TestExpressions::test_string_formatting2",
"pscript/tests/test_stdlib.py::test_stdlib_has_all_str_methods"
]
| [
"pscript/tests/test_parser1.py::TestExpressions::test_ops",
"pscript/tests/test_parser1.py::TestExpressions::test_string_formatting1",
"pscript/tests/test_parser1.py::TestExpressions::test_string_formatting3",
"pscript/tests/test_parser1.py::TestExpressions::test_string_formatting4",
"pscript/tests/test_parser1.py::TestExpressions::test_overloaded_list_ops",
"pscript/tests/test_parser1.py::TestExpressions::test_raw_js_overloading",
"pscript/tests/test_parser1.py::TestExpressions::test_overload_funcs_dont_overload_real_funcs",
"pscript/tests/test_parser1.py::TestExpressions::test_comparisons",
"pscript/tests/test_parser1.py::TestExpressions::test_deep_comparisons",
"pscript/tests/test_parser1.py::TestExpressions::test_truthfulness_of_basic_types",
"pscript/tests/test_parser1.py::TestExpressions::test_truthfulness_of_array_and_dict",
"pscript/tests/test_parser1.py::TestExpressions::test_indexing_and_slicing",
"pscript/tests/test_parser1.py::TestExpressions::test_assignments",
"pscript/tests/test_parser1.py::TestExpressions::test_aug_assignments",
"pscript/tests/test_parser1.py::TestExpressions::test_basic_types",
"pscript/tests/test_parser1.py::TestExpressions::test_dict_literals",
"pscript/tests/test_parser1.py::TestExpressions::test_import",
"pscript/tests/test_parser1.py::TestExpressions::test_funcion_call",
"pscript/tests/test_parser1.py::TestExpressions::test_instantiation",
"pscript/tests/test_parser1.py::TestExpressions::test_delete",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_isinstance",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_issubclass",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_hasattr",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_getattr",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_setattr",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_deltattr",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_print",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_min",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_max",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_callable",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_chr_and_ord",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_list",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_dict",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_range",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_pow",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_sum",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_round",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_int",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_float",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_repr",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_str",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_bool",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_abs",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_divod",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_all",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_any",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_enumerate",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_zip",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_reversed",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_sorted",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_filter",
"pscript/tests/test_parser3.py::TestOtherBuiltins::test_map",
"pscript/tests/test_parser3.py::TestListMethods::test_append",
"pscript/tests/test_parser3.py::TestListMethods::test_remove",
"pscript/tests/test_parser3.py::TestListMethods::test_count",
"pscript/tests/test_parser3.py::TestListMethods::test_extend",
"pscript/tests/test_parser3.py::TestListMethods::test_index",
"pscript/tests/test_parser3.py::TestListMethods::test_insert",
"pscript/tests/test_parser3.py::TestListMethods::test_reverse",
"pscript/tests/test_parser3.py::TestListMethods::test_sort",
"pscript/tests/test_parser3.py::TestListMethods::test_clear",
"pscript/tests/test_parser3.py::TestListMethods::test_copy",
"pscript/tests/test_parser3.py::TestListMethods::test_pop",
"pscript/tests/test_parser3.py::TestListMethods::test_no_list",
"pscript/tests/test_parser3.py::TestDictMethods::test_get",
"pscript/tests/test_parser3.py::TestDictMethods::test_items",
"pscript/tests/test_parser3.py::TestDictMethods::test_keys",
"pscript/tests/test_parser3.py::TestDictMethods::test_popitem",
"pscript/tests/test_parser3.py::TestDictMethods::test_setdefault",
"pscript/tests/test_parser3.py::TestDictMethods::test_update",
"pscript/tests/test_parser3.py::TestDictMethods::test_values",
"pscript/tests/test_parser3.py::TestDictMethods::test_clear",
"pscript/tests/test_parser3.py::TestDictMethods::test_copy",
"pscript/tests/test_parser3.py::TestDictMethods::test_pop",
"pscript/tests/test_parser3.py::TestDictMethods::test_no_dict",
"pscript/tests/test_parser3.py::TestStrMethods::test_capitalize",
"pscript/tests/test_parser3.py::TestStrMethods::test_title",
"pscript/tests/test_parser3.py::TestStrMethods::test_lower",
"pscript/tests/test_parser3.py::TestStrMethods::test_upper",
"pscript/tests/test_parser3.py::TestStrMethods::test_casefold",
"pscript/tests/test_parser3.py::TestStrMethods::test_swapcase",
"pscript/tests/test_parser3.py::TestStrMethods::test_center",
"pscript/tests/test_parser3.py::TestStrMethods::test_ljust",
"pscript/tests/test_parser3.py::TestStrMethods::test_rjust",
"pscript/tests/test_parser3.py::TestStrMethods::test_zfill",
"pscript/tests/test_parser3.py::TestStrMethods::test_count",
"pscript/tests/test_parser3.py::TestStrMethods::test_endswith",
"pscript/tests/test_parser3.py::TestStrMethods::test_startswith",
"pscript/tests/test_parser3.py::TestStrMethods::test_expandtabs",
"pscript/tests/test_parser3.py::TestStrMethods::test_find",
"pscript/tests/test_parser3.py::TestStrMethods::test_index",
"pscript/tests/test_parser3.py::TestStrMethods::test_rfind",
"pscript/tests/test_parser3.py::TestStrMethods::test_rindex",
"pscript/tests/test_parser3.py::TestStrMethods::test_isalnum",
"pscript/tests/test_parser3.py::TestStrMethods::test_isalpha",
"pscript/tests/test_parser3.py::TestStrMethods::test_isdecimal",
"pscript/tests/test_parser3.py::TestStrMethods::test_isdigit",
"pscript/tests/test_parser3.py::TestStrMethods::test_isnumeric",
"pscript/tests/test_parser3.py::TestStrMethods::test_isidentifier",
"pscript/tests/test_parser3.py::TestStrMethods::test_islower",
"pscript/tests/test_parser3.py::TestStrMethods::test_isupper",
"pscript/tests/test_parser3.py::TestStrMethods::test_isspace",
"pscript/tests/test_parser3.py::TestStrMethods::test_istitle",
"pscript/tests/test_parser3.py::TestStrMethods::test_join",
"pscript/tests/test_parser3.py::TestStrMethods::test_lstrip",
"pscript/tests/test_parser3.py::TestStrMethods::test_rstrip",
"pscript/tests/test_parser3.py::TestStrMethods::test_strip",
"pscript/tests/test_parser3.py::TestStrMethods::test_partition",
"pscript/tests/test_parser3.py::TestStrMethods::test_rpartition",
"pscript/tests/test_parser3.py::TestStrMethods::test_split",
"pscript/tests/test_parser3.py::TestStrMethods::test_rsplit",
"pscript/tests/test_parser3.py::TestStrMethods::test_splitlines",
"pscript/tests/test_parser3.py::TestStrMethods::test_replace",
"pscript/tests/test_parser3.py::TestStrMethods::test_translate",
"pscript/tests/test_parser3.py::TestStrMethods::test_format"
]
| [
"pscript/tests/test_parser1.py::TestTheParser::test_special_functions",
"pscript/tests/test_parser1.py::TestExpressions::test_special",
"pscript/tests/test_parser1.py::TestExpressions::test_ignore_import_of_compiler",
"pscript/tests/test_parser1.py::TestExpressions::test_pass",
"pscript/tests/test_parser1.py::TestModules::test_module",
"pscript/tests/test_parser3.py::TestSpecials::test_rawJS",
"pscript/tests/test_parser3.py::TestHardcoreBuiltins::test_len",
"pscript/tests/test_parser3.py::TestListMethods::test_that_all_list_methods_are_tested",
"pscript/tests/test_parser3.py::TestDictMethods::test_that_all_dict_methods_are_tested",
"pscript/tests/test_parser3.py::TestStrMethods::test_that_all_str_methods_are_tested",
"pscript/tests/test_stdlib.py::test_stdlib_full_and_partial",
"pscript/tests/test_stdlib.py::test_stdlib_has_all_list_methods",
"pscript/tests/test_stdlib.py::test_stdlib_has_all_dict_methods"
]
| []
| BSD 2-Clause "Simplified" License | 2,380 | [
"pscript/stdlib.py",
"pscript/parser0.py",
"MANIFEST.in",
"pscript/__init__.py",
"pscript/parser3.py",
"pscript/parser2.py",
"pscript/parser1.py",
"pscript/commonast.py"
]
| [
"pscript/stdlib.py",
"pscript/parser0.py",
"MANIFEST.in",
"pscript/__init__.py",
"pscript/parser3.py",
"pscript/parser2.py",
"pscript/parser1.py",
"pscript/commonast.py"
]
|
|
conan-io__conan-2728 | 35c00decd88eb8dfb87871209907ddce4f7ce170 | 2018-04-09 15:05:36 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | lasote: Pushed again, please check | diff --git a/conans/client/action_recorder.py b/conans/client/action_recorder.py
index bf573d9cb..d771bf075 100644
--- a/conans/client/action_recorder.py
+++ b/conans/client/action_recorder.py
@@ -7,6 +7,8 @@ from datetime import datetime
from collections import namedtuple, OrderedDict
# Install actions
+from conans.model.ref import ConanFileReference, PackageReference
+
INSTALL_CACHE = 0
INSTALL_DOWNLOADED = 1
INSTALL_BUILT = 2
@@ -32,8 +34,11 @@ class ActionRecorder(object):
def __init__(self):
self._inst_recipes_actions = OrderedDict()
self._inst_packages_actions = OrderedDict()
+ self._inst_recipes_develop = set() # Recipes being created (to set dependency=False)
# ###### INSTALL METHODS ############
+ def add_recipe_being_developed(self, reference):
+ self._inst_recipes_develop.add(reference)
def _add_recipe_action(self, reference, action):
if reference not in self._inst_recipes_actions:
@@ -90,6 +95,12 @@ class ActionRecorder(object):
ret.append((_package_ref, _package_action))
return ret
+ def in_development_recipe(self, reference):
+ return reference in self._inst_recipes_develop
+
+ def get_info(self):
+ return self.get_install_info()
+
def get_install_info(self):
ret = {"error": self.install_errored,
"installed": []}
@@ -98,11 +109,15 @@ class ActionRecorder(object):
error = None if the_action.type != INSTALL_ERROR else the_action.doc
doc = {"id": str(the_ref),
"downloaded": the_action.type == INSTALL_DOWNLOADED,
- "built": the_action.type == INSTALL_BUILT,
"cache": the_action.type == INSTALL_CACHE,
"error": error,
"remote": the_action.doc.get("remote", None),
"time": the_action.time}
+ if isinstance(the_ref, ConanFileReference):
+ doc["dependency"] = not self.in_development_recipe(the_ref)
+ else:
+ doc["built"] = the_action.type == INSTALL_BUILT
+
if doc["remote"] is None and error:
doc["remote"] = error.get("remote", None)
return doc
@@ -111,7 +126,6 @@ class ActionRecorder(object):
# Could be a download and then an access to cache, we want the first one
action = actions[0]
recipe_doc = get_doc_for_ref(ref, action)
- del recipe_doc["built"] # Avoid confusions
packages = self._get_installed_packages(ref)
tmp = {"recipe": recipe_doc,
"packages": []}
diff --git a/conans/client/command.py b/conans/client/command.py
index 19513161d..f805caf51 100644
--- a/conans/client/command.py
+++ b/conans/client/command.py
@@ -220,17 +220,21 @@ class Command(object):
cwd = os.getcwd()
+ info = None
try:
- self._conan.create(args.path, name, version, user, channel,
- args.profile, args.settings, args.options,
- args.env, args.test_folder, args.not_export,
- args.build, args.keep_source, args.keep_build, args.verify,
- args.manifests, args.manifests_interactive,
- args.remote, args.update,
- test_build_folder=args.test_build_folder)
+ info = self._conan.create(args.path, name, version, user, channel,
+ args.profile, args.settings, args.options,
+ args.env, args.test_folder, args.not_export,
+ args.build, args.keep_source, args.keep_build, args.verify,
+ args.manifests, args.manifests_interactive,
+ args.remote, args.update,
+ test_build_folder=args.test_build_folder)
+ except ConanException as exc:
+ info = exc.info
+ raise
finally:
- if args.json:
- self._outputer.json_install(self._conan.recorder.get_install_info(), args.json, cwd)
+ if args.json and info:
+ self._outputer.json_install(info, args.json, cwd)
def download(self, *args):
"""Downloads recipe and binaries to the local cache, without using settings. It works
@@ -289,34 +293,38 @@ class Command(object):
args = parser.parse_args(*args)
cwd = os.getcwd()
+ info = None
try:
try:
reference = ConanFileReference.loads(args.path_or_reference)
except ConanException:
- self._conan.install(path=args.path_or_reference,
- settings=args.settings, options=args.options,
- env=args.env,
- remote=args.remote,
- verify=args.verify, manifests=args.manifests,
- manifests_interactive=args.manifests_interactive,
- build=args.build, profile_name=args.profile,
- update=args.update, generators=args.generator,
- no_imports=args.no_imports,
- install_folder=args.install_folder)
+ info = self._conan.install(path=args.path_or_reference,
+ settings=args.settings, options=args.options,
+ env=args.env,
+ remote=args.remote,
+ verify=args.verify, manifests=args.manifests,
+ manifests_interactive=args.manifests_interactive,
+ build=args.build, profile_name=args.profile,
+ update=args.update, generators=args.generator,
+ no_imports=args.no_imports,
+ install_folder=args.install_folder)
else:
- self._conan.install_reference(reference, settings=args.settings,
- options=args.options,
- env=args.env,
- remote=args.remote,
- verify=args.verify, manifests=args.manifests,
- manifests_interactive=args.manifests_interactive,
- build=args.build, profile_name=args.profile,
- update=args.update,
- generators=args.generator,
- install_folder=args.install_folder)
+ info = self._conan.install_reference(reference, settings=args.settings,
+ options=args.options,
+ env=args.env,
+ remote=args.remote,
+ verify=args.verify, manifests=args.manifests,
+ manifests_interactive=args.manifests_interactive,
+ build=args.build, profile_name=args.profile,
+ update=args.update,
+ generators=args.generator,
+ install_folder=args.install_folder)
+ except ConanException as exc:
+ info = exc.info
+ raise
finally:
- if args.json:
- self._outputer.json_install(self._conan.recorder.get_install_info(), args.json, cwd)
+ if args.json and info:
+ self._outputer.json_install(info, args.json, cwd)
def config(self, *args):
"""Manages Conan configuration. Edits the conan.conf or installs config files.
diff --git a/conans/client/conan_api.py b/conans/client/conan_api.py
index 98ee25d5a..dd50689b2 100644
--- a/conans/client/conan_api.py
+++ b/conans/client/conan_api.py
@@ -65,15 +65,21 @@ def api_method(f):
the_self = args[0]
try:
log_command(f.__name__, kwargs)
+ the_self._init_manager()
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
- return f(*args, **kwargs)
+ ret = f(*args, **kwargs)
+ if ret is None: # FIXME: Probably each method should manage its return
+ return the_self._recorder.get_info()
+ return ret
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except:
pass
+ if isinstance(exc, ConanException):
+ exc.info = the_self._recorder.get_info()
raise
return wrapper
@@ -207,13 +213,22 @@ class ConanAPIV1(object):
self._user_io = user_io
self._runner = runner
self._remote_manager = remote_manager
- self.recorder = ActionRecorder()
+ self._search_manager = search_manager
+ self._settings_preprocessor = _settings_preprocessor
self._registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
- self._manager = ConanManager(client_cache, user_io, runner, remote_manager, search_manager,
- _settings_preprocessor, self.recorder, self._registry)
+ self._recorder = None
+ self._manager = None
+
if not interactive:
self._user_io.disable_input()
+ def _init_manager(self):
+ """Every api call gets a new recorder and new manager"""
+ self._recorder = ActionRecorder()
+ self._manager = ConanManager(self._client_cache, self._user_io, self._runner,
+ self._remote_manager, self._search_manager,
+ self._settings_preprocessor, self._recorder, self._registry)
+
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
cwd=None, visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None,
@@ -319,6 +334,7 @@ class ConanAPIV1(object):
"or it doesn't have a conanfile.py" % tf)
test_conanfile_path = get_test_conanfile_path(test_folder)
+ self._recorder.add_recipe_being_developed(reference)
if test_conanfile_path:
pt = PackageTester(self._manager, self._user_io)
diff --git a/conans/errors.py b/conans/errors.py
index 256f20a3f..e5b73a087 100644
--- a/conans/errors.py
+++ b/conans/errors.py
@@ -68,7 +68,9 @@ class ConanException(Exception):
"""
Generic conans exception
"""
- pass
+ def __init__(self, *args, **kwargs):
+ self.info = None
+ super(ConanException, self).__init__(*args, **kwargs)
class NoRemoteAvailable(ConanException):
| Issues with the JSON created by conan create
Conan version: 1.2.0
Command: `conan create --json ./foo.json . myteam/unstable`
The generated JSON does not separate the dependency packages of the built project, from the built package. This makes it difficult to parse. | conan-io/conan | diff --git a/conans/test/command/json_output_test.py b/conans/test/command/json_output_test.py
index a9f64b87a..0cf3ba030 100644
--- a/conans/test/command/json_output_test.py
+++ b/conans/test/command/json_output_test.py
@@ -23,6 +23,7 @@ class JsonOutputTest(unittest.TestCase):
my_json = json.loads(load(os.path.join(self.client.current_folder, "myfile.json")))
self.assertFalse(my_json["error"])
self.assertEquals(my_json["installed"][0]["recipe"]["id"], "CC/1.0@private_user/channel")
+ self.assertFalse(my_json["installed"][0]["recipe"]["dependency"])
self.assertTrue(my_json["installed"][0]["recipe"]["cache"])
self.assertIsNone(my_json["installed"][0]["recipe"]["remote"])
self.assertTrue(my_json["installed"][0]["packages"][0]["built"])
@@ -37,6 +38,7 @@ class JsonOutputTest(unittest.TestCase):
self.assertIn("T", the_time_str) # Weak validation of the ISO 8601
self.assertFalse(my_json["error"])
self.assertEquals(my_json["installed"][0]["recipe"]["id"], "CC/1.0@private_user/channel")
+ self.assertTrue(my_json["installed"][0]["recipe"]["dependency"])
self.assertFalse(my_json["installed"][0]["recipe"]["cache"])
self.assertTrue(my_json["installed"][0]["recipe"]["downloaded"])
self.assertIsNotNone(my_json["installed"][0]["recipe"]["remote"])
@@ -164,6 +166,10 @@ AA*: CC/1.0@private_user/channel
my_json = load(os.path.join(self.client.current_folder, "myfile.json"))
my_json = json.loads(my_json)
+ self.assertTrue(my_json["installed"][0]["recipe"]["dependency"])
+ self.assertTrue(my_json["installed"][1]["recipe"]["dependency"])
+ self.assertTrue(my_json["installed"][2]["recipe"]["dependency"])
+
# Installed the build require CC with two options
self.assertEquals(len(my_json["installed"][2]["packages"]), 2)
self.assertEquals(my_json["installed"][2]["recipe"]["id"], "CC/1.0@private_user/channel")
diff --git a/conans/test/model/version_ranges_test.py b/conans/test/model/version_ranges_test.py
index 71012b623..919b29a8c 100644
--- a/conans/test/model/version_ranges_test.py
+++ b/conans/test/model/version_ranges_test.py
@@ -1,4 +1,5 @@
import unittest
+
from conans.test.utils.tools import TestBufferConanOutput
from conans.paths import CONANFILE
import os
diff --git a/conans/test/util/action_recorder_test.py b/conans/test/util/action_recorder_test.py
index 0a93a1570..ac85817b5 100644
--- a/conans/test/util/action_recorder_test.py
+++ b/conans/test/util/action_recorder_test.py
@@ -19,9 +19,11 @@ class ActionRecorderTest(unittest.TestCase):
def incomplete_process_test(self):
tracer = ActionRecorder()
tracer.recipe_install_error(self.ref1, INSTALL_ERROR_NETWORK, "SSL wtf", "http://drl.com")
- install_info = tracer.get_install_info()
+ tracer.add_recipe_being_developed(self.ref1)
+ install_info = tracer.get_info()
self.assertTrue(install_info["error"])
self.assertEquals(install_info["installed"][0]["packages"], [])
+ self.assertEquals(install_info["installed"][0]["recipe"]["dependency"], False)
def double_actions_test(self):
tracer = ActionRecorder()
@@ -30,7 +32,7 @@ class ActionRecorderTest(unittest.TestCase):
tracer.package_downloaded(self.ref_p1, "http://drl.com")
tracer.package_fetched_from_cache(self.ref_p1)
- install_info = tracer.get_install_info()
+ install_info = tracer.get_info()
self.assertFalse(install_info["error"])
first_installed = install_info["installed"][0]
@@ -55,12 +57,15 @@ class ActionRecorderTest(unittest.TestCase):
tracer.recipe_fetched_from_cache(self.ref3)
tracer.package_built(self.ref_p3)
+ tracer.add_recipe_being_developed(self.ref1)
- install_info = tracer.get_install_info()
+ install_info = tracer.get_info()
self.assertTrue(install_info["error"])
first_installed = install_info["installed"][0]
+
self.assertTrue(first_installed["recipe"]["cache"])
+ self.assertFalse(first_installed["recipe"]["dependency"])
self.assertFalse(first_installed["recipe"]["downloaded"])
self.assertIsNone(first_installed["recipe"]["error"])
self.assertEquals(str(first_installed["recipe"]["id"]), "lib1/1.0@conan/stable")
@@ -73,6 +78,7 @@ class ActionRecorderTest(unittest.TestCase):
second_installed = install_info["installed"][1]
self.assertFalse(second_installed["recipe"]["cache"])
+ self.assertTrue(second_installed["recipe"]["dependency"])
self.assertTrue(second_installed["recipe"]["downloaded"])
self.assertIsNone(second_installed["recipe"]["error"])
self.assertEquals(str(second_installed["recipe"]["id"]), "lib2/1.0@conan/stable")
@@ -85,6 +91,7 @@ class ActionRecorderTest(unittest.TestCase):
self.assertEquals(str(second_installed["packages"][0]["id"]), "2")
third_installed = install_info["installed"][2]
+ self.assertTrue(third_installed["recipe"]["dependency"])
self.assertFalse(third_installed["packages"][0]["cache"])
self.assertFalse(third_installed["packages"][0]["error"])
self.assertTrue(third_installed["packages"][0]["built"])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 4
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@35c00decd88eb8dfb87871209907ddce4f7ce170#egg=conan
cov-core==1.15.0
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
nose-cov==1.6
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- cov-core==1.15.0
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- nose-cov==1.6
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/util/action_recorder_test.py::ActionRecorderTest::test_install"
]
| [
"conans/test/command/json_output_test.py::JsonOutputTest::test_errors",
"conans/test/command/json_output_test.py::JsonOutputTest::test_json_generation",
"conans/test/command/json_output_test.py::JsonOutputTest::test_simple_fields"
]
| [
"conans/test/model/version_ranges_test.py::VersionRangesTest::test_local_basic",
"conans/test/model/version_ranges_test.py::VersionRangesTest::test_remote_basic"
]
| []
| MIT License | 2,381 | [
"conans/client/conan_api.py",
"conans/errors.py",
"conans/client/action_recorder.py",
"conans/client/command.py"
]
| [
"conans/client/conan_api.py",
"conans/errors.py",
"conans/client/action_recorder.py",
"conans/client/command.py"
]
|
oasis-open__cti-taxii-client-31 | 0201af14e578714a297aff795413babc425a2219 | 2018-04-09 18:02:08 | 0201af14e578714a297aff795413babc425a2219 | diff --git a/.gitignore b/.gitignore
index 1f8d3e2..f21feea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,7 @@ nosetests.xml
coverage.xml
*,cover
.hypothesis/
+.pytest_cache/
# Translations
*.mo
diff --git a/taxii2client/__init__.py b/taxii2client/__init__.py
index 85a7b7f..358892f 100644
--- a/taxii2client/__init__.py
+++ b/taxii2client/__init__.py
@@ -198,8 +198,8 @@ class Status(_TAXIIEndpoint):
__bool__ = __nonzero__
- def refresh(self):
- response = self._conn.get(self.url, accept=MEDIA_TYPE_TAXII_V20)
+ def refresh(self, accept=MEDIA_TYPE_TAXII_V20):
+ response = self._conn.get(self.url, accept=accept)
self._populate_fields(**response)
def wait_until_final(self, poll_interval=1, timeout=60):
@@ -386,30 +386,31 @@ class Collection(_TAXIIEndpoint):
msg = "Collection '{}' does not allow writing."
raise AccessError(msg.format(self.url))
- def refresh(self):
- response = self._conn.get(self.url, accept=MEDIA_TYPE_TAXII_V20)
+ def refresh(self, accept=MEDIA_TYPE_TAXII_V20):
+ response = self._conn.get(self.url, accept=accept)
self._populate_fields(**response)
self._loaded = True
- def get_objects(self, **filter_kwargs):
+ def get_objects(self, accept=MEDIA_TYPE_STIX_V20, **filter_kwargs):
"""Implement the ``Get Objects`` endpoint (section 5.3)"""
self._verify_can_read()
query_params = _filter_kwargs_to_query_params(filter_kwargs)
- return self._conn.get(self.objects_url, accept=MEDIA_TYPE_STIX_V20,
+ return self._conn.get(self.objects_url, accept=accept,
params=query_params)
- def get_object(self, obj_id, version=None):
+ def get_object(self, obj_id, version=None, accept=MEDIA_TYPE_STIX_V20):
"""Implement the ``Get an Object`` endpoint (section 5.5)"""
self._verify_can_read()
url = self.objects_url + str(obj_id) + "/"
query_params = None
if version:
query_params = _filter_kwargs_to_query_params({"version": version})
- return self._conn.get(url, accept=MEDIA_TYPE_STIX_V20,
+ return self._conn.get(url, accept=accept,
params=query_params)
def add_objects(self, bundle, wait_for_completion=True, poll_interval=1,
- timeout=60):
+ timeout=60, accept=MEDIA_TYPE_TAXII_V20,
+ content_type=MEDIA_TYPE_STIX_V20):
"""Implement the ``Add Objects`` endpoint (section 5.4)
Add objects to the collection. This may be performed either
@@ -427,10 +428,13 @@ class Collection(_TAXIIEndpoint):
parsed into native Python)
wait_for_completion (bool): Whether to wait for the add operation
to complete before returning
- poll_interval: If waiting for completion, how often to poll
+ poll_interval (int): If waiting for completion, how often to poll
the status service (seconds)
- timeout: If waiting for completion, how long to poll until giving
- up (seconds). Use <= 0 to wait forever
+ timeout (int): If waiting for completion, how long to poll until
+ giving up (seconds). Use <= 0 to wait forever
+ accept (str): media type to include in the ``Accept:`` header.
+ content_type (str): media type to include in the ``Content-Type:``
+ header.
Returns:
If ``wait_for_completion`` is False, a Status object corresponding
@@ -446,8 +450,8 @@ class Collection(_TAXIIEndpoint):
self._verify_can_write()
headers = {
- "Accept": MEDIA_TYPE_TAXII_V20,
- "Content-Type": MEDIA_TYPE_STIX_V20,
+ "Accept": accept,
+ "Content-Type": content_type,
}
if isinstance(bundle, dict):
@@ -473,12 +477,12 @@ class Collection(_TAXIIEndpoint):
return status
- def get_manifest(self, **filter_kwargs):
+ def get_manifest(self, accept=MEDIA_TYPE_TAXII_V20, **filter_kwargs):
"""Implement the ``Get Object Manifests`` endpoint (section 5.6)."""
self._verify_can_read()
query_params = _filter_kwargs_to_query_params(filter_kwargs)
return self._conn.get(self.url + "manifest/",
- accept=MEDIA_TYPE_TAXII_V20,
+ accept=accept,
params=query_params)
@@ -545,17 +549,17 @@ class ApiRoot(_TAXIIEndpoint):
if not self._loaded_information:
self.refresh_information()
- def refresh(self):
+ def refresh(self, accept=MEDIA_TYPE_TAXII_V20):
"""Update the API Root's information and list of Collections"""
- self.refresh_information()
- self.refresh_collections()
+ self.refresh_information(accept)
+ self.refresh_collections(accept)
- def refresh_information(self):
+ def refresh_information(self, accept=MEDIA_TYPE_TAXII_V20):
"""Update the properties of this API Root.
This invokes the ``Get API Root Information`` endpoint.
"""
- response = self._conn.get(self.url, accept=MEDIA_TYPE_TAXII_V20)
+ response = self._conn.get(self.url, accept=accept)
self._title = response["title"]
self._description = response["description"]
@@ -564,13 +568,13 @@ class ApiRoot(_TAXIIEndpoint):
self._loaded_information = True
- def refresh_collections(self):
+ def refresh_collections(self, accept=MEDIA_TYPE_TAXII_V20):
"""Update the list of Collections contained by this API Root.
This invokes the ``Get Collections`` endpoint.
"""
url = self.url + "collections/"
- response = self._conn.get(url, accept=MEDIA_TYPE_TAXII_V20)
+ response = self._conn.get(url, accept=accept)
self._collections = []
for item in response["collections"]:
@@ -580,9 +584,9 @@ class ApiRoot(_TAXIIEndpoint):
self._loaded_collections = True
- def get_status(self, status_id):
+ def get_status(self, status_id, accept=MEDIA_TYPE_TAXII_V20):
status_url = self.url + "status/" + status_id + "/"
- info = self._conn.get(status_url, accept=MEDIA_TYPE_TAXII_V20)
+ info = self._conn.get(status_url, accept=accept)
return Status(status_url, conn=self._conn, **info)
@@ -700,6 +704,23 @@ class _HTTPConnection(object):
if user and password:
self.session.auth = requests.auth.HTTPBasicAuth(user, password)
+ def valid_content_type(self, content_type, accept):
+ """Check that the server is returning a valid Content-Type
+
+ Args:
+ content_type (str): ``Content-Type:`` header value
+ accept (str): media type to include in the ``Accept:`` header.
+
+ """
+ accept_tokens = accept.replace(' ', '').split(';')
+ content_type_tokens = content_type.replace(' ', '').split(';')
+
+ return (
+ all(elem in content_type_tokens for elem in accept_tokens) and
+ (content_type_tokens[0] == 'application/vnd.oasis.taxii+json' or
+ content_type_tokens[0] == 'application/vnd.oasis.stix+json')
+ )
+
def get(self, url, accept, params=None):
"""Perform an HTTP GET, using the saved requests.Session and auth info.
@@ -720,9 +741,10 @@ class _HTTPConnection(object):
resp.raise_for_status()
content_type = resp.headers["Content-Type"]
- if not content_type.startswith(accept):
- msg = "Unexpected Response Content-Type: {}"
- raise TAXIIServiceException(msg.format(content_type))
+
+ if not self.valid_content_type(content_type=content_type, accept=accept):
+ msg = "Unexpected Response. Got Content-Type: '{}' for Accept: '{}'"
+ raise TAXIIServiceException(msg.format(content_type, accept))
return resp.json()
| More flexible content-type matching
Building on #10, we should make the content-type matching more robust than just a string's `.startwith()`. | oasis-open/cti-taxii-client | diff --git a/taxii2client/test/test_client.py b/taxii2client/test/test_client.py
index de2e303..165e2bc 100644
--- a/taxii2client/test/test_client.py
+++ b/taxii2client/test/test_client.py
@@ -490,7 +490,8 @@ def test_content_type_invalid(collection):
with pytest.raises(TAXIIServiceException) as excinfo:
collection.get_object("indicator--252c7c11-daf2-42bd-843b-be65edca9f61")
- assert "Unexpected Response Content-Type" in str(excinfo.value)
+ assert ("Unexpected Response. Got Content-Type: 'taxii' for "
+ "Accept: 'application/vnd.oasis.stix+json; version=2.0'") in str(excinfo.value)
def test_url_filter_type():
@@ -563,3 +564,30 @@ def test_taxii_endpoint_raises_exception():
_TAXIIEndpoint("https://example.com/api1/collections/", conn, "other", "test")
assert "A connection and user/password may not both be provided." in str(excinfo.value)
+
+
[email protected]
+def test_valid_content_type_for_connection():
+ """The server responded with charset=utf-8, but the media types are correct
+ and first."""
+ responses.add(responses.GET, COLLECTION_URL, COLLECTIONS_RESPONSE,
+ status=200,
+ content_type=MEDIA_TYPE_TAXII_V20 + "; charset=utf-8")
+
+ conn = _HTTPConnection(user="foo", password="bar", verify=False)
+ conn.get("https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/", MEDIA_TYPE_TAXII_V20, None)
+
+
[email protected]
+def test_invalid_content_type_for_connection():
+ responses.add(responses.GET, COLLECTION_URL, COLLECTIONS_RESPONSE,
+ status=200,
+ content_type=MEDIA_TYPE_TAXII_V20)
+
+ with pytest.raises(TAXIIServiceException) as excinfo:
+ conn = _HTTPConnection(user="foo", password="bar", verify=False)
+ conn.get("https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/", MEDIA_TYPE_TAXII_V20 + "; charset=utf-8", None)
+
+ assert ("Unexpected Response. Got Content-Type: 'application/vnd.oasis.taxii+json; "
+ "version=2.0' for Accept: 'application/vnd.oasis.taxii+json; version=2.0; "
+ "charset=utf-8'") == str(excinfo.value)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
distlib==0.3.9
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pytz==2025.2
requests==2.27.1
responses==0.17.0
six==1.17.0
-e git+https://github.com/oasis-open/cti-taxii-client.git@0201af14e578714a297aff795413babc425a2219#egg=taxii2_client
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: cti-taxii-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- platformdirs==2.4.0
- pytest-cov==4.0.0
- pytz==2025.2
- requests==2.27.1
- responses==0.17.0
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/cti-taxii-client
| [
"taxii2client/test/test_client.py::test_content_type_invalid",
"taxii2client/test/test_client.py::test_invalid_content_type_for_connection"
]
| []
| [
"taxii2client/test/test_client.py::test_server_discovery",
"taxii2client/test/test_client.py::test_minimal_discovery_response",
"taxii2client/test/test_client.py::test_discovery_with_no_default",
"taxii2client/test/test_client.py::test_api_root",
"taxii2client/test/test_client.py::test_api_root_collections",
"taxii2client/test/test_client.py::test_get_collection_by_id_exists",
"taxii2client/test/test_client.py::test_get_collection_by_id_not_present",
"taxii2client/test/test_client.py::test_collection",
"taxii2client/test/test_client.py::test_collection_unexpected_kwarg",
"taxii2client/test/test_client.py::test_get_collection_objects",
"taxii2client/test/test_client.py::test_get_object",
"taxii2client/test/test_client.py::test_cannot_write_to_readonly_collection",
"taxii2client/test/test_client.py::test_add_object_to_collection",
"taxii2client/test/test_client.py::test_add_object_to_collection_dict",
"taxii2client/test/test_client.py::test_add_object_rases_error_when_collection_id_does_not_match_url",
"taxii2client/test/test_client.py::test_cannot_read_from_writeonly_collection",
"taxii2client/test/test_client.py::test_get_manifest",
"taxii2client/test/test_client.py::test_get_status",
"taxii2client/test/test_client.py::test_content_type_valid",
"taxii2client/test/test_client.py::test_url_filter_type",
"taxii2client/test/test_client.py::test_filter_id",
"taxii2client/test/test_client.py::test_filter_version",
"taxii2client/test/test_client.py::test_filter_added_after",
"taxii2client/test/test_client.py::test_filter_combo",
"taxii2client/test/test_client.py::test_params_filter_unknown",
"taxii2client/test/test_client.py::test_taxii_endpoint_raises_exception",
"taxii2client/test/test_client.py::test_valid_content_type_for_connection"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,382 | [
".gitignore",
"taxii2client/__init__.py"
]
| [
".gitignore",
"taxii2client/__init__.py"
]
|
|
oasis-open__cti-taxii-client-32 | 0201af14e578714a297aff795413babc425a2219 | 2018-04-09 20:08:53 | 0201af14e578714a297aff795413babc425a2219 | diff --git a/taxii2client/__init__.py b/taxii2client/__init__.py
index 85a7b7f..1fddedb 100644
--- a/taxii2client/__init__.py
+++ b/taxii2client/__init__.py
@@ -220,23 +220,50 @@ class Status(_TAXIIEndpoint):
self.refresh()
elapsed = time.time() - start_time
- def _populate_fields(self, id, status, total_count, success_count,
- failure_count, pending_count, request_timestamp=None,
+ def _populate_fields(self, id=None, status=None, total_count=None,
+ success_count=None, failure_count=None,
+ pending_count=None, request_timestamp=None,
successes=None, failures=None, pendings=None):
- self.id = id
- self.status = status
- self.request_timestamp = request_timestamp
- self.total_count = total_count
- self.success_count = success_count
- self.failure_count = failure_count
- self.pending_count = pending_count
- self.successes = successes or []
- self.failures = failures or []
- self.pendings = pendings or []
+ self.id = id # required
+ self.status = status # required
+ self.request_timestamp = request_timestamp # optional
+ self.total_count = total_count # required
+ self.success_count = success_count # required
+ self.failure_count = failure_count # required
+ self.pending_count = pending_count # required
+ self.successes = successes or [] # optional
+ self.failures = failures or [] # optional
+ self.pendings = pendings or [] # optional
self._validate_status()
def _validate_status(self):
+ """Validates Status information. Raises errors for required
+ properties."""
+ if not self.id:
+ msg = "No 'id' in Status for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if not self.status:
+ msg = "No 'status' in Status for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self.total_count is None:
+ msg = "No 'total_count' in Status for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self.success_count is None:
+ msg = "No 'success_count' in Status for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self.failure_count is None:
+ msg = "No 'failure_count' in Status for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self.pending_count is None:
+ msg = "No 'pending_count' in Status for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
if len(self.successes) != self.success_count:
msg = "Found successes={}, but success_count={} in status '{}'"
raise ValidationError(msg.format(self.successes,
@@ -356,18 +383,34 @@ class Collection(_TAXIIEndpoint):
def _populate_fields(self, id=None, title=None, description=None,
can_read=None, can_write=None, media_types=None):
- if media_types is None:
- media_types = []
- self._id = id
- self._title = title
- self._description = description
- self._can_read = can_read
- self._can_write = can_write
- self._media_types = media_types
+ self._id = id # required
+ self._title = title # required
+ self._description = description # optional
+ self._can_read = can_read # required
+ self._can_write = can_write # required
+ self._media_types = media_types or [] # optional
self._validate_collection()
def _validate_collection(self):
+ """Validates Collection information. Raises errors for required
+ properties."""
+ if not self._id:
+ msg = "No 'id' in Collection for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if not self._title:
+ msg = "No 'title' in Collection for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self._can_read is None:
+ msg = "No 'can_read' in Collection for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self._can_write is None:
+ msg = "No 'can_write' in Collection for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
if self._id not in self.url:
msg = "The collection '{}' does not match the url for queries '{}'"
raise ValidationError(msg.format(self._id, self.url))
@@ -545,6 +588,21 @@ class ApiRoot(_TAXIIEndpoint):
if not self._loaded_information:
self.refresh_information()
+ def _validate_api_root(self):
+ """Validates API Root information. Raises errors for required
+ properties."""
+ if not self._title:
+ msg = "No 'title' in API Root for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if not self._versions:
+ msg = "No 'versions' in API Root for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
+ if self._max_content_length is None:
+ msg = "No 'max_content_length' in API Root for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
def refresh(self):
"""Update the API Root's information and list of Collections"""
self.refresh_information()
@@ -557,11 +615,12 @@ class ApiRoot(_TAXIIEndpoint):
"""
response = self._conn.get(self.url, accept=MEDIA_TYPE_TAXII_V20)
- self._title = response["title"]
- self._description = response["description"]
- self._versions = response["versions"]
- self._max_content_length = response["max_content_length"]
+ self._title = response.get("title") # required
+ self._description = response.get("description") # optional
+ self._versions = response.get("versions", []) # required
+ self._max_content_length = response.get("max_content_length") # required
+ self._validate_api_root()
self._loaded_information = True
def refresh_collections(self):
@@ -573,7 +632,7 @@ class ApiRoot(_TAXIIEndpoint):
response = self._conn.get(url, accept=MEDIA_TYPE_TAXII_V20)
self._collections = []
- for item in response["collections"]:
+ for item in response.get("collections", []): # optional
collection_url = url + item["id"] + "/"
collection = Collection(collection_url, conn=self._conn, **item)
self._collections.append(collection)
@@ -582,8 +641,8 @@ class ApiRoot(_TAXIIEndpoint):
def get_status(self, status_id):
status_url = self.url + "status/" + status_id + "/"
- info = self._conn.get(status_url, accept=MEDIA_TYPE_TAXII_V20)
- return Status(status_url, conn=self._conn, **info)
+ response = self._conn.get(status_url, accept=MEDIA_TYPE_TAXII_V20)
+ return Status(status_url, conn=self._conn, **response)
class Server(_TAXIIEndpoint):
@@ -649,13 +708,20 @@ class Server(_TAXIIEndpoint):
if not self._loaded:
self.refresh()
+ def _validate_server(self):
+ """Validates server information. Raises errors for required properties.
+ """
+ if not self._title:
+ msg = "No 'title' in Server Discovery for request '{}'"
+ raise ValidationError(msg.format(self.url))
+
def refresh(self):
response = self._conn.get(self.url, accept=MEDIA_TYPE_TAXII_V20)
- self._title = response.get("title")
- self._description = response.get("description")
- self._contact = response.get("contact")
- roots = response.get("api_roots", [])
+ self._title = response.get("title") # required
+ self._description = response.get("description") # optional
+ self._contact = response.get("contact") # optional
+ roots = response.get("api_roots", []) # optional
self._api_roots = [ApiRoot(url,
user=self._user,
password=self._password,
@@ -665,7 +731,8 @@ class Server(_TAXIIEndpoint):
# rather than creating a duplicate. The TAXII 2.0 spec says that the
# `default` API Root MUST be an item in `api_roots`.
root_dict = dict(zip(roots, self._api_roots))
- self._default = root_dict.get(response.get("default"))
+ self._default = root_dict.get(response.get("default")) # optional
+ self._validate_server()
self._loaded = True
| Verify correct handling of all optional fields.
For example: If the server's API root is without description, the client will throw an exception.
But the description is optional according to the spec, I expected there is no error here. | oasis-open/cti-taxii-client | diff --git a/taxii2client/test/test_client.py b/taxii2client/test/test_client.py
index de2e303..afdb0fe 100644
--- a/taxii2client/test/test_client.py
+++ b/taxii2client/test/test_client.py
@@ -7,7 +7,7 @@ import six
from taxii2client import (
MEDIA_TYPE_STIX_V20, MEDIA_TYPE_TAXII_V20, AccessError, ApiRoot,
- Collection, InvalidArgumentsError, Server, TAXIIServiceException,
+ Collection, InvalidArgumentsError, Server, Status, TAXIIServiceException,
ValidationError, _filter_kwargs_to_query_params, _HTTPConnection,
_TAXIIEndpoint, get_collection_by_id
)
@@ -187,6 +187,46 @@ STATUS_RESPONSE = """{
}"""
[email protected]
+def status_dict():
+ return {
+ "id": "2d086da7-4bdc-4f91-900e-d77486753710",
+ "status": "pending",
+ "request_timestamp": "2016-11-02T12:34:34.12345Z",
+ "total_count": 4,
+ "success_count": 1,
+ "successes": [
+ "indicator--c410e480-e42b-47d1-9476-85307c12bcbf"
+ ],
+ "failure_count": 1,
+ "failures": [
+ {
+ "id": "malware--664fa29d-bf65-4f28-a667-bdb76f29ec98",
+ "message": "Unable to process object"
+ }
+ ],
+ "pending_count": 2,
+ "pendings": [
+ "indicator--252c7c11-daf2-42bd-843b-be65edca9f61",
+ "relationship--045585ad-a22f-4333-af33-bfd503a683b5"
+ ]
+ }
+
+
[email protected]
+def collection_dict():
+ return {
+ "id": "e278b87e-0f9b-4c63-a34c-c8f0b3e91acb",
+ "title": "Writable Collection",
+ "description": "This collection is a dropbox for submitting indicators",
+ "can_read": False,
+ "can_write": True,
+ "media_types": [
+ "application/vnd.oasis.stix+json; version=2.0"
+ ]
+ }
+
+
@pytest.fixture
def server():
"""Default server object for example.com"""
@@ -222,6 +262,11 @@ def bad_writable_collection():
return Collection(COLLECTION_URL)
+def set_api_root_response(response):
+ responses.add(responses.GET, API_ROOT_URL, body=response,
+ status=200, content_type=MEDIA_TYPE_TAXII_V20)
+
+
def set_discovery_response(response):
responses.add(responses.GET, DISCOVERY_URL, body=response, status=200,
content_type=MEDIA_TYPE_TAXII_V20)
@@ -282,6 +327,66 @@ def test_discovery_with_no_default(server):
assert server.default is None
[email protected]
+def test_discovery_with_no_title(server):
+ response = """{
+ "description": "This TAXII Server contains a listing of...",
+ "contact": "string containing contact information",
+ "api_roots": [
+ "https://example.com/api1/",
+ "https://example.com/api2/",
+ "https://example.net/trustgroup1/"
+ ]
+ }"""
+ set_discovery_response(response)
+ with pytest.raises(ValidationError) as excinfo:
+ server.refresh()
+
+ assert "No 'title' in Server Discovery for request 'https://example.com/taxii/'" == str(excinfo.value)
+
+
[email protected]
+def test_api_root_no_title(api_root):
+ set_api_root_response("""{
+ "description": "A trust group setup for malware researchers",
+ "versions": ["taxii-2.0"],
+ "max_content_length": 9765625
+ }""")
+ with pytest.raises(ValidationError) as excinfo:
+ assert api_root._loaded_information is False
+ api_root.refresh_information()
+
+ assert "No 'title' in API Root for request 'https://example.com/api1/'" == str(excinfo.value)
+
+
[email protected]
+def test_api_root_no_versions(api_root):
+ set_api_root_response("""{
+ "title": "Malware Research Group",
+ "description": "A trust group setup for malware researchers",
+ "max_content_length": 9765625
+ }""")
+ with pytest.raises(ValidationError) as excinfo:
+ assert api_root._loaded_information is False
+ api_root.refresh_information()
+
+ assert "No 'versions' in API Root for request 'https://example.com/api1/'" == str(excinfo.value)
+
+
[email protected]
+def test_api_root_no_max_content_length(api_root):
+ set_api_root_response("""{
+ "title": "Malware Research Group",
+ "description": "A trust group setup for malware researchers",
+ "versions": ["taxii-2.0"]
+ }""")
+ with pytest.raises(ValidationError) as excinfo:
+ assert api_root._loaded_information is False
+ api_root.refresh_information()
+
+ assert "No 'max_content_length' in API Root for request 'https://example.com/api1/'" == str(excinfo.value)
+
+
@responses.activate
def test_api_root(api_root):
responses.add(responses.GET, API_ROOT_URL, API_ROOT_RESPONSE,
@@ -563,3 +668,93 @@ def test_taxii_endpoint_raises_exception():
_TAXIIEndpoint("https://example.com/api1/collections/", conn, "other", "test")
assert "A connection and user/password may not both be provided." in str(excinfo.value)
+
+
+def test_status_missing_id_property(status_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ status_dict.pop("id")
+ Status("https://example.com/api1/status/12345678-1234-1234-1234-123456789012/",
+ user="foo", password="bar", verify=False, **status_dict)
+
+ assert "No 'id' in Status for request 'https://example.com/api1/status/12345678-1234-1234-1234-123456789012/'" == str(excinfo.value)
+
+
+def test_status_missing_status_property(status_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ status_dict.pop("status")
+ Status("https://example.com/api1/status/12345678-1234-1234-1234-123456789012/",
+ user="foo", password="bar", verify=False, **status_dict)
+
+ assert "No 'status' in Status for request 'https://example.com/api1/status/12345678-1234-1234-1234-123456789012/'" == str(excinfo.value)
+
+
+def test_status_missing_total_count_property(status_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ status_dict.pop("total_count")
+ Status("https://example.com/api1/status/12345678-1234-1234-1234-123456789012/",
+ user="foo", password="bar", verify=False, **status_dict)
+
+ assert "No 'total_count' in Status for request 'https://example.com/api1/status/12345678-1234-1234-1234-123456789012/'" == str(excinfo.value)
+
+
+def test_status_missing_success_count_property(status_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ status_dict.pop("success_count")
+ Status("https://example.com/api1/status/12345678-1234-1234-1234-123456789012/",
+ user="foo", password="bar", verify=False, **status_dict)
+
+ assert "No 'success_count' in Status for request 'https://example.com/api1/status/12345678-1234-1234-1234-123456789012/'" == str(excinfo.value)
+
+
+def test_status_missing_failure_count_property(status_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ status_dict.pop("failure_count")
+ Status("https://example.com/api1/status/12345678-1234-1234-1234-123456789012/",
+ user="foo", password="bar", verify=False, **status_dict)
+
+ assert "No 'failure_count' in Status for request 'https://example.com/api1/status/12345678-1234-1234-1234-123456789012/'" == str(excinfo.value)
+
+
+def test_status_missing_pending_count_property(status_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ status_dict.pop("pending_count")
+ Status("https://example.com/api1/status/12345678-1234-1234-1234-123456789012/",
+ user="foo", password="bar", verify=False, **status_dict)
+
+ assert "No 'pending_count' in Status for request 'https://example.com/api1/status/12345678-1234-1234-1234-123456789012/'" == str(excinfo.value)
+
+
+def test_collection_missing_id_property(collection_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ collection_dict.pop("id")
+ Collection("https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/",
+ user="foo", password="bar", verify=False, **collection_dict)
+
+ assert "No 'id' in Collection for request 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/'" == str(excinfo.value)
+
+
+def test_collection_missing_title_property(collection_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ collection_dict.pop("title")
+ Collection("https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/",
+ user="foo", password="bar", verify=False, **collection_dict)
+
+ assert "No 'title' in Collection for request 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/'" == str(excinfo.value)
+
+
+def test_collection_missing_can_read_property(collection_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ collection_dict.pop("can_read")
+ Collection("https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/",
+ user="foo", password="bar", verify=False, **collection_dict)
+
+ assert "No 'can_read' in Collection for request 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/'" == str(excinfo.value)
+
+
+def test_collection_missing_can_write_property(collection_dict):
+ with pytest.raises(ValidationError) as excinfo:
+ collection_dict.pop("can_write")
+ Collection("https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/",
+ user="foo", password="bar", verify=False, **collection_dict)
+
+ assert "No 'can_write' in Collection for request 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/'" == str(excinfo.value)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"responses"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
idna==3.10
iniconfig==2.1.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
six==1.17.0
-e git+https://github.com/oasis-open/cti-taxii-client.git@0201af14e578714a297aff795413babc425a2219#egg=taxii2_client
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
| name: cti-taxii-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/cti-taxii-client
| [
"taxii2client/test/test_client.py::test_discovery_with_no_title",
"taxii2client/test/test_client.py::test_api_root_no_title",
"taxii2client/test/test_client.py::test_api_root_no_versions",
"taxii2client/test/test_client.py::test_api_root_no_max_content_length",
"taxii2client/test/test_client.py::test_status_missing_id_property",
"taxii2client/test/test_client.py::test_status_missing_status_property",
"taxii2client/test/test_client.py::test_status_missing_total_count_property",
"taxii2client/test/test_client.py::test_status_missing_success_count_property",
"taxii2client/test/test_client.py::test_status_missing_failure_count_property",
"taxii2client/test/test_client.py::test_status_missing_pending_count_property",
"taxii2client/test/test_client.py::test_collection_missing_id_property",
"taxii2client/test/test_client.py::test_collection_missing_title_property",
"taxii2client/test/test_client.py::test_collection_missing_can_read_property",
"taxii2client/test/test_client.py::test_collection_missing_can_write_property"
]
| []
| [
"taxii2client/test/test_client.py::test_server_discovery",
"taxii2client/test/test_client.py::test_minimal_discovery_response",
"taxii2client/test/test_client.py::test_discovery_with_no_default",
"taxii2client/test/test_client.py::test_api_root",
"taxii2client/test/test_client.py::test_api_root_collections",
"taxii2client/test/test_client.py::test_get_collection_by_id_exists",
"taxii2client/test/test_client.py::test_get_collection_by_id_not_present",
"taxii2client/test/test_client.py::test_collection",
"taxii2client/test/test_client.py::test_collection_unexpected_kwarg",
"taxii2client/test/test_client.py::test_get_collection_objects",
"taxii2client/test/test_client.py::test_get_object",
"taxii2client/test/test_client.py::test_cannot_write_to_readonly_collection",
"taxii2client/test/test_client.py::test_add_object_to_collection",
"taxii2client/test/test_client.py::test_add_object_to_collection_dict",
"taxii2client/test/test_client.py::test_add_object_rases_error_when_collection_id_does_not_match_url",
"taxii2client/test/test_client.py::test_cannot_read_from_writeonly_collection",
"taxii2client/test/test_client.py::test_get_manifest",
"taxii2client/test/test_client.py::test_get_status",
"taxii2client/test/test_client.py::test_content_type_valid",
"taxii2client/test/test_client.py::test_content_type_invalid",
"taxii2client/test/test_client.py::test_url_filter_type",
"taxii2client/test/test_client.py::test_filter_id",
"taxii2client/test/test_client.py::test_filter_version",
"taxii2client/test/test_client.py::test_filter_added_after",
"taxii2client/test/test_client.py::test_filter_combo",
"taxii2client/test/test_client.py::test_params_filter_unknown",
"taxii2client/test/test_client.py::test_taxii_endpoint_raises_exception"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,383 | [
"taxii2client/__init__.py"
]
| [
"taxii2client/__init__.py"
]
|
|
dwavesystems__dimod-171 | 2859c969a064fb2b7053919c8f1b50977a408511 | 2018-04-09 20:45:34 | 8ebfffa42319aa4850cfc5a1c99a8711eac44722 | diff --git a/dimod/binary_quadratic_model.py b/dimod/binary_quadratic_model.py
index 47a6c83b..80397f47 100644
--- a/dimod/binary_quadratic_model.py
+++ b/dimod/binary_quadratic_model.py
@@ -5,6 +5,7 @@ todo - describe Ising, QUBO and BQM
"""
from __future__ import absolute_import, division
+from collections import Sized, Container, Iterable
from numbers import Number
from six import itervalues, iteritems, iterkeys
@@ -15,7 +16,7 @@ from dimod.utilities import resolve_label_conflict
from dimod.vartypes import Vartype
-class BinaryQuadraticModel(object):
+class BinaryQuadraticModel(Sized, Container, Iterable):
"""Encodes a binary quadratic model.
Binary quadratic model is the superclass that contains the `Ising model`_ and the QUBO_.
@@ -60,12 +61,23 @@ class BinaryQuadraticModel(object):
class assume that they are numeric.
Examples:
- This example creates a model with three spin variables.
+ This example creates a binary quadratic model with three spin variables.
- >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
- ... {(0, 1): .5, (1, 2): 1.5},
- ... 1.4,
- ... dimod.SPIN)
+ >>> bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
+ ... {(0, 1): .5, (1, 2): 1.5},
+ ... 1.4,
+ ... dimod.SPIN)
+
+ Variables can be any hashable object
+
+ >>> bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': -1.0, 'c': 0.5},
+ ... {('a', 'b'): -1.0, ('b', 'c'): 1.5},
+ ... 1.4,
+ ... dimod.SPIN)
+ >>> len(bqm)
+ 3
+ >>> 'b' in bqm
+ True
Attributes:
linear (dict[variable, bias]):
@@ -195,7 +207,14 @@ class BinaryQuadraticModel(object):
def __len__(self):
"""The length is number of variables."""
- return len(self.linear)
+ return self.adj.__len__()
+
+ def __contains__(self, v):
+ """The variables"""
+ return self.adj.__contains__(v)
+
+ def __iter__(self):
+ return self.adj.__iter__()
##################################################################################################
# vartype properties
diff --git a/dimod/embedding/transforms.py b/dimod/embedding/transforms.py
index 332e0ec2..2b2d3b52 100644
--- a/dimod/embedding/transforms.py
+++ b/dimod/embedding/transforms.py
@@ -386,8 +386,11 @@ def unembed_response(target_response, embedding, source_bqm, chain_break_method=
chain_break_method (function, optional, default=:func:`.majority_vote`):
The method used to resolve chain breaks.
+ Returns:
+ :obj:`.Response`
+
"""
- if any(v not in source_bqm.linear for v in embedding):
+ if any(v not in embedding for v in source_bqm):
raise ValueError("given bqm does not match the embedding")
energies = []
| BinaryQuadraticModel should have a correct abstract base class
Should be `collections.abc.Sized` as currently implemented.
Also could be `collections.abc.Container` or even `collections.abc.Collection`. | dwavesystems/dimod | diff --git a/tests/test_binary_quadratic_model.py b/tests/test_binary_quadratic_model.py
index b8593fa0..72f9793d 100644
--- a/tests/test_binary_quadratic_model.py
+++ b/tests/test_binary_quadratic_model.py
@@ -191,6 +191,25 @@ class TestBinaryQuadraticModel(unittest.TestCase):
self.assertEqual(len(bqm), len(linear))
+ def test__contains__(self):
+ bqm = dimod.BinaryQuadraticModel({'a': -1}, {}, 0.0, dimod.SPIN)
+
+ self.assertIn('a', bqm)
+ self.assertNotIn('b', bqm)
+
+ bqm.add_interaction('a', 'b', .5)
+
+ self.assertIn('b', bqm)
+
+ def test__iter__(self):
+ bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)
+
+ self.assertEqual(set(bqm), set())
+
+ bqm.add_interaction('a', 'b', -1)
+
+ self.assertEqual(set(bqm), {'a', 'b'})
+
def test_add_variable(self):
bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
bqm.add_variable('a', .5)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"coverage",
"coveralls",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
decorator==4.4.2
-e git+https://github.com/dwavesystems/dimod.git@2859c969a064fb2b7053919c8f1b50977a408511#egg=dimod
docopt==0.6.2
enum34==1.1.6
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==2.6.0
mock==5.2.0
networkx==2.5.1
numpy==1.11.3
packaging==21.3
pandas==0.22.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.11.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: dimod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- decorator==4.4.2
- docopt==0.6.2
- enum34==1.1.6
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==2.6.0
- mock==5.2.0
- networkx==2.5.1
- numpy==1.11.3
- packaging==21.3
- pandas==0.22.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.11.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/dimod
| [
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__contains__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__iter__"
]
| []
| [
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__eq__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__len__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__repr__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interaction",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interaction_counterpart",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interactions_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_offset",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variable_counterpart",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variables_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_binary_property",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_binary_property_relabel",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_change_vartype",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_constract_variables",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction_quadratic",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction_vartype",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_copy",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_fix_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_flip_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_partial_relabel_copy",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_partial_relabel_inplace",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical_copy",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical_inplace",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_with_identity",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_with_overlap",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_interaction",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_interactions_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_offset",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_variables_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_scale",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_spin_property",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_spin_property_relabel",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_update",
"tests/test_binary_quadratic_model.py::TestConvert::test_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_from_ising",
"tests/test_binary_quadratic_model.py::TestConvert::test_from_numpy_matrix",
"tests/test_binary_quadratic_model.py::TestConvert::test_from_qubo",
"tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json",
"tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json_with_info",
"tests/test_binary_quadratic_model.py::TestConvert::test_info",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_ising_binary_to_ising",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_ising_spin_to_ising",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_file",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_file_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_string",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_string_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_networkx_graph",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_numpy_matrix",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_pandas_dataframe",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_qubo_binary_to_qubo",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_qubo_spin_to_qubo"
]
| []
| Apache License 2.0 | 2,384 | [
"dimod/binary_quadratic_model.py",
"dimod/embedding/transforms.py"
]
| [
"dimod/binary_quadratic_model.py",
"dimod/embedding/transforms.py"
]
|
|
dwavesystems__dimod-174 | 7b75e47ce4fec541e432f84367ba58393934b941 | 2018-04-10 00:05:58 | 8ebfffa42319aa4850cfc5a1c99a8711eac44722 | diff --git a/dimod/response.py b/dimod/response.py
index d3b2c38d..59d96329 100644
--- a/dimod/response.py
+++ b/dimod/response.py
@@ -87,21 +87,24 @@ class Response(Iterable, Sized):
self._samples_matrix = samples_matrix
num_samples, num_variables = samples_matrix.shape
- if not isinstance(data_vectors, dict):
+ if not isinstance(data_vectors, Mapping):
raise TypeError("expected 'data_vectors' to be a dict")
if 'energy' not in data_vectors:
raise ValueError("energy must be provided")
else:
- data_vectors = data_vectors.copy() # shallow copy
- data_vectors['energy'] = np.asarray(data_vectors['energy'])
- for vector in data_vectors.values():
- # todo - check that is a vector and that has the right length
- if isinstance(vector, (np.ndarray, list)):
- if len(vector) != num_samples:
- raise ValueError(("expected data vector {} (length {}) to be a vector of length {}"
- "").format(vector, len(vector), num_samples))
- else:
- raise TypeError("expected data vector {} to be a list of NumPy array".format(vector))
+ data_vectors = dict(data_vectors) # shallow copy
+
+ for key, vector in iteritems(data_vectors):
+ try:
+ data_vectors[key] = vector = np.asarray(vector)
+ except (ValueError, TypeError):
+ raise TypeError("expected data vector {} to be array-like".format(key))
+
+ shape = vector.shape
+ if not shape or shape[0] != num_samples:
+ raise ValueError(("expected data vector {} (shape {}) to have {} rows"
+ "").format(key, vector.shape, num_samples))
+
self._data_vectors = data_vectors
# vartype is checked by the decorator
@@ -824,10 +827,13 @@ class Response(Iterable, Sized):
# Viewing a Response
###############################################################################################
- def samples(self, sorted_by='energy'):
+ def samples(self, n=None, sorted_by='energy'):
"""Iterate over the samples in the response.
Args:
+ n (int, optional, default=None):
+ The maximum number of samples to provide. If None, all are provided.
+
sorted_by (str/None, optional, default='energy'):
Selects the `data_vector` used to sort the samples. If None, the samples are yielded in
the order given by the samples matrix.
@@ -861,13 +867,21 @@ class Response(Iterable, Sized):
{'a': -1, 'b': 1}
"""
+ num_samples = len(self)
+
+ if n is not None:
+ for sample in itertools.islice(self.samples(n=None, sorted_by=sorted_by), n):
+ yield sample
+ return
+
if sorted_by is None:
- order = np.arange(len(self))
+ order = np.arange(num_samples)
else:
order = np.argsort(self.data_vectors[sorted_by])
samples = self.samples_matrix
label_mapping = self.label_to_idx
+
for idx in order:
yield SampleView(idx, self)
| data_vectors should have either numpy array values, or a list values, but not both.
https://github.com/dwavesystems/dimod/blob/7b75e47ce4fec541e432f84367ba58393934b941/dimod/response.py#L40
Making it be more than one thing requires the parser of the response to inspect the object before using it. If we want to have the benefits of a numpy array for some of the data_vectors, I think it's worth it to make everything a numpy array
If we don't need it to be a numpy array, might as well make them all lists? | dwavesystems/dimod | diff --git a/tests/test_response.py b/tests/test_response.py
index 73c1091e..6c5f827e 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -54,7 +54,7 @@ class TestResponse(unittest.TestCase):
npt.assert_equal(samples_matrix, response.samples_matrix)
npt.assert_allclose(energies, response.data_vectors['energy'])
- def test_data_vector_copy(self):
+ def test_data_vectors_copy(self):
samples_matrix = np.matrix([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 0, 0, 0],
@@ -66,6 +66,80 @@ class TestResponse(unittest.TestCase):
self.assertIsNot(response.data_vectors, data_vectors)
+ def test_data_vectors_are_arrays(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = [1, 1, 2, 1]
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ self.assertEqual(len(response.data_vectors), 3)
+
+ for key in data_vectors:
+ self.assertIn(key, response.data_vectors)
+
+ vector = response.data_vectors[key]
+
+ self.assertIsInstance(vector, np.ndarray)
+
+ self.assertEqual(vector.shape, (4,))
+
+ def test_data_vectors_wrong_length(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = [1, 1, 2, 1, 1]
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ with self.assertRaises(ValueError):
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ def test_data_vectors_not_array_like(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = 'hi there'
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ with self.assertRaises(ValueError):
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ def test_samples_num_limited(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = [1, 1, 2, 1]
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ samples_list = list(response.samples())
+
+ self.assertEqual(len(samples_list), 4)
+
+ shortened_samples_list = list(response.samples(3))
+
+ self.assertEqual(len(shortened_samples_list), 3)
+ self.assertEqual(shortened_samples_list, samples_list[0:3])
+
def test_instantiation_without_energy(self):
samples_matrix = np.matrix([[0, 1, 0, 1],
[1, 0, 1, 0],
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"tests/requirements.txt",
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
decorator==5.1.1
-e git+https://github.com/dwavesystems/dimod.git@7b75e47ce4fec541e432f84367ba58393934b941#egg=dimod
docopt==0.6.2
docutils==0.18.1
enum34==1.1.6
execnet==1.9.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
jsonschema==2.6.0
MarkupSafe==2.0.1
mock==2.0.0
networkx==2.0
numpy==1.11.3
packaging==21.3
pandas==0.22.0
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.11.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: dimod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- decorator==5.1.1
- docopt==0.6.2
- docutils==0.18.1
- enum34==1.1.6
- execnet==1.9.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- jsonschema==2.6.0
- markupsafe==2.0.1
- mock==2.0.0
- networkx==2.0
- numpy==1.11.3
- packaging==21.3
- pandas==0.22.0
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.11.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/dimod
| [
"tests/test_response.py::TestResponse::test_data_vectors_are_arrays",
"tests/test_response.py::TestResponse::test_data_vectors_not_array_like",
"tests/test_response.py::TestResponse::test_samples_num_limited"
]
| []
| [
"tests/test_response.py::TestResponse::test__iter__",
"tests/test_response.py::TestResponse::test_change_vartype_copy",
"tests/test_response.py::TestResponse::test_change_vartype_inplace",
"tests/test_response.py::TestResponse::test_data_docstrings",
"tests/test_response.py::TestResponse::test_data_vectors_copy",
"tests/test_response.py::TestResponse::test_data_vectors_wrong_length",
"tests/test_response.py::TestResponse::test_empty",
"tests/test_response.py::TestResponse::test_from_dicts",
"tests/test_response.py::TestResponse::test_from_dicts_unlike_labels",
"tests/test_response.py::TestResponse::test_from_dicts_unsortable_labels",
"tests/test_response.py::TestResponse::test_from_futures",
"tests/test_response.py::TestResponse::test_from_futures_column_subset",
"tests/test_response.py::TestResponse::test_from_futures_extra_keys",
"tests/test_response.py::TestResponse::test_from_futures_typical",
"tests/test_response.py::TestResponse::test_from_matrix",
"tests/test_response.py::TestResponse::test_from_pandas",
"tests/test_response.py::TestResponse::test_infer_vartype",
"tests/test_response.py::TestResponse::test_instantiation",
"tests/test_response.py::TestResponse::test_instantiation_without_energy",
"tests/test_response.py::TestResponse::test_partial_relabel",
"tests/test_response.py::TestResponse::test_partial_relabel_inplace",
"tests/test_response.py::TestResponse::test_relabel_copy",
"tests/test_response.py::TestResponse::test_relabel_docstring",
"tests/test_response.py::TestResponse::test_update",
"tests/test_response.py::TestResponse::test_update_energy"
]
| []
| Apache License 2.0 | 2,385 | [
"dimod/response.py"
]
| [
"dimod/response.py"
]
|
|
google__mobly-432 | 02b9d84acfe775a6fe73e2b960ba7e47765184d6 | 2018-04-10 03:22:26 | 95286a01a566e056d44acfa9577a45bc7f37f51d | xpconanfan: Is there any test we can add for this?
winterfroststrom: I did write a unit test for this, but I originally decided against including it because it's kinda specific.
---
Review status: 0 of 2 files reviewed at latest revision, all discussions resolved.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/432#-:-L9l4tLx5kob-0O5JS3w:bgipfst)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py
index f1a4636..14828a4 100644
--- a/mobly/controllers/android_device.py
+++ b/mobly/controllers/android_device.py
@@ -436,9 +436,8 @@ class AndroidDevice(object):
self._log_path = os.path.join(self._log_path_base,
'AndroidDevice%s' % self._serial)
self._debug_tag = self._serial
- self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), {
- 'tag': self.debug_tag
- })
+ self.log = AndroidDeviceLoggerAdapter(logging.getLogger(),
+ {'tag': self.debug_tag})
self.sl4a = None
self.ed = None
self._adb_logcat_process = None
@@ -937,6 +936,7 @@ class AndroidDevice(object):
f_name = os.path.basename(self.adb_logcat_file_path)
out_name = f_name.replace('adblog,', '').replace('.txt', '')
out_name = ',%s,%s.txt' % (begin_time, out_name)
+ out_name = out_name.replace(':', '-')
tag_len = utils.MAX_FILENAME_LEN - len(out_name)
tag = tag[:tag_len]
out_name = tag + out_name
diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
index 07dc50d..dd32769 100644
--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py
+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
@@ -316,6 +316,17 @@ class JsonRpcClientBase(object):
ad=self._ad)
return result['result']
+ def disable_hidden_api_blacklist(self):
+ """If necessary and possible, disables hidden api blacklist."""
+ version_codename = self._ad.adb.getprop('ro.build.version.codename')
+ sdk_version = int(self._ad.adb.getprop('ro.build.version.sdk'))
+ # we check version_codename in addition to sdk_version because P builds
+ # in development report sdk_version 27, but still enforce the blacklist.
+ if self._ad.is_rootable and (sdk_version >= 28 or
+ version_codename == 'P'):
+ self._ad.adb.shell(
+ 'settings put global hidden_api_blacklist_exemptions "*"')
+
def __getattr__(self, name):
"""Wrapper for python magic to turn method calls into RPC calls."""
diff --git a/mobly/controllers/android_device_lib/sl4a_client.py b/mobly/controllers/android_device_lib/sl4a_client.py
index 4b5396a..1222925 100644
--- a/mobly/controllers/android_device_lib/sl4a_client.py
+++ b/mobly/controllers/android_device_lib/sl4a_client.py
@@ -61,6 +61,7 @@ class Sl4aClient(jsonrpc_client_base.JsonRpcClientBase):
raise jsonrpc_client_base.AppStartError(
self._ad, '%s is not installed on %s' % (_APP_NAME,
self._adb.serial))
+ self.disable_hidden_api_blacklist()
# sl4a has problems connecting after disconnection, so kill the apk and
# try connecting again.
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index 1f7ec0d..7f4893f 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -90,6 +90,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
def start_app_and_connect(self):
"""Overrides superclass. Launches a snippet app and connects to it."""
self._check_app_installed()
+ self.disable_hidden_api_blacklist()
persists_shell_cmd = self._get_persist_command()
# Use info here so people can follow along with the snippet startup
| Instrumentation parser output is concatenated in python3.
Python3 outputs newline characters as literals for byte strings.
This causes an issue because `run_instrumentation_test` outputs the raw instrumentation output, which ends up being concatenated since the newlines doesn't properly display with python3.
When fixing https://github.com/google/mobly/issues/429, I did not actually run against a real device with python3 (only ran unit tests with python 2 and 3), which meant that I did not actually make sure the output looked correct. | google/mobly | diff --git a/mobly/base_instrumentation_test.py b/mobly/base_instrumentation_test.py
index 4966cd4..bb72075 100644
--- a/mobly/base_instrumentation_test.py
+++ b/mobly/base_instrumentation_test.py
@@ -927,7 +927,7 @@ class BaseInstrumentationTestClass(base_test.BaseTestClass):
package=package,
options=options,
runner=runner,
- )
+ ).decode('utf-8')
logging.info('Outputting instrumentation test log...')
logging.info(instrumentation_output)
@@ -935,5 +935,5 @@ class BaseInstrumentationTestClass(base_test.BaseTestClass):
instrumentation_block = _InstrumentationBlock(prefix=prefix)
for line in instrumentation_output.splitlines():
instrumentation_block = self._parse_line(instrumentation_block,
- line.decode('utf-8'))
+ line)
return self._finish_parsing(instrumentation_block)
diff --git a/tests/mobly/base_instrumentation_test_test.py b/tests/mobly/base_instrumentation_test_test.py
index 2256475..3908015 100755
--- a/tests/mobly/base_instrumentation_test_test.py
+++ b/tests/mobly/base_instrumentation_test_test.py
@@ -34,6 +34,17 @@ MOCK_PREFIX = 'my_prefix'
# A mock name for the instrumentation test subclass.
MOCK_INSTRUMENTATION_TEST_CLASS_NAME = 'MockInstrumentationTest'
+MOCK_EMPTY_INSTRUMENTATION_TEST = """\
+INSTRUMENTATION_RESULT: stream=
+
+Time: 0.001
+
+OK (0 tests)
+
+
+INSTRUMENTATION_CODE: -1
+"""
+
class MockInstrumentationTest(BaseInstrumentationTestClass):
def __init__(self, tmp_dir, user_params={}):
@@ -229,18 +240,21 @@ INSTRUMENTATION_STATUS_CODE: -1
instrumentation_output, expected_has_error=True)
def test_run_instrumentation_test_with_no_tests(self):
- instrumentation_output = """\
-INSTRUMENTATION_RESULT: stream=
-
-Time: 0.001
-
-OK (0 tests)
-
+ instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
+ self.assert_run_instrumentation_test(
+ instrumentation_output, expected_completed_and_passed=True)
-INSTRUMENTATION_CODE: -1
-"""
+ @unittest.skipUnless(
+ sys.version_info >= (3, 0),
+ 'Only python3 displays different string types differently.')
+ @mock.patch('logging.info')
+ def test_run_instrumentation_test_logs_correctly(self, mock_info_logger):
+ instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
self.assert_run_instrumentation_test(
instrumentation_output, expected_completed_and_passed=True)
+ for mock_call in mock_info_logger.mock_calls:
+ logged_format = mock_call[1][0]
+ self.assertIsInstance(logged_format, str)
def test_run_instrumentation_test_with_passing_test(self):
instrumentation_output = """\
diff --git a/tests/mobly/controllers/android_device_lib/sl4a_client_test.py b/tests/mobly/controllers/android_device_lib/sl4a_client_test.py
index e75bd53..bc06220 100755
--- a/tests/mobly/controllers/android_device_lib/sl4a_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/sl4a_client_test.py
@@ -34,6 +34,12 @@ class MockAdbProxy(object):
return bytes('', 'utf-8')
return bytes('package:com.googlecode.android_scripting', 'utf-8')
+ def getprop(self, params):
+ if params == 'ro.build.version.codename':
+ return 'Z'
+ elif params == 'ro.build.version.sdk':
+ return '28'
+
def __getattr__(self, name):
"""All calls to the none-existent functions in adb proxy would
simply return the adb command string.
diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
index 28dc3d8..2c875d8 100755
--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
@@ -55,6 +55,12 @@ class MockAdbProxy(object):
elif 'which' in params:
return b''
+ def getprop(self, params):
+ if params == 'ro.build.version.codename':
+ return 'Z'
+ elif params == 'ro.build.version.sdk':
+ return '28'
+
def __getattr__(self, name):
"""All calls to the none-existent functions in adb proxy would
simply return the adb command string.
diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py
index b1428ec..f175f17 100755
--- a/tests/mobly/controllers/android_device_test.py
+++ b/tests/mobly/controllers/android_device_test.py
@@ -630,7 +630,7 @@ class AndroidDeviceTest(unittest.TestCase):
ad.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
cat_file_path = os.path.join(
ad.log_path, 'AdbLogExcerpts',
- ('some_test,02-29 14:02:20.123,%s,%s.txt') % (ad.model, ad.serial))
+ ('some_test,02-29 14-02-20.123,%s,%s.txt') % (ad.model, ad.serial))
with open(cat_file_path, 'r') as f:
actual_cat = f.read()
self.assertEqual(actual_cat, ''.join(MOCK_ADB_LOGCAT_CAT_RESULT))
diff --git a/tests/mobly/controllers/monsoon_test.py b/tests/mobly/controllers/monsoon_test.py
index a726049..c8b6eeb 100755
--- a/tests/mobly/controllers/monsoon_test.py
+++ b/tests/mobly/controllers/monsoon_test.py
@@ -12,12 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import platform
+
from future.tests.base import unittest
class MonsoonTest(unittest.TestCase):
-
+ @unittest.skipIf(platform.system() == 'Windows',
+ 'fcntl does not exist on Windows')
def test_monsoon_import(self):
+ # TODO: Replace 'fnctl' with a Windows equivalent when on Windows
from mobly.controllers import monsoon
diff --git a/tests/mobly/logger_test.py b/tests/mobly/logger_test.py
index b1cf839..1ac9f1d 100755
--- a/tests/mobly/logger_test.py
+++ b/tests/mobly/logger_test.py
@@ -1,11 +1,11 @@
# Copyright 2016 Google Inc.
-#
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/tests/mobly/output_test.py b/tests/mobly/output_test.py
index 9655f4c..ff5cae8 100755
--- a/tests/mobly/output_test.py
+++ b/tests/mobly/output_test.py
@@ -15,6 +15,7 @@
import logging
import mock
import os
+import platform
import shutil
import tempfile
import unittest
@@ -28,6 +29,10 @@ from tests.lib import mock_controller
from tests.lib import integration_test
from tests.lib import teardown_class_failure_test
+if platform.system() == 'Windows':
+ import win32file
+ from win32com import client
+
class OutputTest(unittest.TestCase):
"""This test class has unit tests for the implementation of Mobly's output
@@ -87,6 +92,39 @@ class OutputTest(unittest.TestCase):
for item in blacklist:
self.assertNotIn(item, content)
+ @unittest.skipIf(platform.system() == 'Windows',
+ 'Symlinks are usually specific to Unix operating systems')
+ def test_symlink(self):
+ """Verifies the symlink is created and links properly."""
+ mock_test_config = self.create_mock_test_config(
+ self.base_mock_test_config)
+ tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
+ tr.setup_logger()
+ symlink = os.path.join(self.log_dir, self.test_bed_name, 'latest')
+ self.assertEqual(os.readlink(symlink), logging.log_path)
+
+ @unittest.skipIf(platform.system() != 'Windows',
+ 'Shortcuts are specific to Windows operating systems')
+ def test_shortcut(self):
+ """Verifies the shortcut is created and links properly."""
+ shortcut_path = os.path.join(self.log_dir, self.test_bed_name,
+ 'latest.lnk')
+ shell = client.Dispatch("WScript.Shell")
+ shortcut = shell.CreateShortCut(shortcut_path)
+ self.assertFalse(shortcut.Targetpath)
+ mock_test_config = self.create_mock_test_config(
+ self.base_mock_test_config)
+ tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
+ tr.setup_logger()
+ tr._teardown_logger()
+ shortcut = shell.CreateShortCut(shortcut_path)
+ # Normalize paths for case and truncation
+ normalized_shortcut_path = os.path.normcase(
+ win32file.GetLongPathName(shortcut.Targetpath))
+ normalized_logger_path = os.path.normcase(
+ win32file.GetLongPathName(logging.log_path))
+ self.assertEqual(normalized_shortcut_path, normalized_logger_path)
+
def test_setup_logger_before_run(self):
"""Verifies the expected output files from a test run.
@@ -103,7 +141,7 @@ class OutputTest(unittest.TestCase):
logging.debug(debug_uuid)
tr.add_test_class(mock_test_config, integration_test.IntegrationTest)
tr.run()
- output_dir = os.path.join(self.log_dir, self.test_bed_name, 'latest')
+ output_dir = logging.log_path
(summary_file_path, debug_log_path,
info_log_path) = self.assert_output_logs_exist(output_dir)
self.assert_log_contents(
@@ -190,7 +228,7 @@ class OutputTest(unittest.TestCase):
tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
tr.add_test_class(mock_test_config, integration_test.IntegrationTest)
tr.run()
- output_dir = os.path.join(self.log_dir, self.test_bed_name, 'latest')
+ output_dir = logging.log_path
(summary_file_path, debug_log_path,
info_log_path) = self.assert_output_logs_exist(output_dir)
summary_entries = []
@@ -211,7 +249,7 @@ class OutputTest(unittest.TestCase):
tr.add_test_class(mock_test_config,
teardown_class_failure_test.TearDownClassFailureTest)
tr.run()
- output_dir = os.path.join(self.log_dir, self.test_bed_name, 'latest')
+ output_dir = logging.log_path
summary_file_path = os.path.join(output_dir,
records.OUTPUT_FILE_SUMMARY)
found = False
diff --git a/tests/mobly/test_runner_test.py b/tests/mobly/test_runner_test.py
index 35eeded..ab26971 100755
--- a/tests/mobly/test_runner_test.py
+++ b/tests/mobly/test_runner_test.py
@@ -12,8 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
import mock
import os
+import platform
+import re
import shutil
import tempfile
import yaml
@@ -201,8 +204,7 @@ class TestRunnerTest(unittest.TestCase):
tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
tr.add_test_class(mock_test_config, integration_test.IntegrationTest)
tr.run()
- summary_path = os.path.join(mock_test_config.log_path,
- mock_test_config.test_bed_name, 'latest',
+ summary_path = os.path.join(logging.log_path,
records.OUTPUT_FILE_SUMMARY)
with open(summary_path, 'r') as f:
summary_entries = list(yaml.load_all(f))
@@ -297,7 +299,7 @@ class TestRunnerTest(unittest.TestCase):
test_runner.Error,
'TestRunner\'s log folder is "/different/log/dir", but a test '
r'config with a different log folder \("%s"\) was added.' %
- self.log_dir):
+ re.escape(self.log_dir)):
tr.add_test_class(self.base_mock_test_config,
integration_test.IntegrationTest)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/google/mobly.git@02b9d84acfe775a6fe73e2b960ba7e47765184d6#egg=mobly
mock==1.0.1
packaging==24.2
pluggy==1.5.0
portpicker==1.6.0
psutil==7.0.0
pyserial==3.5
pytest==8.3.5
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli==2.2.1
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- future==1.0.0
- iniconfig==2.1.0
- mock==1.0.1
- packaging==24.2
- pluggy==1.5.0
- portpicker==1.6.0
- psutil==7.0.0
- pyserial==3.5
- pytest==8.3.5
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- tomli==2.2.1
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log"
]
| [
"tests/mobly/output_test.py::OutputTest::test_basic_output",
"tests/mobly/output_test.py::OutputTest::test_teardown_class_output",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_summary_file_entries"
]
| [
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test__Instrumentation_block_set_key_on_multiple_equals_sign",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_mixed_user_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_no_instrumentation_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_no_user_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_only_instrumentation_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_logs_correctly",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_assumption_failure_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_crashed_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_crashing_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_failing_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_ignored_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_invalid_syntax",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_missing_runner",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_missing_test_package",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_multiple_tests",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_no_output",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_no_tests",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_passing_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_prefix_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_random_whitespace",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_runner_setup_crash",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_runner_teardown_crash",
"tests/mobly/controllers/android_device_lib/sl4a_client_test.py::Sl4aClientTest::test_app_not_installed",
"tests/mobly/controllers/android_device_lib/sl4a_client_test.py::Sl4aClientTest::test_start_app_and_connect",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_persistent_session",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_crash",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_logpersist",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_all_logpersist",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_logpersist_start",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_logpersist_stop",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_no_log_exists",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_existing_file",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_service",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_device_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_fail_cleanup_also_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_failure",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_precheck_failure",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_start_app_fails",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_serial_is_valid",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_with_destination",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial_with_service_running",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_success_with_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads_skip_logcat",
"tests/mobly/controllers/monsoon_test.py::MonsoonTest::test_monsoon_import",
"tests/mobly/logger_test.py::LoggerTest::test_epoch_to_log_line_timestamp",
"tests/mobly/output_test.py::OutputTest::test_run_twice_for_two_sets_of_logs",
"tests/mobly/output_test.py::OutputTest::test_setup_logger_before_run",
"tests/mobly/output_test.py::OutputTest::test_symlink",
"tests/mobly/output_test.py::OutputTest::test_teardown_erases_logs",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_add_test_class_mismatched_log_path",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_add_test_class_mismatched_test_bed_name",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_main_parse_args",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_change_return_value",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_dup_register",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_less_than_min_number",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_no_config",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_no_config_no_register",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_no_get_info",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_return_value",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_no_tests",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_twice",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_two_test_classes",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_two_test_classes_different_configs",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_with_abort_all",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_teardown_logger_before_setup_logger",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_verify_controller_module",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_verify_controller_module_missing_attr",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_verify_controller_module_null_attr"
]
| []
| Apache License 2.0 | 2,386 | [
"mobly/controllers/android_device.py",
"mobly/controllers/android_device_lib/sl4a_client.py",
"mobly/controllers/android_device_lib/jsonrpc_client_base.py",
"mobly/controllers/android_device_lib/snippet_client.py"
]
| [
"mobly/controllers/android_device.py",
"mobly/controllers/android_device_lib/sl4a_client.py",
"mobly/controllers/android_device_lib/jsonrpc_client_base.py",
"mobly/controllers/android_device_lib/snippet_client.py"
]
|
uisautomation__sms2jwplayer-16 | 924d6ab5dd9b7776a68198fa0eee744990fc8c6e | 2018-04-10 12:52:39 | 924d6ab5dd9b7776a68198fa0eee744990fc8c6e | diff --git a/doc/intro.rst b/doc/intro.rst
index 9013aa7..2ed3ce1 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -33,6 +33,7 @@ installed via the ``pip`` command:
$ git clone $REPO sms2jwplayer
$ cd sms2jwplayer
+ $ pip install -r requirements.txt # for specific package versions
$ pip install .
Where ``$REPO`` is replaced with the location of the ``sms2jwplayer``
diff --git a/doc/reference.rst b/doc/reference.rst
index e9391a4..8f0a1e9 100644
--- a/doc/reference.rst
+++ b/doc/reference.rst
@@ -45,3 +45,9 @@ Extracting video view stats
.. automodule:: sms2jwplayer.analytics
:members:
+
+Tidy database
+-------------
+
+.. automodule:: sms2jwplayer.tidy
+ :members:
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..33f16be
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+# Special version of jwplatform with bug fixes
+git+https://github.com/uisautomation/third-party-jwplatform-py#egg=jwplatform
diff --git a/sms2jwplayer/__init__.py b/sms2jwplayer/__init__.py
index 30bc537..18564b4 100644
--- a/sms2jwplayer/__init__.py
+++ b/sms2jwplayer/__init__.py
@@ -7,9 +7,10 @@ Usage:
[--output=FILE] [--limit=NUMBER] [--offset=NUMBER] <csv>
sms2jwplayer fetch [--verbose] [--base-name=NAME]
sms2jwplayer genupdatejob [--verbose] [--strip-leading=N]
- [--output=FILE] <csv> <metadata>...
- sms2jwplayer applyupdatejob [--verbose] [<update>]
+ [--output=FILE] --base=URL --base-image-url=URL <csv> <metadata>...
+ sms2jwplayer applyupdatejob [--verbose] [--log-file=FILE] [<update>]
sms2jwplayer analytics [--output=FILE] [--verbose] <date>
+ sms2jwplayer tidy [--output=FILE] [--verbose] <metadata>...
Options:
-h, --help Show a brief usage summary.
@@ -42,7 +43,9 @@ Sub commands:
genmrss Generate an MRSS feed for the export.
fetch Fetch details on all videos in jwplayer.
genupdatejob Generate list of missing metadata for each video key.
+ applyupdatejob Use JWPlatform API to update videos based on a job description file.
analytics Generate SMS analytics for a given day.
+ tidy Generate an update job which tidies the jwplayer database.
"""
import logging
@@ -67,3 +70,6 @@ def main():
elif opts['analytics']:
from . import analytics
analytics.main(opts)
+ elif opts['tidy']:
+ from . import tidy
+ tidy.main(opts)
diff --git a/sms2jwplayer/applyupdatejob.py b/sms2jwplayer/applyupdatejob.py
index 294d889..bcd4403 100644
--- a/sms2jwplayer/applyupdatejob.py
+++ b/sms2jwplayer/applyupdatejob.py
@@ -4,19 +4,44 @@ jwplayer API are performed with exponential backoff.
Takes as input a JSON document with the following schema.
-.. code:: json
+.. code:: js
{
- "updates": [
- ...
- {
- "key": <jwplayer key>,
- "custom": {
- <dictionary of custom properties to set>
- }
- },
- ...
- ],
+ "create": List<Create>,
+ "update": List<Update>
+ }
+
+The Create object specifies a list of JWPlatform resources which should be created:
+
+.. code:: js
+
+ {
+ "type": "videos", // or "thumbnails", etc
+ "resource: {
+ // dictionary of resource properties
+ }
+ }
+
+The Update object specifies a list of JWPlatform resources which need to be updated:
+
+.. code:: js
+
+ {
+ "type": "videos", // or "thumbnails", etc
+ "resource": {
+ // dictionary of properties to update
+ }
+ }
+
+The Delete object specifies a list of JWPlatform resources which need to be deleted:
+
+.. code:: js
+
+ {
+ "type": "videos", // or "thumbnails", etc
+ "resource": {
+ // dictionary of parameters to delete request
+ }
}
"""
@@ -25,11 +50,10 @@ import logging
import sys
import time
-import dateutil.parser
import tqdm
from jwplatform.errors import JWPlatformRateLimitExceededError
-from .util import input_stream, get_jwplatform_client, JWPlatformClientError
+from . import util
LOG = logging.getLogger('applyupdatejob')
@@ -40,46 +64,136 @@ MAX_ATTEMPTS = 10
def main(opts):
try:
- client = get_jwplatform_client()
- except JWPlatformClientError as e:
+ client = util.get_jwplatform_client()
+ except util.JWPlatformClientError as e:
LOG.error('jwplatform error: %s', e)
sys.exit(1)
- with input_stream(opts, '<update>') as f:
- updates = json.load(f).get('updates', [])
+ with util.input_stream(opts, '<update>') as f:
+ jobs = json.load(f)
- LOG.info('Update jobs to process: %s', len(updates))
+ updates, creates, deletes = [jobs.get(k, []) for k in ['update', 'create', 'delete']]
+
+ LOG.info('Number of update jobs to process: %s', len(updates))
+ LOG.info('Number of create jobs to process: %s', len(creates))
+ LOG.info('Number of delete jobs to process: %s', len(deletes))
# If verbose flag is present, give a nice progress bar
if opts['--verbose'] is not None:
- update_iterable = tqdm.tqdm(updates)
- else:
- update_iterable = updates
-
+ updates = tqdm.tqdm(updates)
+ creates = tqdm.tqdm(creates)
+ deletes = tqdm.tqdm(deletes)
+
+ create_responses = list(
+ execute_api_calls_respecting_rate_limit(create_calls(client, creates))
+ )
+
+ update_responses = list(
+ execute_api_calls_respecting_rate_limit(update_calls(client, updates))
+ )
+
+ delete_responses = list(
+ execute_api_calls_respecting_rate_limit(delete_calls(client, deletes))
+ )
+
+ if opts['--log-file'] is not None:
+ with util.output_stream(opts, '--log-file') as f:
+ json.dump({
+ 'create_responses': create_responses,
+ 'update_responses': update_responses,
+ 'delete_responses': delete_responses,
+ }, f)
+
+
+def create_calls(client, updates):
+ """
+ Return an iterator of callables representing the API calls for each create job.
+ """
+ for update in updates:
+ type_, resource = update.get('type'), update.get('resource', {})
+
+ if type_ == 'videos':
+ params = resource_to_params(resource)
+
+ # We wrap the entire create/update process in a function since we make use of two API
+ # calls (one is via key_for_media_id). Hence we want to re-try the entire thing if we
+ # hit the API rate limit.
+ def do_create():
+ # If video_key is set to anything other than None, an update of that video key will
+ # be done instead.
+ video_key = None
+
+ # See if the resource already exists. If so, perform an update instead.
+ media_id_prop = params.get('custom.sms_media_id')
+ if media_id_prop is not None:
+ try:
+ media_id = int(util.parse_custom_prop('media', media_id_prop))
+ except ValueError:
+ LOG.warning('Skipping video with bad media id prop: %s', media_id_prop)
+ else:
+ # Attempt to find a matching video for this media id. If None found, that's
+ # OK.
+ try:
+ video_key = util.key_for_media_id(media_id)
+ except util.VideoNotFoundError:
+ pass
+
+ if video_key is not None:
+ LOG.warning('Updating video %(video_key)s instead of creating new one',
+ {'video_key': video_key})
+ return client.videos.update(
+ http_method='POST', video_key=video_key, **params)
+ else:
+ return client.videos.create(http_method='POST', **params)
+
+ yield do_create
+ else:
+ LOG.warning('Skipping unknown update type: %s', type_)
+
+
+def update_calls(client, updates):
+ """
+ Return an iterator of callables representing the API calls for each update job.
+ """
+ for update in updates:
+ type_, resource = update.get('type'), update.get('resource', {})
+
+ if type_ == 'videos':
+ yield lambda: client.videos.update(http_method='POST', **resource_to_params(resource))
+ else:
+ LOG.warning('Skipping unknown update type: %s', type_)
+
+
+def delete_calls(client, deletes):
+ """
+ Return an iterator of callables representing the API calls for each delete job.
+ """
+ for delete in deletes:
+ type_, resource = delete.get('type'), delete.get('resource', {})
+
+ if type_ == 'videos':
+ yield lambda: client.videos.delete(http_method='POST', **resource_to_params(resource))
+ else:
+ LOG.warning('Skipping unknown delete type: %s', type_)
+
+
+def execute_api_calls_respecting_rate_limit(call_iterable):
+ """
+ A generator which takes an iterable of callables which represent calls to the JWPlatform API
+ and run them one after another. If a JWPlatformRateLimitExceededError is raised by the
+ callable, exponentially back off and retry. Since retries are possible, callables from
+ call_iterable may be called multiple times.
+
+ Yields the results of calling the update job.
+
+ """
# delay between calls to not hit rate limit
delay = 0.1 # seconds
- for update in update_iterable:
- try:
- key = update['key']
- except KeyError:
- LOG.warning('Update lacks key: %s', repr(update))
-
- params = {
- 'video_key': key
- }
-
- created_at = update.get('custom', {}).get('sms_created_at')
- if created_at is not None:
- date_str = ':'.join(created_at.split(':')[1:-1])
- params['date'] = int(dateutil.parser.parse(date_str).timestamp())
-
- for custom_key, custom_value in update.get('custom', {}).items():
- params['custom.' + custom_key] = str(custom_value)
-
+ for api_call in call_iterable:
for _ in range(MAX_ATTEMPTS):
try:
- client.videos.update(**params)
+ yield api_call()
# On a successful call, slightly shorten the delay
delay = max(1e-2, min(2., delay * 0.8))
@@ -87,3 +201,14 @@ def main(opts):
break
except JWPlatformRateLimitExceededError:
delay = max(1e-2, min(2., 2. * delay))
+
+
+def resource_to_params(resource):
+ def iterate(d, prefix=''):
+ for k, v in d.items():
+ if isinstance(v, dict):
+ for p in iterate(v, prefix+k+'.'):
+ yield p
+ else:
+ yield (prefix+k, v)
+ return dict(iterate(resource))
diff --git a/sms2jwplayer/genupdatejob.py b/sms2jwplayer/genupdatejob.py
index 0608a31..dad7593 100644
--- a/sms2jwplayer/genupdatejob.py
+++ b/sms2jwplayer/genupdatejob.py
@@ -1,35 +1,21 @@
"""
The genupdatejob subcommand examines video metadata from jwplayer and the current SMS export and
-generates a list of updates which should be applied.
-
-It outputs a single JSON object with the following schema:
-
-.. code:: json
-
- {
- "updates": [
- ...
- {
- "key": <jwplayer key>,
- "custom": {
- <dictionary of custom properties to set>
- }
- },
- ...
- ],
- }
+generates a list of updates which should be applied. See the documentation for
+:py:mod:`.applyupdatejob` for a description of the update job format.
"""
import json
import logging
import re
-from urllib.parse import urlsplit
+import urllib.parse
+import dateutil.parser
+
from sms2jwplayer.institutions import INSTIDS
from . import csv as smscsv
-from .util import output_stream, get_key_path
+from .util import output_stream, get_key_path, parse_custom_prop
-LOG = logging.getLogger('genmrss')
+LOG = logging.getLogger(__name__)
def main(opts):
@@ -50,10 +36,10 @@ def main(opts):
LOG.info('Loaded %s media item(s) from export', len(items))
with output_stream(opts) as fobj:
- process_videos(fobj, items, videos)
+ process_videos(opts, fobj, items, videos)
-def process_videos(fobj, items, videos):
+def process_videos(opts, fobj, items, videos):
"""
Process video metadata records with reference to a dictionary of media items keyed by the
stripped path. Write results to file as JSON document.
@@ -61,86 +47,218 @@ def process_videos(fobj, items, videos):
"""
# Statistics we record
n_skipped = 0
- n_unmatched = 0
- n_matched = 0
- updates = []
+ # The list of create, update and delete jobs which need to be performed.
+ creates, updates = [], []
+
+ # A set of item media_ids which could not be matched to a corresponding JWPlatform video. This
+ # starts full of all items but items are removed as matching happens.
+ new_media_ids = set([item.media_id for item in items.values()])
+
+ # A set of clip ids which already exist in JWPlatform
+ existing_clip_ids = set()
+
+ # A list of JWPlatform video resources which could not be matched to an SMS media object and
+ # hence should be deleted.
+ unmatched_videos = []
+
+ # A list of (item, video resource) tuples representing that a given SMS media item is
+ # represented by a JWPlatform video resource. This may be a one-to-many mapping; a single SMS
+ # media item may have more than one JWPlatform video resource associated with it.
+ associations = []
+
+ # A dictionary which allows retrieving media items by clip id. Stores an (item, path) tuple.
+ items_by_clip_id = dict((item.clip_id, (item, path)) for path, item in items.items())
+
+ # A dictionary, keyed by media id, of sequences of items associated with that media
+ items_by_media_id = {}
+ for _, item in items.items():
+ media_items = items_by_media_id.get(item.media_id, list())
+ media_items.append(item)
+ items_by_media_id[item.media_id] = media_items
# Match jwplayer videos to SMS items
for video in videos:
- # Find original fetch URL
- orig_url = get_key_path(video, 'custom.import_guid')
- if orig_url is None:
+ # Find an existing SMS clip id
+ clip_id_prop = get_key_path(video, 'custom.sms_clip_id')
+ if clip_id_prop is None:
n_skipped += 1
continue
- # Parse path components
- path_components = urlsplit(orig_url).path.split('/')
+ # Retrieve the matching SMS media item (or record the inability to do so)
+ try:
+ item, _ = items_by_clip_id[int(parse_custom_prop('clip', clip_id_prop))]
+ except KeyError:
+ unmatched_videos.append(video)
+ continue
+
+ # Remove matched item from new_items set
+ new_media_ids -= {item.media_id}
+ existing_clip_ids.add(item.clip_id)
+
+ # We now have a match between a video and SMS media item. Record the match.
+ associations.append((item, video))
+
+ # Generate updates for existing videos
+ for item, video in associations:
+ expected_video = video_resource(opts, item)
+
+ # Calculate delta from resource which exists to expected resource
+ delta = updated_keys(video, expected_video)
+ if len(delta) > 0:
+ # The delta is non-empty, so construct an update request. FSR, the *update* request for
+ # JWPlatform requires the video be specified via 'video_key' but said key appears in
+ # the video resource returned by /videos/list as 'key'.
+ update = {'video_key': video['key']}
+ update.update(delta)
+ updates.append({
+ 'type': 'videos',
+ 'resource': update,
+ })
+
+ # Generate creates for new videos
+ create_clip_ids = set()
+ for media_items in (items_by_media_id[media_id] for media_id in new_media_ids):
+ video_item, audio_item = None, None
+ for item in media_items:
+ if item.format is smscsv.MediaFormat.VIDEO:
+ video_item = item
+ elif item.format is smscsv.MediaFormat.AUDIO:
+ audio_item = item
+ else:
+ LOG.warning('Unknown format: %s', item.format)
- # Try to find item by joining path components
- item = None
- while len(path_components) > 0 and item is None:
- item = items.get('/'.join(path_components))
- path_components = path_components[1:]
+ # Prefer video items over audio ones
+ item = video_item if video_item is not None else audio_item
if item is None:
- n_unmatched += 1
+ LOG.warning('Could not match items %s to video or audio clip', [
+ i.clip_id for i in media_items
+ ])
continue
- # video and item now match
- n_matched += 1
-
- # form list of expected custom properties. We cuddle the id numbers in <type>:...: so that
- # we can search for "exactly" the media id or clip id rather than simply a video whose id
- # contains another. (E.g. searching for clip "10" is likely to being up "210", "310",
- # "1045", etc.)
- custom_props = {
- 'sms_media_id': 'media:{}:'.format(item.media_id),
- 'sms_clip_id': 'clip:{}:'.format(item.clip_id),
- # format - migration not required
- # filename - migration not required
- 'sms_created_at': 'created_at:{}:'.format(item.created_at.isoformat()),
- # title - migrated as media item title
- # description - migrated as media item description
- 'sms_collection_id': 'collection:{}:'.format(item.collection_id),
- 'sms_instid': 'instid:{}:'.format(item.instid),
- 'sms_aspect_ratio': 'aspect_ratio:{}:'.format(item.aspect_ratio),
- 'sms_created_by': 'created_by:{}:'.format(item.creator),
- # in_dspace - migration not required
- 'sms_publisher': 'publisher:{}:'.format(item.publisher),
- 'sms_copyright': 'copyright:{}:'.format(item.copyright),
- 'sms_language': 'language:{}:'.format(item.language),
- 'sms_keywords': 'keywords:{}:'.format(item.keywords),
- # visibility - migration merged with sms_acl
- 'sms_acl': 'acl:{}:'.format(convert_acl(item.visibility, item.acl)),
- 'sms_screencast': 'screencast:{}:'.format(item.screencast),
- 'sms_image_id': 'image_id:{}:'.format(item.image_id),
- # dspace_path - migration not required
- 'sms_featured': 'featured:{}:'.format(item.featured),
- 'sms_branding': 'branding:{}:'.format(item.branding),
- 'sms_last_updated_at': 'last_updated_at:{}:'.format(item.last_updated_at),
- 'sms_updated_by': 'updated_by:{}:'.format(item.updated_by),
- 'sms_downloadable': 'downloadable:{}:'.format(item.downloadable),
- 'sms_withdrawn': 'withdrawn:{}:'.format(item.withdrawn),
- # abstract - migration impractical
- # priority - migration not required
- }
-
- # remove those which match
- for k, v in list(custom_props.items()):
- if get_key_path(video, 'custom.' + k) == v:
- del custom_props[k]
-
- # write a row if there is work to do
- if len(custom_props) > 0:
- updates.append({'key': get_key_path(video, 'key'), 'custom': custom_props})
-
- LOG.info('Number of jwplayer videos matched to SMS media items: %s', n_matched)
- LOG.info('Number of jwplayer videos not matched to SMS media items: %s', n_unmatched)
- LOG.info('Number of jwplayer videos with no import URL: %s', n_skipped)
+ # If we've ended up with an existing clip, don't bother
+ if item.clip_id in existing_clip_ids or item.clip_id in create_clip_ids:
+ continue
+
+ create_clip_ids.add(item.clip_id)
+
+ video = video_resource(opts, item)
+ video.update({
+ 'download_url': url(opts, item),
+ })
+ creates.append({
+ 'type': 'videos',
+ 'resource': video,
+ })
+
+ LOG.info('Number of JWPlatform videos matched to SMS media items: %s', len(associations))
+ LOG.info('Number of SMS media items with no existing video: %s', len(new_media_ids))
+ LOG.info('Number of JWPlatform videos not matched to SMS media items: %s',
+ len(unmatched_videos))
+ LOG.info('Number of JWPlatform videos with no import URL: %s', n_skipped)
+ LOG.info('Number of video creations: %s', len(creates))
LOG.info('Number of video updates: %s', len(updates))
- json.dump({'updates': updates}, fobj)
+ json.dump({'create': creates, 'update': updates}, fobj)
+
+
+def updated_keys(source, target):
+ """Return a dict which is the delta between source and target. Keys in target which have
+ different values or do not exist in source are returned.
+
+ """
+ # Initially, the delta is empty
+ delta = {}
+
+ for key, value in target.items():
+ try:
+ source_value = source[key]
+ except KeyError:
+ # Key is not in source, set it in delta
+ delta[key] = source_value
+ else:
+ # Key is in source
+ if isinstance(value, dict):
+ # Value is itself a dict so recurse
+ sub_delta = updated_keys(source_value, value)
+ if len(sub_delta) > 1:
+ delta[key] = sub_delta
+ elif value != source_value:
+ # Value differs between source and target. Return delta.
+ delta[key] = value
+
+ return delta
+
+
+def video_resource(opts, item):
+ """
+ Construct what the JWPlatform video resource for a SMS media item should look like.
+
+ """
+ # Custom props
+ custom_props = custom_props_for_item(item)
+
+ # Start making the video resource
+ resource = {
+ "custom": custom_props,
+ }
+
+ # Add title and description if present
+ for key, value in (('title', item.title), ('description', item.description)):
+ value = sanitise(value)
+ if value.strip() != '':
+ resource[key] = value
+
+ # Add a created at date
+ created_at = custom_props.get('sms_created_at')
+ if created_at is not None:
+ date_str = ':'.join(created_at.split(':')[1:-1])
+ resource['date'] = int(dateutil.parser.parse(date_str).timestamp())
+
+ return resource
+
+
+def custom_props_for_item(item):
+ """
+ Return a dictionary of custom props which should be set on a particular video item.
+
+ """
+ # form list of expected custom properties. We cuddle the id numbers in <type>:...: so that
+ # we can search for "exactly" the media id or clip id rather than simply a video whose id
+ # contains another. (E.g. searching for clip "10" is likely to being up "210", "310",
+ # "1045", etc.)
+ return {
+ 'sms_media_id': 'media:{}:'.format(item.media_id),
+ 'sms_clip_id': 'clip:{}:'.format(item.clip_id),
+ # format - migration not required
+ # filename - migration not required
+ 'sms_created_at': 'created_at:{}:'.format(item.created_at.isoformat()),
+ # title - migrated as media item title
+ # description - migrated as media item description
+ 'sms_collection_id': 'collection:{}:'.format(item.collection_id),
+ 'sms_instid': 'instid:{}:'.format(item.instid),
+ 'sms_aspect_ratio': 'aspect_ratio:{}:'.format(item.aspect_ratio),
+ 'sms_created_by': 'created_by:{}:'.format(item.creator),
+ # in_dspace - migration not required
+ 'sms_publisher': 'publisher:{}:'.format(item.publisher),
+ 'sms_copyright': 'copyright:{}:'.format(item.copyright),
+ 'sms_language': 'language:{}:'.format(item.language),
+ 'sms_keywords': 'keywords:{}:'.format(item.keywords),
+ # visibility - migration merged with sms_acl
+ 'sms_acl': 'acl:{}:'.format(convert_acl(item.visibility, item.acl)),
+ 'sms_screencast': 'screencast:{}:'.format(item.screencast),
+ 'sms_image_id': 'image_id:{}:'.format(item.image_id),
+ # dspace_path - migration not required
+ 'sms_featured': 'featured:{}:'.format(item.featured),
+ 'sms_branding': 'branding:{}:'.format(item.branding),
+ 'sms_last_updated_at': 'last_updated_at:{}:'.format(item.last_updated_at),
+ 'sms_updated_by': 'updated_by:{}:'.format(item.updated_by),
+ 'sms_downloadable': 'downloadable:{}:'.format(item.downloadable),
+ 'sms_withdrawn': 'withdrawn:{}:'.format(item.withdrawn),
+ # abstract - migration impractical
+ # priority - migration not required
+ }
# A regex pattern for CRSID matching.
@@ -179,3 +297,28 @@ def convert_acl(visibility, acl):
LOG.warning('The ACE "{}" cannot be resolved'.format(ace))
return ",".join(new_acl)
+
+
+def url(opts, item):
+ """Return the URL for an item."""
+ path_items = item.filename.strip('/').split('/')
+ path_items = path_items[int(opts['--strip-leading']):]
+ return urllib.parse.urljoin(opts['--base'] + '/', '/'.join(path_items))
+
+
+def image_url(opts, item):
+ """Return the URL for an image_id."""
+ return urllib.parse.urljoin(opts['--base-image-url'], str(item.image_id)+".jpg")
+
+
+def sanitise(s, max_length=4096):
+ """
+ Strip odd characters from a string and sanitise the length to avoid JWPlatform complaining.
+
+ """
+ # Map control characters to empty string
+ s = s.translate(dict.fromkeys(range(32)))
+
+ # Truncate
+ s = s[:max_length]
+ return s
diff --git a/sms2jwplayer/tidy.py b/sms2jwplayer/tidy.py
new file mode 100644
index 0000000..0ac5279
--- /dev/null
+++ b/sms2jwplayer/tidy.py
@@ -0,0 +1,86 @@
+"""
+The tidy subcommand will examine the JWPlatform metadata and generate an update job which can be
+applied via applyupdatejob. This update job will "tidy" the JWPlatform database in the following
+ways:
+
+- Each media id will have exactly one video associated with it with preference given to ones of
+ type "video". Other videos will be deleted.
+
+"""
+import json
+import logging
+
+from . import util
+
+
+LOG = logging.getLogger(__name__)
+
+
+def main(opts):
+ videos = []
+ for metadata_fn in opts['<metadata>']:
+ with open(metadata_fn) as f:
+ videos.extend(json.load(f).get('videos', []))
+ LOG.info('Loaded metadata for %s videos', len(videos))
+
+ with util.output_stream(opts) as fobj:
+ process_videos(opts, fobj, videos)
+
+
+def process_videos(opts, fobj, videos):
+ """
+ Process videos and write update job to fobj.
+
+ """
+ # Delete jobs
+ deletes = []
+
+ # Group videos by media id
+ videos_by_media_id = {}
+ n_grouped = 0
+ for video in videos:
+ media_id_prop = util.get_key_path(video, 'custom.sms_media_id')
+ if media_id_prop is None:
+ continue
+
+ try:
+ media_id = int(util.parse_custom_prop('media', media_id_prop))
+ except ValueError:
+ LOG.error('Could not parse media id prop: %s', media_id_prop)
+ else:
+ group = videos_by_media_id.get(media_id, [])
+ group.append(video)
+ videos_by_media_id[media_id] = group
+ n_grouped += 1
+
+ LOG.info('Grouped %s videos by media id into %s groups', n_grouped, len(videos_by_media_id))
+ LOG.info('Videos without media id: %s', len(videos) - n_grouped)
+
+ for media_id, group in videos_by_media_id.items():
+ video_keys = set(video['key'] for video in group)
+ blessed_key = None
+
+ # Attempt to find a video clip first
+ for video in group:
+ if video['mediatype'] == 'video':
+ blessed_key = video['key']
+ break
+
+ # If failed, find an audio clip
+ if blessed_key is None:
+ for video in group:
+ if video['mediatype'] == 'audio':
+ blessed_key = video['key']
+ break
+
+ if blessed_key is None:
+ LOG.warning('Could not find video or audio media for media id %s', media_id)
+ continue
+
+ # Remove the blessed key from the video keys, the rest should be deleted
+ video_keys.remove(blessed_key)
+ for key in video_keys:
+ deletes.append({'type': 'videos', 'resource': {'video_key': key}})
+
+ LOG.info('Number of delete jobs: %s', len(deletes))
+ json.dump({'delete': deletes}, fobj)
diff --git a/sms2jwplayer/util.py b/sms2jwplayer/util.py
index 5662dc9..aada4b4 100644
--- a/sms2jwplayer/util.py
+++ b/sms2jwplayer/util.py
@@ -6,11 +6,16 @@ program to use.
import contextlib
import os
+import re
import sys
import jwplatform
+#: regex for parsing a custom prop field
+CUSTOM_PROP_VALUE_RE = re.compile(r'^([a-z][a-z0-9_]*):(.*):$')
+
+
class JWPlatformClientError(RuntimeError):
"""
An error which is thrown if appropriate credentials for the jwplatform cannot be found in the
@@ -82,3 +87,74 @@ def get_key_path(obj, keypath):
return None
obj = obj.get(key)
return obj
+
+
+def parse_custom_prop(expected_type, field):
+ """
+ Parses a custom prop content of the form "<type>:<value>:". Returns the value tuple.
+ Raises ValueError if the field is of the wrong form or has wrong type.
+
+ """
+ match = CUSTOM_PROP_VALUE_RE.match(field)
+ if not match:
+ raise ValueError('Field has invalid format: {field}'.format(field=field))
+ prop_type, value = match.groups()
+ if prop_type != expected_type:
+ raise ValueError(
+ 'Field has unexpected type "{prop_type}". Expected "{expected_type}".'.format(
+ prop_type=prop_type, expected_type=expected_type
+ ))
+ return value
+
+
+class VideoNotFoundError(RuntimeError):
+ """
+ The provided SMS media ID does not have a corresponding JWPlatform video.
+ """
+
+
+def key_for_media_id(media_id, preferred_media_type='video', client=None):
+ """
+ :param media_id: the SMS media ID of the required video
+ :type media_id: int
+ :param preferred_media_type: (optional) the preferred media type to return. One of ``'video'``
+ or ``'audio'``.
+ :param client: (options) an authenticated JWPlatform client as returned by
+ :py:func:`.get_jwplatform_client`. If ``None``, call :py:func:`.get_jwplatform_client`.
+ :raises: :py:class:`.VideoNotFoundError` if the media id does not correspond to a JWPlatform
+ video.
+
+ """
+ client = client if client is not None else get_jwplatform_client()
+
+ # The value of the sms_media_id custom property we search for
+ media_id_value = 'media:{:d}:'.format(media_id)
+
+ # Search for videos
+ response = client.videos.list(**{
+ 'search:custom.sms_media_id': media_id_value,
+ })
+
+ # Loop through "videos" to find the preferred one based on mediatype
+ video_resource = None
+ for video in response.get('videos', []):
+ # Sanity check: skip videos with wrong media id since video search is
+ # not "is equal to", it is "contains".
+ if video.get('custom', {}).get('sms_media_id') != media_id_value:
+ continue
+
+ # use this video if it has the preferred mediatype or if we have nothing
+ # else
+ if (video.get('mediatype') == preferred_media_type
+ or video_resource is None):
+ video_resource = video
+
+ # If no video found, raise error
+ if video_resource is None:
+ raise VideoNotFoundError()
+
+ # Check the video we found has a non-None key
+ if video_resource.get('key') is None:
+ raise VideoNotFoundError()
+
+ return video_resource['key']
diff --git a/tox.ini b/tox.ini
index 4e78b76..8af1113 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,6 +7,7 @@ commands=python setup.py test --addopts="--cov=sms2jwplayer" {posargs}
# Any version of Python 3
[testenv:py3]
basepython=python3
+deps=-rrequirements.txt
# Build documentation
[testenv:doc]
| Only import audio if that is the source format
.. ie it isn't extracted from source video as a seperate clip
| uisautomation/sms2jwplayer | diff --git a/sms2jwplayer/test/test_applyupdatejob.py b/sms2jwplayer/test/test_applyupdatejob.py
index 271d70c..15e20e4 100644
--- a/sms2jwplayer/test/test_applyupdatejob.py
+++ b/sms2jwplayer/test/test_applyupdatejob.py
@@ -31,16 +31,19 @@ class ApplyUpdateJobTests(JWPlatformTestCase):
jobfile = os.path.join(tmp_dir, 'job.json')
with open(jobfile, 'w') as f:
json.dump({
- 'updates': [
- {'key': 'abc', 'custom': {'one': 1}},
- {'key': 'def', 'custom': {'foo': 'bar', 'buzz': 3}},
+ 'update': [
+ {'type': 'videos', 'resource': {'video_key': 'abc', 'custom': {'one': 1}}},
+ {'type': 'videos', 'resource': {
+ 'video_key': 'def', 'custom': {'foo': 'bar', 'buzz': 3}
+ }},
]
}, f)
applyupdatejob(jobfile)
self.client.videos.update.assert_has_calls([
- mock.call(**{'video_key': 'abc', 'custom.one': '1'}),
- mock.call(**{'video_key': 'def', 'custom.foo': 'bar', 'custom.buzz': '3'}),
+ mock.call(**{'video_key': 'abc', 'custom.one': 1, 'http_method': 'POST'}),
+ mock.call(**{'video_key': 'def', 'custom.foo': 'bar', 'custom.buzz': 3,
+ 'http_method': 'POST'}),
], any_order=True)
diff --git a/sms2jwplayer/test/test_genupdatejob.py b/sms2jwplayer/test/test_genupdatejob.py
index 8e14eb5..a559162 100644
--- a/sms2jwplayer/test/test_genupdatejob.py
+++ b/sms2jwplayer/test/test_genupdatejob.py
@@ -96,4 +96,5 @@ class ConvertAclTests(unittest.TestCase):
convert_acl('acl-overrule', ['hpcr', 'aj333']),
'USER_aj333'
)
- log.check(('genmrss', 'WARNING', 'The ACE "hpcr" cannot be resolved'))
+ log.check(('sms2jwplayer.genupdatejob', 'WARNING',
+ 'The ACE "hpcr" cannot be resolved'))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 7
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"feedparser",
"testfixtures"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
docopt==0.6.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
feedparser==6.0.11
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
jwplatform==2.2.2
MarkupSafe==3.0.2
neterr==1.1.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
requests==2.32.3
sgmllib3k==1.0.0
six==1.17.0
-e git+https://github.com/uisautomation/sms2jwplayer.git@924d6ab5dd9b7776a68198fa0eee744990fc8c6e#egg=sms2jwplayer
testfixtures==8.3.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tqdm==4.67.1
urllib3==2.3.0
| name: sms2jwplayer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- docopt==0.6.2
- feedparser==6.0.11
- idna==3.10
- jinja2==3.1.6
- jwplatform==2.2.2
- markupsafe==3.0.2
- neterr==1.1.1
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- requests==2.32.3
- sgmllib3k==1.0.0
- six==1.17.0
- testfixtures==8.3.0
- tqdm==4.67.1
- urllib3==2.3.0
prefix: /opt/conda/envs/sms2jwplayer
| [
"sms2jwplayer/test/test_genupdatejob.py::ConvertAclTests::test_ace_not_resolved"
]
| [
"sms2jwplayer/test/test_applyupdatejob.py::ApplyUpdateJobTests::test_basic_call"
]
| [
"sms2jwplayer/test/test_genupdatejob.py::ConvertAclTests::test_lowercase_institutions",
"sms2jwplayer/test/test_genupdatejob.py::ConvertAclTests::test_null_acls",
"sms2jwplayer/test/test_genupdatejob.py::ConvertAclTests::test_visibility_acl_combinations"
]
| []
| null | 2,387 | [
"sms2jwplayer/__init__.py",
"doc/intro.rst",
"tox.ini",
"doc/reference.rst",
"sms2jwplayer/tidy.py",
"sms2jwplayer/applyupdatejob.py",
"requirements.txt",
"sms2jwplayer/genupdatejob.py",
"sms2jwplayer/util.py"
]
| [
"sms2jwplayer/__init__.py",
"doc/intro.rst",
"tox.ini",
"doc/reference.rst",
"sms2jwplayer/tidy.py",
"sms2jwplayer/applyupdatejob.py",
"requirements.txt",
"sms2jwplayer/genupdatejob.py",
"sms2jwplayer/util.py"
]
|
|
dwavesystems__dwave-cloud-client-96 | ef95323a55249230fb6673697edcdbe8cac2d6c1 | 2018-04-10 17:05:56 | 0314a6761ba389bb20ba48ef65476a286d1bf38c | diff --git a/dwave/cloud/cli.py b/dwave/cloud/cli.py
index 5914660..3189154 100644
--- a/dwave/cloud/cli.py
+++ b/dwave/cloud/cli.py
@@ -8,8 +8,8 @@ from dwave.cloud.utils import readline_input
from dwave.cloud.exceptions import (
SolverAuthenticationError, InvalidAPIResponseError, UnsupportedSolverError)
from dwave.cloud.config import (
- load_config_from_file, get_default_config,
- detect_configfile_path, get_default_configfile_path)
+ load_config_from_files, get_default_config,
+ get_configfile_path, get_default_configfile_path)
@click.group()
@@ -30,7 +30,7 @@ def configure(config_file, profile):
print("Using config file:", config_file)
else:
# path not given, try to detect; or use default, but allow user to override
- config_file = detect_configfile_path()
+ config_file = get_configfile_path()
if config_file:
print("Found existing config file:", config_file)
else:
@@ -40,8 +40,8 @@ def configure(config_file, profile):
# try loading existing config, or use defaults
try:
- config = load_config_from_file(config_file)
- except ValueError:
+ config = load_config_from_files([config_file])
+ except:
config = get_default_config()
# determine profile
diff --git a/dwave/cloud/config.py b/dwave/cloud/config.py
index 1593e5c..424f95a 100644
--- a/dwave/cloud/config.py
+++ b/dwave/cloud/config.py
@@ -9,32 +9,39 @@ CONF_AUTHOR = "dwavesystem"
CONF_FILENAME = "dwave.conf"
-def detect_configfile_path():
- """Returns the first existing file that it finds in a list of possible
- candidates, and `None` if the list was exhausted, but no candidate config
- file exists.
+def detect_existing_configfile_paths():
+ """Returns the list of existing config files found on disk.
- For details, see :func:`load_config_from_file`.
+ Candidates examined depend on the OS, but for Linux possible list is:
+ ``dwave.conf`` in CWD, user-local ``.config/dwave/``, system-wide
+ ``/etc/dwave/``. For details, see :func:`load_config_from_file`.
"""
- # look for `./dwave.conf`
- candidates = ["."]
- # then for something like `~/.config/dwave/dwave.conf`
+ # system-wide has the lowest priority, `/etc/dwave/dwave.conf`
+ candidates = homebase.site_config_dir_list(
+ app_author=CONF_AUTHOR, app_name=CONF_APP,
+ use_virtualenv=False, create=False)
+
+ # user-local will override it, `~/.config/dwave/dwave.conf`
candidates.append(homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False))
- # and finally for e.g. `/etc/dwave/dwave.conf`
- candidates.extend(homebase.site_config_dir_list(
- app_author=CONF_AUTHOR, app_name=CONF_APP,
- use_virtualenv=False, create=False))
+ # highest priority (overrides all): `./dwave.conf`
+ candidates.append(".")
+
+ paths = [os.path.join(base, CONF_FILENAME) for base in candidates]
+ existing_paths = [path for path in paths if os.path.exists(path)]
+
+ return existing_paths
- for base in candidates:
- path = os.path.join(base, CONF_FILENAME)
- if os.path.exists(path):
- return path
- return None
+def get_configfile_path():
+ """Returns the highest-priority existing config file from a list
+ of possible candidates returned by `detect_existing_configfile_paths()`, and
+ ``None`` if no candidate config file exists."""
+ paths = detect_existing_configfile_paths()
+ return paths[-1] if paths else None
def get_default_configfile_path():
@@ -47,12 +54,15 @@ def get_default_configfile_path():
return path
-def load_config_from_file(filename=None):
- """Load D-Wave cloud client configuration from ``filename``.
+def load_config_from_files(filenames=None):
+ """Load D-Wave cloud client configuration from a list of ``filenames``.
The format of the config file is the standard Windows INI-like format,
parsable with the Python's :mod:`configparser`.
+ Each filename in the list (each config file loaded) progressively upgrades
+ the final configuration (on a key by key basis, per each section).
+
The section containing default values inherited by other sections is called
``defaults``. For example::
@@ -75,19 +85,19 @@ def load_config_from_file(filename=None):
token = ...
Args:
- filename (str, default=None):
- D-Wave cloud client configuration file location.
+ filenames (list[str], default=None):
+ D-Wave cloud client configuration file locations.
- If unspecified, a config file named ``dwave.conf`` is searched for in
- the current directory, then in the user-local config dir, and then
- in all system-wide config dirs. For example, on Unix, we try to load
- the config from these paths (in order) and possibly others
+ If set to ``None``, a config file named ``dwave.conf`` is searched for
+ in all system-wide config dirs, then in the user-local config dir,
+ and finally in the current directory. For example, on Unix, we try
+ to load the config from these paths (in order) and possibly others
(depending on your Unix flavour)::
- ./dwave.conf
- ~/.config/dwave/dwave.conf
- /usr/local/share/dwave/dwave.conf
/usr/share/dwave/dwave.conf
+ /usr/local/share/dwave/dwave.conf
+ ~/.config/dwave/dwave.conf
+ ./dwave.conf
On Windows 7+, config file should be located in:
``C:\\Users\\<username>\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf``,
@@ -102,31 +112,24 @@ def load_config_from_file(filename=None):
mapping of per-profile keys holding values.
Raises:
- :exc:`ValueError`:
- Config file location unspecified and undetected.
-
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
"""
- if filename is None:
- filename = detect_configfile_path()
- if not filename:
- raise ValueError("Config file not given, and could not be detected")
+ if filenames is None:
+ filenames = detect_existing_configfile_paths()
config = configparser.ConfigParser(default_section="defaults")
- try:
- with open(filename, 'r') as f:
- config.read_file(f, filename)
-
- except (IOError, OSError):
- raise ConfigFileReadError("Failed to read {!r}".format(filename))
-
- except configparser.Error:
- raise ConfigFileParseError("Failed to parse {!r}".format(filename))
-
+ for filename in filenames:
+ try:
+ with open(filename, 'r') as f:
+ config.read_file(f, filename)
+ except (IOError, OSError):
+ raise ConfigFileReadError("Failed to read {!r}".format(filename))
+ except configparser.Error:
+ raise ConfigFileParseError("Failed to parse {!r}".format(filename))
return config
@@ -159,11 +162,6 @@ def get_default_config():
return config
-def load_profile(name, filename=None):
- """Load profile with ``name`` from config file ``filename``."""
- return load_config_from_file(filename)[name]
-
-
def load_config(config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None):
"""Load D-Wave cloud client configuration from ``config_file`` (either
@@ -292,23 +290,22 @@ def load_config(config_file=None, profile=None, client=None,
"""
if config_file is None:
config_file = os.getenv("DWAVE_CONFIG_FILE")
- try:
- config = load_config_from_file(config_file)
- # determine profile name fallback:
- # (1) profile key under [defaults],
- # (2) first non-[defaults] section
- first_section = next(iter(config.sections() + [None]))
- config_defaults = config.defaults()
- default_profile = config_defaults.get('profile', first_section)
- except ValueError:
- # config file not specified, or not detected: start with null-config
- config = {}
- config_defaults = {}
- default_profile = None
- except (ConfigFileReadError, ConfigFileParseError):
- # unable to access/read/parse config file(s): explicitly fail
- raise
+ # auto-detect if not specified with arg or env
+ filenames = [config_file] if config_file else None
+
+ # progressively build config from a file, or a list of auto-detected files
+ # raises ConfigFileReadError/ConfigFileParseError on error
+ config = load_config_from_files(filenames)
+
+ # determine profile name fallback:
+ # (1) profile key under [defaults],
+ # (2) first non-[defaults] section
+ first_section = next(iter(config.sections() + [None]))
+ config_defaults = config.defaults()
+ default_profile = config_defaults.get('profile', first_section)
+
+ # select profile from the config
if profile is None:
profile = os.getenv("DWAVE_PROFILE", default_profile)
if profile:
| Progressive config files override
Make CWD config partially overrides user-local config, which partially overrides the system-wide config files.
For example, if `/usr/local/share/dwave/dwave.conf` has:
```
[defaults]
endpoint = <production>
client = qpu
[prod]
token = <token>
```
and `~/.config/dwave/dwave.conf` has:
```
[alpha]
endpoint = <alpha>
token = <token>
```
Then the user should be able to run `dwave ping --profile prod` **and** `dwave ping --profile alpha`. | dwavesystems/dwave-cloud-client | diff --git a/tests/test_config.py b/tests/test_config.py
index 671c29b..3f5b3cd 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -33,7 +33,7 @@ except ImportError:
from dwave.cloud.config import (
- detect_configfile_path, load_config_from_file, load_profile, load_config)
+ detect_existing_configfile_paths, load_config_from_files, load_config)
class TestConfig(unittest.TestCase):
@@ -66,7 +66,7 @@ class TestConfig(unittest.TestCase):
def test_config_load_from_file(self):
with mock.patch('dwave.cloud.config.open', iterable_mock_open(self.config_body), create=True):
- config = load_config_from_file(filename="filename")
+ config = load_config_from_files(filenames=["filename"])
self.assertEqual(config.sections(), ['dw2000', 'software', 'alpha'])
self.assertEqual(config['dw2000']['client'], 'qpu')
self.assertEqual(config['software']['client'], 'sw')
@@ -88,26 +88,22 @@ class TestConfig(unittest.TestCase):
key = val
"""
with mock.patch('dwave.cloud.config.open', iterable_mock_open(myconfig), create=True):
- self.assertRaises(ConfigFileParseError, load_config_from_file, filename="filename")
+ self.assertRaises(ConfigFileParseError, load_config_from_files, filenames=["filename"])
self.assertRaises(ConfigFileParseError, load_config, config_file="filename", profile="section")
def test_no_config_detected(self):
- with mock.patch("dwave.cloud.config.detect_configfile_path", lambda: None):
- self.assertRaises(ValueError, load_config_from_file)
+ """When no config file detected, `load_config_from_files` should return
+ empty config."""
+ with mock.patch("dwave.cloud.config.detect_existing_configfile_paths", lambda: []):
+ self.assertFalse(load_config_from_files().sections())
def test_invalid_filename_given(self):
- self.assertRaises(ConfigFileReadError, load_config_from_file, filename='/path/to/non/existing/config')
-
- def test_config_load_profile(self):
- with mock.patch('dwave.cloud.config.open', iterable_mock_open(self.config_body), create=True):
- profile = load_profile(name="alpha", filename="filename")
- self.assertEqual(profile['token'], 'alpha-token')
- self.assertRaises(KeyError, load_profile, name="non-existing-section", filename="filename")
+ self.assertRaises(ConfigFileReadError, load_config_from_files, filenames=['/path/to/non/existing/config'])
def test_config_file_detection_cwd(self):
configpath = "./dwave.conf"
with mock.patch("os.path.exists", lambda path: path == configpath):
- self.assertEqual(detect_configfile_path(), configpath)
+ self.assertEqual(detect_existing_configfile_paths(), [configpath])
def test_config_file_detection_user(self):
if sys.platform == 'win32':
@@ -119,7 +115,7 @@ class TestConfig(unittest.TestCase):
configpath = os.path.expanduser("~/.config/dwave/dwave.conf")
with mock.patch("os.path.exists", lambda path: path == configpath):
- self.assertEqual(detect_configfile_path(), configpath)
+ self.assertEqual(detect_existing_configfile_paths(), [configpath])
def test_config_file_detection_system(self):
if sys.platform == 'win32':
@@ -131,11 +127,11 @@ class TestConfig(unittest.TestCase):
configpath = "/etc/xdg/dwave/dwave.conf"
with mock.patch("os.path.exists", lambda path: path == configpath):
- self.assertEqual(detect_configfile_path(), configpath)
+ self.assertEqual(detect_existing_configfile_paths(), [configpath])
def test_config_file_detection_nonexisting(self):
with mock.patch("os.path.exists", lambda path: False):
- self.assertEqual(detect_configfile_path(), None)
+ self.assertEqual(detect_existing_configfile_paths(), [])
def _assert_config_valid(self, config):
@@ -144,7 +140,7 @@ class TestConfig(unittest.TestCase):
# default values are inherited
self.assertEqual(config['client'], "qpu")
- def _load_config_from_file(self, asked, provided, data=None):
+ def _load_config_from_files(self, asked, provided, data=None):
self.assertEqual(asked, provided)
if data is None:
data = self.config_body
@@ -152,38 +148,38 @@ class TestConfig(unittest.TestCase):
def test_config_load_configfile_arg(self):
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided='myfile')):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=['myfile'])):
self._assert_config_valid(load_config(config_file='myfile', profile='alpha'))
def test_config_load_configfile_env(self):
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided='myfile')):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=['myfile'])):
with mock.patch.dict(os.environ, {'DWAVE_CONFIG_FILE': 'myfile'}):
self._assert_config_valid(load_config(config_file=None, profile='alpha'))
def test_config_load_configfile_detect(self):
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided=None)):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=None)):
self._assert_config_valid(load_config(config_file=None, profile='alpha'))
def test_config_load_configfile_detect_profile_env(self):
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided=None)):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=None)):
with mock.patch.dict(os.environ, {'DWAVE_PROFILE': 'alpha'}):
self._assert_config_valid(load_config())
def test_config_load_configfile_env_profile_env(self):
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided='myfile')):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=['myfile'])):
with mock.patch.dict(os.environ, {'DWAVE_CONFIG_FILE': 'myfile',
'DWAVE_PROFILE': 'alpha'}):
self._assert_config_valid(load_config())
def test_config_load_configfile_env_profile_env_key_arg(self):
"""Explicitly provided values should override env/file."""
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided='myfile')):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=['myfile'])):
with mock.patch.dict(os.environ, {'DWAVE_CONFIG_FILE': 'myfile',
'DWAVE_PROFILE': 'alpha'}):
self.assertEqual(load_config(endpoint='manual')['endpoint'], 'manual')
@@ -196,8 +192,8 @@ class TestConfig(unittest.TestCase):
"""load_config should fail if the profile specified in kwargs or env in
non-existing.
"""
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided=None)):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=None)):
self.assertRaises(ValueError, load_config, profile="nonexisting")
with mock.patch.dict(os.environ, {'DWAVE_PROFILE': 'nonexisting'}):
self.assertRaises(ValueError, load_config)
@@ -206,8 +202,8 @@ class TestConfig(unittest.TestCase):
"""Check the right profile is loaded when `profile` specified only in
[defaults] section.
"""
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file, provided='myfile')):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, provided=['myfile'])):
profile = load_config(config_file='myfile')
self.assertEqual(profile['solver'], 'c4-sw_sample')
@@ -219,8 +215,8 @@ class TestConfig(unittest.TestCase):
[first]
solver = DW_2000Q_1
"""
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file,
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files,
provided=None, data=myconfig)):
profile = load_config()
self.assertIn('solver', profile)
@@ -235,8 +231,8 @@ class TestConfig(unittest.TestCase):
[defaults]
solver = DW_2000Q_1
"""
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file,
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files,
provided=None, data=myconfig)):
profile = load_config()
self.assertIn('solver', profile)
@@ -253,7 +249,41 @@ class TestConfig(unittest.TestCase):
[some]
solver = DW_2000Q_1
"""
- with mock.patch("dwave.cloud.config.load_config_from_file",
- partial(self._load_config_from_file,
- provided='myfile', data=myconfig)):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files,
+ provided=['myfile'], data=myconfig)):
self.assertRaises(ValueError, load_config, config_file='myfile')
+
+ def test_config_load_multiple_configfiles(self):
+ """Test more specific config overrides less specific one,
+ on a key by key basis."""
+
+ config_system = u"""
+ [alpha]
+ endpoint = alpha
+ solver = DW_2000Q_1
+ """
+ config_user = u"""
+ [alpha]
+ solver = DW_2000Q_2
+ [beta]
+ endpoint = beta
+ """
+
+ with mock.patch("dwave.cloud.config.detect_existing_configfile_paths",
+ lambda: ['config_system', 'config_user']):
+
+ # test per-key override
+ with mock.patch('dwave.cloud.config.open', create=True) as m:
+ m.side_effect=[iterable_mock_open(config_system)(),
+ iterable_mock_open(config_user)()]
+ section = load_config(profile='alpha')
+ self.assertEqual(section['endpoint'], 'alpha')
+ self.assertEqual(section['solver'], 'DW_2000Q_2')
+
+ # test per-section override (section addition)
+ with mock.patch('dwave.cloud.config.open', create=True) as m:
+ m.side_effect=[iterable_mock_open(config_system)(),
+ iterable_mock_open(config_user)()]
+ section = load_config(profile='beta')
+ self.assertEqual(section['endpoint'], 'beta')
diff --git a/tests/test_mock_solver_loading.py b/tests/test_mock_solver_loading.py
index 2010c9e..2e8dc38 100644
--- a/tests/test_mock_solver_loading.py
+++ b/tests/test_mock_solver_loading.py
@@ -204,7 +204,7 @@ alpha|file-alpha-url,file-alpha-token,,alpha-solver
# patch the new config loading mechanism, to test only legacy config loading
[email protected]("dwave.cloud.config.detect_configfile_path", lambda: None)
[email protected]("dwave.cloud.config.detect_existing_configfile_paths", lambda: [])
class MockConfiguration(unittest.TestCase):
"""Ensure that the precedence of configuration sources is followed."""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"coveralls"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
-e git+https://github.com/dwavesystems/dwave-cloud-client.git@ef95323a55249230fb6673697edcdbe8cac2d6c1#egg=dwave_cloud_client
homebase==1.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyreadline==2.1
pytest==7.0.1
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: dwave-cloud-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- homebase==1.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyreadline==2.1
- pytest==7.0.1
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/dwave-cloud-client
| [
"tests/test_config.py::TestConfig::test_config_file_detection_cwd",
"tests/test_config.py::TestConfig::test_config_file_detection_nonexisting",
"tests/test_config.py::TestConfig::test_config_file_detection_system",
"tests/test_config.py::TestConfig::test_config_file_detection_user",
"tests/test_config.py::TestConfig::test_config_load__profile_arg_nonexisting",
"tests/test_config.py::TestConfig::test_config_load__profile_first_section",
"tests/test_config.py::TestConfig::test_config_load__profile_from_defaults",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg_profile_default",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg_profile_default_nonexisting",
"tests/test_config.py::TestConfig::test_config_load_configfile_detect",
"tests/test_config.py::TestConfig::test_config_load_configfile_detect_profile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env_profile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env_profile_env_key_arg",
"tests/test_config.py::TestConfig::test_config_load_from_file",
"tests/test_config.py::TestConfig::test_config_load_from_file__invalid_format__duplicate_sections",
"tests/test_config.py::TestConfig::test_config_load_multiple_configfiles",
"tests/test_config.py::TestConfig::test_invalid_filename_given",
"tests/test_config.py::TestConfig::test_no_config_detected",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_token",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_url",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_good_connection",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_all_solvers",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_missing_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_broken_response",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_missing_data",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_filtering_in_client",
"tests/test_mock_solver_loading.py::MockConfiguration::test_env_args_set",
"tests/test_mock_solver_loading.py::MockConfiguration::test_env_with_file_set",
"tests/test_mock_solver_loading.py::MockConfiguration::test_explicit_only",
"tests/test_mock_solver_loading.py::MockConfiguration::test_explicit_with_file",
"tests/test_mock_solver_loading.py::MockConfiguration::test_file_format_error",
"tests/test_mock_solver_loading.py::MockConfiguration::test_file_read_error",
"tests/test_mock_solver_loading.py::MockConfiguration::test_nonexisting_file",
"tests/test_mock_solver_loading.py::MockConfiguration::test_only_file",
"tests/test_mock_solver_loading.py::MockConfiguration::test_only_file_key"
]
| []
| []
| []
| Apache License 2.0 | 2,388 | [
"dwave/cloud/cli.py",
"dwave/cloud/config.py"
]
| [
"dwave/cloud/cli.py",
"dwave/cloud/config.py"
]
|
|
dwavesystems__dwave-cloud-client-103 | a81beae09f37190af2ae487012b7abdc24e469cc | 2018-04-11 02:54:52 | 0314a6761ba389bb20ba48ef65476a286d1bf38c | diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index a0390e0..62d9e91 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -152,31 +152,19 @@ class Client(object):
# try loading configuration from a preferred new config subsystem
# (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)
- try:
- config = load_config(
- config_file=config_file, profile=profile, client=client,
- endpoint=endpoint, token=token, solver=solver, proxy=proxy)
- except ValueError:
- config = dict(
- endpoint=endpoint, token=token, solver=solver, proxy=proxy,
- client=client)
+ config = load_config(
+ config_file=config_file, profile=profile, client=client,
+ endpoint=endpoint, token=token, solver=solver, proxy=proxy)
- # and fallback to the legacy `.dwrc`
+ # fallback to legacy `.dwrc` if key variables missing
if legacy_config_fallback and (
- config.get('token') is None or config.get('endpoint') is None):
- try:
- _endpoint, _token, _proxy, _solver = legacy_load_config(
- key=profile,
- endpoint=endpoint, token=token, solver=solver, proxy=proxy)
- config = dict(
- endpoint=_endpoint, token=_token, solver=_solver, proxy=_proxy,
- client=client)
- except (ValueError, IOError):
- pass
+ not config.get('token') or not config.get('endpoint')):
+ config = legacy_load_config(
+ profile=profile, client=client,
+ endpoint=endpoint, token=token, solver=solver, proxy=proxy)
# manual override of other (client-custom) arguments
- for var, val in kwargs.items():
- config[var] = val
+ config.update(kwargs)
from dwave.cloud import qpu, sw
_clients = {'qpu': qpu.Client, 'sw': sw.Client}
diff --git a/dwave/cloud/config.py b/dwave/cloud/config.py
index 2a1883c..893d731 100644
--- a/dwave/cloud/config.py
+++ b/dwave/cloud/config.py
@@ -1,9 +1,13 @@
import os
import configparser
+from collections import OrderedDict
+
import homebase
+from dwave.cloud.utils import uniform_get
from dwave.cloud.exceptions import ConfigFileReadError, ConfigFileParseError
+
CONF_APP = "dwave"
CONF_AUTHOR = "dwavesystem"
CONF_FILENAME = "dwave.conf"
@@ -356,25 +360,28 @@ def load_config(config_file=None, profile=None, client=None,
return section
-def legacy_load_config(key=None, endpoint=None, token=None, solver=None, proxy=None):
+def legacy_load_config(profile=None, endpoint=None, token=None, solver=None,
+ proxy=None, **kwargs):
"""Load the configured URLs and token for the SAPI server.
.. warning:: Included only for backward compatibility, please use
:func:`load_config` instead, or the client factory
:meth:`~dwave.cloud.client.Client.from_config`.
- First, this method tries to read from environment variables.
- If these are not set, it tries to load a configuration file from ``~/.dwrc``.
+ This method tries to load a configuration file from ``~/.dwrc``, select a
+ specified `profile` (or first if not specified), and then override
+ individual keys with the values read from environment variables, and finally
+ with values given explicitly through function arguments.
The environment variables searched are:
- ``DW_INTERNAL__HTTPLINK``
- ``DW_INTERNAL__TOKEN``
- - ``DW_INTERNAL__HTTPPROXY`` (optional)
- - ``DW_INTERNAL__SOLVER`` (optional)
+ - ``DW_INTERNAL__HTTPPROXY``
+ - ``DW_INTERNAL__SOLVER``
The configuration file format is a modified CSV where the first comma is
- replaced with a bar character ``|``. Each line encodes a single connection.
+ replaced with a bar character ``|``. Each line encodes a single profile.
The columns are::
@@ -383,7 +390,7 @@ def legacy_load_config(key=None, endpoint=None, token=None, solver=None, proxy=N
Everything after the ``authentication_token`` is optional.
When there are multiple connections in a file, the first one is taken to be
- the default. Any commas in the urls are percent encoded.
+ the default. Any commas in the urls must be percent encoded.
Example:
@@ -406,7 +413,7 @@ def legacy_load_config(key=None, endpoint=None, token=None, solver=None, proxy=N
# Will try to connect with the url `https://two.com` and the token `new-token`.
Args:
- key (str):
+ profile (str):
The profile name in the legacy config file.
endpoint (str, default=None):
@@ -426,46 +433,56 @@ def legacy_load_config(key=None, endpoint=None, token=None, solver=None, proxy=N
connect directly to the API (unless you use a system-level proxy).
Returns:
- A tuple of SAPI info, as (url, token, proxy, default_solver_name)
+ A dictionary with keys: endpoint, token, solver, proxy.
"""
- # Try to load environment variables
- url = endpoint or os.environ.get('DW_INTERNAL__HTTPLINK')
- token = token or os.environ.get('DW_INTERNAL__TOKEN')
- proxy = proxy or os.environ.get('DW_INTERNAL__HTTPPROXY')
- solver = solver or os.environ.get('DW_INTERNAL__SOLVER')
-
- if url is not None and token is not None:
- return url, token, proxy, solver
-
- # Load the configuration file
- user_path = os.path.expanduser('~')
- file_path = os.path.join(user_path, '.dwrc')
-
- # Parse the config file
- try:
- with open(file_path, 'r') as handle:
- lines = handle.readlines()
- except (IOError, OSError):
- # Make sure python 2 and 3 raise the same error
- raise IOError("Could not load configuration from {}".format(file_path))
-
- # Clean whitespace and empty lines
- lines = [line.strip() for line in lines]
- lines = [line for line in lines if line != '']
-
- # Go through the connections and select entry matching the key
- for line in lines:
+
+ def _parse_config(fp, filename):
+ fields = ('endpoint', 'token', 'proxy', 'solver')
+ config = OrderedDict()
+ for line in fp:
+ # strip whitespace, skip blank and comment lines
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ # parse each record, store in dict with label as key
+ try:
+ label, data = line.split('|', 1)
+ values = [v.strip() or None for v in data.split(',')]
+ config[label] = dict(zip(fields, values))
+ except:
+ raise ConfigFileParseError(
+ "Failed to parse {!r}, line {!r}".format(filename, line))
+ return config
+
+ def _read_config(filename):
try:
- label, data = line.split('|', 1)
- data = {index: value for index, value in enumerate(data.split(','))}
-
- if label == key or data[0] == key or key is None:
- return (endpoint or data[0] or None,
- token or data[1] or None,
- proxy or data.get(2),
- solver or data.get(3))
- except:
- pass # Just ignore any malformed lines
- # TODO issue a warning
-
- raise ValueError("No configuration for the client could be discovered.")
+ with open(filename, 'r') as f:
+ return _parse_config(f, filename)
+ except (IOError, OSError):
+ raise ConfigFileReadError("Failed to read {!r}".format(filename))
+
+ config = {}
+ filename = os.path.expanduser('~/.dwrc')
+ if os.path.exists(filename):
+ config = _read_config(filename)
+
+ # load profile if specified, or first one in file
+ if profile:
+ try:
+ section = config[profile]
+ except KeyError:
+ raise ValueError("Config profile {!r} not found".format(profile))
+ else:
+ try:
+ _, section = next(iter(config.items()))
+ except StopIteration:
+ section = {}
+
+ # override config variables (if any) with environment and then with arguments
+ section['endpoint'] = endpoint or os.getenv("DW_INTERNAL__HTTPLINK", section.get('endpoint'))
+ section['token'] = token or os.getenv("DW_INTERNAL__TOKEN", section.get('token'))
+ section['proxy'] = proxy or os.getenv("DW_INTERNAL__HTTPPROXY", section.get('proxy'))
+ section['solver'] = solver or os.getenv("DW_INTERNAL__SOLVER", section.get('solver'))
+ section.update(kwargs)
+
+ return section
| Fix legacy config load
- make it more robust (fix things like #19)
- follow the same logic the new config has:
- load from file, override with environment, override with arguments,
- instead of: if minimal set of required variables is present in environment (or via arguments), use them and skip the file load
- unfortunately, the format has to stay the same
- return the first *valid* profile from file, don't fail if the first is invalid | dwavesystems/dwave-cloud-client | diff --git a/tests/test_client.py b/tests/test_client.py
index 8012a42..0323ad1 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -12,16 +12,16 @@ try:
except ImportError:
import mock
-from dwave.cloud.config import legacy_load_config
+from dwave.cloud.config import load_config
from dwave.cloud.qpu import Client
from dwave.cloud.exceptions import SolverAuthenticationError
import dwave.cloud
try:
- config_url, config_token, _, config_solver = legacy_load_config()
- if None in [config_url, config_token, config_solver]:
- raise ValueError()
+ config = load_config()
+ if not config['endpoint'] or not config['token'] or not config['solver']:
+ raise ValueError
skip_live = False
except:
skip_live = True
@@ -34,20 +34,20 @@ class ConnectivityTests(unittest.TestCase):
def test_bad_url(self):
"""Connect with a bad URL."""
with self.assertRaises(IOError):
- with Client("not-a-url", config_token) as client:
+ with Client("not-a-url", config['token']) as client:
client.get_solvers()
@unittest.skipIf(skip_live, "No live server available.")
def test_bad_token(self):
"""Connect with a bad token."""
with self.assertRaises(SolverAuthenticationError):
- with Client(config_url, 'not-a-token') as client:
+ with Client(config['endpoint'], 'not-a-token') as client:
client.get_solvers()
@unittest.skipIf(skip_live, "No live server available.")
def test_good_connection(self):
"""Connect with a valid URL and token."""
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
self.assertTrue(len(client.get_solvers()) > 0)
@@ -57,28 +57,28 @@ class SolverLoading(unittest.TestCase):
@unittest.skipIf(skip_live, "No live server available.")
def test_list_all_solvers(self):
"""List all the solvers."""
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
self.assertTrue(len(client.get_solvers()) > 0)
@unittest.skipIf(skip_live, "No live server available.")
def test_load_all_solvers(self):
"""List and retrieve all the solvers."""
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
for name in client.get_solvers():
self.assertEqual(client.get_solver(name).id, name)
@unittest.skipIf(skip_live, "No live server available.")
def test_load_bad_solvers(self):
"""Try to load a nonexistent solver."""
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
with self.assertRaises(KeyError):
client.get_solver("not-a-solver")
@unittest.skipIf(skip_live, "No live server available.")
def test_load_any_solver(self):
"""Load a single solver without calling get_solvers (which caches data)."""
- with Client(config_url, config_token) as client:
- self.assertEqual(client.get_solver(config_solver).id, config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ self.assertEqual(client.get_solver(config['solver']).id, config['solver'])
class ClientFactory(unittest.TestCase):
@@ -110,7 +110,7 @@ class ClientFactory(unittest.TestCase):
endpoint='endpoint', token='token', custom='new-custom')
def test_legacy_config_load_fallback(self):
- conf = 'endpoint token proxy solver'.split()
+ conf = {k: k for k in 'endpoint token proxy solver'.split()}
with mock.patch("dwave.cloud.client.load_config", return_value={}):
with mock.patch("dwave.cloud.client.legacy_load_config", lambda **kwargs: conf):
# test fallback works (legacy config is loaded)
diff --git a/tests/test_mock_solver_loading.py b/tests/test_mock_solver_loading.py
index 2e8dc38..4baba26 100644
--- a/tests/test_mock_solver_loading.py
+++ b/tests/test_mock_solver_loading.py
@@ -12,8 +12,9 @@ except ImportError:
import mock
from dwave.cloud.qpu import Client, Solver
-from dwave.cloud.exceptions import InvalidAPIResponseError, ConfigFileReadError
-from dwave.cloud.config import legacy_load_config
+from dwave.cloud.exceptions import (
+ InvalidAPIResponseError, ConfigFileReadError, ConfigFileParseError)
+from dwave.cloud.config import legacy_load_config, load_config
from .test_config import iterable_mock_open, configparser_open_namespace
@@ -197,11 +198,22 @@ class GetEvent(Exception):
raise GetEvent(path)
-config_body = """
+legacy_config_body = """
prod|file-prod-url,file-prod-token
alpha|file-alpha-url,file-alpha-token,,alpha-solver
"""
+config_body = """
+[prod]
+endpoint = file-prod-url
+token = file-prod-token
+
+[alpha]
+endpoint = file-alpha-url
+token = file-alpha-token
+solver = alpha-solver
+"""
+
# patch the new config loading mechanism, to test only legacy config loading
@mock.patch("dwave.cloud.config.detect_existing_configfile_paths", lambda: [])
@@ -234,7 +246,7 @@ class MockConfiguration(unittest.TestCase):
def test_explicit_with_file(self):
"""With arguments and a config file, the config file should be ignored."""
- with mock.patch("dwave.cloud.config.open", mock.mock_open(read_data=config_body), create=True):
+ with mock.patch("dwave.cloud.config.open", iterable_mock_open(config_body), create=True):
with Client.from_config(endpoint='arg-url', token='arg-token') as client:
client.session.get = GetEvent.handle
try:
@@ -246,8 +258,8 @@ class MockConfiguration(unittest.TestCase):
def test_only_file(self):
"""With no arguments or environment variables, the default connection from the config file should be used."""
- with mock.patch("dwave.cloud.config.open", mock.mock_open(read_data=config_body), create=True):
- with Client.from_config() as client:
+ with mock.patch("dwave.cloud.config.open", iterable_mock_open(config_body), create=True):
+ with Client.from_config('config_file') as client:
client.session.get = GetEvent.handle
try:
client.get_solver('arg-solver')
@@ -258,10 +270,8 @@ class MockConfiguration(unittest.TestCase):
def test_only_file_key(self):
"""If give a name from the config file the proper URL should be loaded."""
- with mock.patch("dwave.cloud.config.open", mock.mock_open(read_data=config_body), create=True):
- with mock.patch(configparser_open_namespace, iterable_mock_open(config_body), create=True):
- # this will try parsing legacy format as new, fail,
- # then try parsing it as legacy config
+ with mock.patch("dwave.cloud.config.open", iterable_mock_open(config_body), create=True):
+ with mock.patch("dwave.cloud.config.detect_existing_configfile_paths", lambda *x: ['file']):
with Client.from_config(profile='alpha') as client:
client.session.get = GetEvent.handle
try:
@@ -273,9 +283,9 @@ class MockConfiguration(unittest.TestCase):
def test_env_with_file_set(self):
"""With environment variables and a config file, the config file should be ignored."""
- with mock.patch("dwave.cloud.config.open", mock.mock_open(read_data=config_body), create=True):
+ with mock.patch("dwave.cloud.config.open", iterable_mock_open(legacy_config_body), create=True):
with mock.patch.dict(os.environ, {'DW_INTERNAL__HTTPLINK': 'env-url', 'DW_INTERNAL__TOKEN': 'env-token'}):
- with Client.from_config() as client:
+ with Client.from_config(False) as client:
client.session.get = GetEvent.handle
try:
client.get_solver('arg-solver')
@@ -297,13 +307,8 @@ class MockConfiguration(unittest.TestCase):
self.fail()
def test_file_read_error(self):
- """On config file read error, we should fail with `IOError`."""
+ """On config file read error, we should fail with `ConfigFileReadError`,
+ but only if .dwrc actually exists on disk."""
with mock.patch("dwave.cloud.config.open", side_effect=OSError, create=True):
- self.assertRaises(IOError, legacy_load_config)
-
- def test_file_format_error(self):
- """Config parsing error should be suppressed."""
- with mock.patch("dwave.cloud.config.open", mock.mock_open(read_data="|\na|b,c"), create=True):
- self.assertEqual(legacy_load_config(key='a'), ('b', 'c', None, None))
- with mock.patch("dwave.cloud.config.open", mock.mock_open(read_data="|"), create=True):
- self.assertRaises(ValueError, legacy_load_config)
+ with mock.patch("os.path.exists", lambda fn: True):
+ self.assertRaises(ConfigFileReadError, legacy_load_config)
diff --git a/tests/test_solver.py b/tests/test_solver.py
index d993a2b..f6e74a0 100644
--- a/tests/test_solver.py
+++ b/tests/test_solver.py
@@ -12,16 +12,16 @@ import random
import numpy
from dwave.cloud.utils import evaluate_ising
-from dwave.cloud.config import legacy_load_config
+from dwave.cloud.config import load_config
from dwave.cloud.qpu import Client
from dwave.cloud.exceptions import CanceledFutureError
import dwave.cloud.computation
try:
- config_url, config_token, _, config_solver = legacy_load_config()
- if None in [config_url, config_token, config_solver]:
- raise ValueError()
+ config = load_config()
+ if not config['endpoint'] or not config['token'] or not config['solver']:
+ raise ValueError
skip_live = False
except:
skip_live = True
@@ -33,22 +33,22 @@ class PropertyLoading(unittest.TestCase):
@unittest.skipIf(skip_live, "No live server available.")
def test_load_properties(self):
"""Ensure that the propreties are populated."""
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
self.assertTrue(len(solver.properties) > 0)
@unittest.skipIf(skip_live, "No live server available.")
def test_load_parameters(self):
"""Make sure the parameters are populated."""
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
self.assertTrue(len(solver.parameters) > 0)
@unittest.skipIf(skip_live, "No live server available.")
def test_submit_invalid_parameter(self):
"""Ensure that the parameters are populated."""
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
assert 'not_a_parameter' not in solver.parameters
with self.assertRaises(KeyError):
solver.sample_ising({}, {}, not_a_parameter=True)
@@ -56,8 +56,8 @@ class PropertyLoading(unittest.TestCase):
@unittest.skipIf(skip_live, "No live server available.")
def test_read_connectivity(self):
"""Ensure that the edge set is populated."""
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
self.assertTrue(len(solver.edges) > 0)
@@ -80,8 +80,8 @@ class Submission(_QueryTest):
@unittest.skipIf(skip_live, "No live server available.")
def test_result_structure(self):
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
computation = solver.sample_ising({}, {})
result = computation.result()
self.assertIn('samples', result)
@@ -93,8 +93,8 @@ class Submission(_QueryTest):
def test_submit_extra_qubit(self):
"""Submit a defective problem with an unsupported variable."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a linear problem
linear = [0] * (max(solver.nodes) + 1)
@@ -113,8 +113,8 @@ class Submission(_QueryTest):
def test_submit_linear_problem(self):
"""Submit a problem with all the linear terms populated."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a linear problem
linear = [0] * (max(solver.nodes) + 1)
@@ -129,8 +129,8 @@ class Submission(_QueryTest):
def test_submit_full_problem(self):
"""Submit a problem with all supported coefficients set."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a linear problem
linear = [0] * (max(solver.nodes) + 1)
@@ -147,8 +147,8 @@ class Submission(_QueryTest):
def test_submit_dict_problem(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a problem
linear = {index: random.choice([-1, 1]) for index in solver.nodes}
@@ -161,8 +161,8 @@ class Submission(_QueryTest):
def test_submit_partial_problem(self):
"""Submit a problem with only some of the terms set."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a linear problem
linear = [0] * (max(solver.nodes) + 1)
@@ -185,8 +185,8 @@ class Submission(_QueryTest):
def test_submit_batch(self):
"""Submit batch of problems."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
result_list = []
for _ in range(100):
@@ -214,8 +214,8 @@ class Submission(_QueryTest):
def test_cancel_batch(self):
"""Submit batch of problems, then cancel them."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a linear problem
linear = [0] * (max(solver.nodes) + 1)
@@ -248,8 +248,8 @@ class Submission(_QueryTest):
def test_wait_many(self):
"""Submit a batch of problems then use `wait_multiple` to wait on all of them."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a linear problem
linear = [0] * (max(solver.nodes) + 1)
@@ -283,8 +283,8 @@ class Submission(_QueryTest):
all of them."""
# Connect
- with Client(config_url, config_token) as client:
- solver = client.get_solver(config_solver)
+ with Client(config['endpoint'], config['token']) as client:
+ solver = client.get_solver(config['solver'])
# Build a problem
linear = [0] * (max(solver.nodes) + 1)
@@ -325,9 +325,9 @@ class DecodingMethod(_QueryTest):
def test_request_matrix_with_no_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
dwave.cloud.computation._numpy = False
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = True
# Build a problem
@@ -342,9 +342,9 @@ class DecodingMethod(_QueryTest):
def test_request_matrix_with_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
assert dwave.cloud.computation._numpy
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = True
# Build a problem
@@ -361,9 +361,9 @@ class DecodingMethod(_QueryTest):
def test_request_list_with_no_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
dwave.cloud.computation._numpy = False
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = False
# Build a problem
@@ -377,9 +377,9 @@ class DecodingMethod(_QueryTest):
def test_request_list_with_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
assert dwave.cloud.computation._numpy
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = False
# Build a problem
@@ -393,9 +393,9 @@ class DecodingMethod(_QueryTest):
def test_request_raw_matrix_with_no_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
dwave.cloud.computation._numpy = False
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = True
# Build a problem
@@ -410,9 +410,9 @@ class DecodingMethod(_QueryTest):
def test_request_raw_matrix_with_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
assert dwave.cloud.computation._numpy
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = True
# Build a problem
@@ -429,9 +429,9 @@ class DecodingMethod(_QueryTest):
def test_request_raw_list_with_no_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
dwave.cloud.computation._numpy = False
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = False
# Build a problem
@@ -445,9 +445,9 @@ class DecodingMethod(_QueryTest):
def test_request_raw_list_with_numpy(self):
"""Submit a problem using a dict for the linear terms."""
# Connect
- with Client(config_url, config_token) as client:
+ with Client(config['endpoint'], config['token']) as client:
assert dwave.cloud.computation._numpy
- solver = client.get_solver(config_solver)
+ solver = client.get_solver(config['solver'])
solver.return_matrix = False
# Build a problem
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock",
"requests_mock",
"coverage",
"coveralls"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
-e git+https://github.com/dwavesystems/dwave-cloud-client.git@a81beae09f37190af2ae487012b7abdc24e469cc#egg=dwave_cloud_client
homebase==1.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyreadline==2.1
pytest==7.0.1
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: dwave-cloud-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- homebase==1.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyreadline==2.1
- pytest==7.0.1
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/dwave-cloud-client
| [
"tests/test_mock_solver_loading.py::MockConfiguration::test_file_read_error"
]
| []
| [
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_token",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_url",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_good_connection",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_all_solvers",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_missing_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_broken_response",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_missing_data",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_filtering_in_client",
"tests/test_mock_solver_loading.py::MockConfiguration::test_env_args_set",
"tests/test_mock_solver_loading.py::MockConfiguration::test_env_with_file_set",
"tests/test_mock_solver_loading.py::MockConfiguration::test_explicit_only",
"tests/test_mock_solver_loading.py::MockConfiguration::test_explicit_with_file",
"tests/test_mock_solver_loading.py::MockConfiguration::test_nonexisting_file",
"tests/test_mock_solver_loading.py::MockConfiguration::test_only_file",
"tests/test_mock_solver_loading.py::MockConfiguration::test_only_file_key"
]
| []
| Apache License 2.0 | 2,389 | [
"dwave/cloud/config.py",
"dwave/cloud/client.py"
]
| [
"dwave/cloud/config.py",
"dwave/cloud/client.py"
]
|
|
elastic__rally-466 | fab6a504616af4b0de6584652e96bd30a466c246 | 2018-04-11 09:13:26 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/docs/install.rst b/docs/install.rst
index ef8af7e6..592ad278 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -69,7 +69,12 @@ We recommend that you use `Homebrew <https://brew.sh/>`_::
git
~~~
-``git 1.9`` or better is required. Verify with ``git --version``.
+Git is not required if **all** of the following conditions are met:
+
+* You are using Rally only as a load generator (``--pipeline=benchmark-only``) or you are referring to Elasticsearch configurations with ``--team-path``.
+* You create your own tracks and refer to them with ``--track-path``.
+
+In all other cases, Rally requires ``git 1.9`` or better. Verify with ``git --version``.
**Debian / Ubuntu**
@@ -139,16 +144,6 @@ Whenever you want to use Rally, run the activation script (step 2 above) first.
.. _install_offline-install:
-Kubernetes Job
---------------
-
-You can run Rally as a Kubernetes `Job <https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/>`_ via `this <https://github.com/gdmello/elasticsearch-rally>`_ `Helm Chart <https://helm.sh/>`_.
-
-Docker
-------
-
-You can run Rally as a docker container too. Follow the instructions `here <https://github.com/gdmello/elasticsearch-rally/tree/master/docker>`_.
-
Offline Install
---------------
diff --git a/esrally/config.py b/esrally/config.py
index 694520b6..7d6df330 100644
--- a/esrally/config.py
+++ b/esrally/config.py
@@ -106,6 +106,8 @@ def auto_load_local_config(base_config, additional_sections=None, config_file_cl
class Config:
+ EARLIEST_SUPPORTED_VERSION = 12
+
CURRENT_CONFIG_VERSION = 15
"""
@@ -586,6 +588,10 @@ class Prompter:
def migrate(config_file, current_version, target_version, out=print, i=input):
+ if current_version < Config.EARLIEST_SUPPORTED_VERSION:
+ raise ConfigError("The config file in {} is too old. Please delete it and reconfigure Rally from scratch with {} configure."
+ .format(config_file.location, PROGRAM_NAME))
+
prompter = Prompter(i=i, o=out, assume_defaults=False)
logger.info("Upgrading configuration from version [%s] to [%s]." % (current_version, target_version))
# Something is really fishy. We don't want to downgrade the configuration.
@@ -596,178 +602,6 @@ def migrate(config_file, current_version, target_version, out=print, i=input):
config_file.backup()
config = config_file.load(interpolation=None)
- if current_version == 0 and target_version > current_version:
- logger.info("Migrating config from version [0] to [1]")
- current_version = 1
- config["meta"] = {}
- config["meta"]["config.version"] = str(current_version)
- # in version 1 we changed some directories from being absolute to being relative
- config["system"]["log.root.dir"] = "logs"
- config["provisioning"]["local.install.dir"] = "install"
- config["reporting"]["report.base.dir"] = "reports"
- if current_version == 1 and target_version > current_version:
- logger.info("Migrating config from version [1] to [2]")
- current_version = 2
- config["meta"]["config.version"] = str(current_version)
- # no need to ask the user now if we are about to upgrade to version 4
- config["reporting"]["datastore.type"] = "in-memory"
- config["reporting"]["datastore.host"] = ""
- config["reporting"]["datastore.port"] = ""
- config["reporting"]["datastore.secure"] = ""
- config["reporting"]["datastore.user"] = ""
- config["reporting"]["datastore.password"] = ""
- config["system"]["env.name"] = "local"
- if current_version == 2 and target_version > current_version:
- logger.info("Migrating config from version [2] to [3]")
- current_version = 3
- config["meta"]["config.version"] = str(current_version)
- # Remove obsolete settings
- config["reporting"].pop("report.base.dir")
- config["reporting"].pop("output.html.report.filename")
- if current_version == 3 and target_version > current_version:
- root_dir = config["system"]["root.dir"]
- out(
- """
- *****************************************************************************************
-
- You have an old configuration of Rally. Rally has now a much simpler setup
- routine which will autodetect lots of settings for you and it also does not
- require you to setup a metrics store anymore.
-
- Rally will now migrate your configuration but if you don't need advanced features
- like a metrics store, then you should delete the configuration directory:
-
- rm -rf {0}
-
- and then rerun Rally's configuration routine:
-
- {1} configure
-
- Please also note you have {2:.1f} GB of data in your current benchmark directory at
-
- {3}
-
- You might want to clean up this directory also.
-
- For more details please see {4}
-
- *****************************************************************************************
-
- Pausing for 10 seconds to let you consider this message.
- """.format(config_file.config_dir,
- PROGRAM_NAME,
- convert.bytes_to_gb(io.get_size(root_dir)),
- root_dir,
- console.format.link("https://github.com/elastic/rally/blob/master/CHANGELOG.md#030")))
- time.sleep(10)
- logger.info("Migrating config from version [3] to [4]")
- current_version = 4
- config["meta"]["config.version"] = str(current_version)
- if len(config["reporting"]["datastore.host"]) > 0:
- config["reporting"]["datastore.type"] = "elasticsearch"
- else:
- config["reporting"]["datastore.type"] = "in-memory"
- # Remove obsolete settings
- config["build"].pop("maven.bin")
- config["benchmarks"].pop("metrics.stats.disk.device")
-
- if current_version == 4 and target_version > current_version:
- config["tracks"] = {}
- config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"
- current_version = 5
- config["meta"]["config.version"] = str(current_version)
-
- if current_version == 5 and target_version > current_version:
- config["defaults"] = {}
- config["defaults"]["preserve_benchmark_candidate"] = str(False)
- current_version = 6
- config["meta"]["config.version"] = str(current_version)
-
- if current_version == 6 and target_version > current_version:
- # Remove obsolete settings
- config.pop("provisioning")
- config["system"].pop("log.root.dir")
- current_version = 7
- config["meta"]["config.version"] = str(current_version)
-
- if current_version == 7 and target_version > current_version:
- # move [system][root.dir] to [node][root.dir]
- if "node" not in config:
- config["node"] = {}
- config["node"]["root.dir"] = config["system"].pop("root.dir")
- # also move all references!
- for section in config:
- for k, v in config[section].items():
- config[section][k] = v.replace("${system:root.dir}", "${node:root.dir}")
- current_version = 8
- config["meta"]["config.version"] = str(current_version)
- if current_version == 8 and target_version > current_version:
- config["teams"] = {}
- config["teams"]["default.url"] = "https://github.com/elastic/rally-teams"
- current_version = 9
- config["meta"]["config.version"] = str(current_version)
- if current_version == 9 and target_version > current_version:
- config["distributions"] = {}
- config["distributions"]["release.1.url"] = "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-" \
- "{{VERSION}}.tar.gz"
- config["distributions"]["release.2.url"] = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/" \
- "distribution/tar/elasticsearch/{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz"
- config["distributions"]["release.url"] = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz"
- config["distributions"]["release.cache"] = "true"
- current_version = 10
- config["meta"]["config.version"] = str(current_version)
- if current_version == 10 and target_version > current_version:
- config["runtime"]["java.home"] = config["runtime"].pop("java8.home")
- current_version = 11
- config["meta"]["config.version"] = str(current_version)
- if current_version == 11 and target_version > current_version:
- # As this is a rather complex migration, we log more than usual to understand potential migration problems better.
- if "source" in config:
- if "local.src.dir" in config["source"]:
- previous_root = config["source"].pop("local.src.dir")
- logger.info("Set [source][local.src.dir] to [%s]." % previous_root)
- # if this directory was Rally's default location, then move it on the file system to allow for checkouts of plugins
- # in the sibling directory.
- if previous_root == os.path.join(config["node"]["root.dir"], "src"):
- new_root_dir_all_sources = previous_root
- new_es_sub_dir = "elasticsearch"
- new_root = os.path.join(new_root_dir_all_sources, new_es_sub_dir)
- # only attempt to move if the directory exists. It may be possible that users never ran a source benchmark although they
- # have configured it. In that case the source directory will not yet exist.
- if io.exists(previous_root):
- logger.info("Previous source directory was at Rally's default location [%s]. Moving to [%s]."
- % (previous_root, new_root))
- try:
- # we need to do this in two steps as we need to move the sources to a subdirectory
- tmp_path = io.normalize_path(os.path.join(new_root_dir_all_sources, os.pardir, "tmp_src_mig"))
- os.rename(previous_root, tmp_path)
- io.ensure_dir(new_root)
- os.rename(tmp_path, new_root)
- except OSError:
- logger.exception("Could not move source directory from [%s] to [%s]." % (previous_root, new_root))
- # A warning is sufficient as Rally should just do a fresh checkout if moving did not work.
- console.warn("Elasticsearch source directory could not be moved from [%s] to [%s]. Please check the logs."
- % (previous_root, new_root))
- else:
- logger.info("Source directory is configured at Rally's default location [%s] but does not exist yet."
- % previous_root)
- else:
- logger.info("Previous source directory was the custom directory [%s]." % previous_root)
- new_root_dir_all_sources = io.normalize_path(os.path.join(previous_root, os.path.pardir))
- # name of the elasticsearch project directory.
- new_es_sub_dir = io.basename(previous_root)
-
- logger.info("Setting [node][src.root.dir] to [%s]." % new_root_dir_all_sources)
- config["node"]["src.root.dir"] = new_root_dir_all_sources
- logger.info("Setting [source][elasticsearch.src.subdir] to [%s]" % new_es_sub_dir)
- config["source"]["elasticsearch.src.subdir"] = new_es_sub_dir
- else:
- logger.info("Key [local.src.dir] not found. Advancing without changes.")
- else:
- logger.info("No section named [source] found in config. Advancing without changes.")
- current_version = 12
- config["meta"]["config.version"] = str(current_version)
-
if current_version == 12 and target_version > current_version:
# the current configuration allows to benchmark from sources
if "build" in config and "gradle.bin" in config["build"]:
diff --git a/esrally/driver/runner.py b/esrally/driver/runner.py
index 49badf3e..4ba93457 100644
--- a/esrally/driver/runner.py
+++ b/esrally/driver/runner.py
@@ -523,7 +523,7 @@ class Query(Runner):
"weight": retrieved_pages,
"pages": retrieved_pages,
"hits": hits,
- "unit": "ops",
+ "unit": "pages",
"timed_out": timed_out,
"took": took
}
diff --git a/esrally/utils/git.py b/esrally/utils/git.py
index e5f885e4..b5d044e8 100644
--- a/esrally/utils/git.py
+++ b/esrally/utils/git.py
@@ -8,7 +8,8 @@ from esrally.utils import io, process
def probed(f):
def probe(src, *args, **kwargs):
# Probe for -C
- if not process.exit_status_as_bool(lambda: process.run_subprocess_with_logging("git -C %s --version" % src, level=logging.DEBUG)):
+ if not process.exit_status_as_bool(lambda: process.run_subprocess_with_logging(
+ "git -C {} --version".format(src), level=logging.DEBUG), quiet=True):
version = process.run_subprocess_with_output("git --version")
if version:
version = str(version).strip()
@@ -25,7 +26,7 @@ def is_working_copy(src):
:param src: A directory. May or may not exist.
:return: True iff the given directory is a git working copy.
"""
- return os.path.exists(src) and os.path.exists("%s/.git" % src)
+ return os.path.exists(src) and os.path.exists(os.path.join(src, ".git"))
def clone(src, remote):
diff --git a/esrally/utils/process.py b/esrally/utils/process.py
index 068c32d4..c927396e 100644
--- a/esrally/utils/process.py
+++ b/esrally/utils/process.py
@@ -28,17 +28,19 @@ def run_subprocess_with_output(command_line):
return lines
-def exit_status_as_bool(runnable):
+def exit_status_as_bool(runnable, quiet=False):
"""
:param runnable: A runnable returning an int as exit status assuming ``0`` is meaning success.
+ :param quiet: Suppress any output (default: False).
:return: True iff the runnable has terminated successfully.
"""
try:
return_code = runnable()
return return_code == 0 or return_code is None
except OSError:
- logger.exception("Could not execute command.")
+ if not quiet:
+ logger.exception("Could not execute command.")
return False
| Don't require git
As Rally allows recently to define tracks as simple files there is no hard requirement that git is installed (but obviously usage is restricted to Rally as load generator with custom tracks). | elastic/rally | diff --git a/tests/config_test.py b/tests/config_test.py
index a368b47e..43ba46d2 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -419,11 +419,7 @@ class ConfigFactoryTests(TestCase):
class ConfigMigrationTests(TestCase):
- # catch all test, migrations are checked in more detail in the other tests
- @mock.patch("esrally.utils.io.get_size")
- @mock.patch("esrally.time.sleep")
- def test_migrate_from_0_to_latest(self, sleep, get_size):
- get_size.return_value = 0
+ def test_does_not_migrate_outdated_config(self):
config_file = InMemoryConfigStore("test")
sample_config = {
"system": {
@@ -448,347 +444,45 @@ class ConfigMigrationTests(TestCase):
}
config_file.store(sample_config)
- config.migrate(config_file, 0, config.Config.CURRENT_CONFIG_VERSION, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual(str(config.Config.CURRENT_CONFIG_VERSION), config_file.config["meta"]["config.version"])
-
- def test_migrate_from_2_to_3(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 2
- },
- "system": {
- "root.dir": "in-memory"
- },
- "reporting": {
- "report.base.dir": "/tests/rally/reporting",
- "output.html.report.filename": "index.html"
- },
- }
-
- config_file.store(sample_config)
- config.migrate(config_file, 2, 3, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("3", config_file.config["meta"]["config.version"])
- # Did not delete the section...
- self.assertTrue("reporting" in config_file.config)
- # ... but the key
- self.assertFalse("report.base.dir" in config_file.config["reporting"])
- self.assertFalse("output.html.report.filename" in config_file.config["reporting"])
+ with self.assertRaisesRegex(config.ConfigError, "The config file.*is too old. Please delete it and reconfigure Rally from scratch"):
+ config.migrate(config_file, config.Config.EARLIEST_SUPPORTED_VERSION - 1, config.Config.CURRENT_CONFIG_VERSION, out=null_output)
+ # catch all test, migrations are checked in more detail in the other tests
@mock.patch("esrally.utils.io.get_size")
@mock.patch("esrally.time.sleep")
- def test_migrate_from_3_to_4(self, sleep, get_size):
+ def test_migrate_from_earliest_supported_to_latest(self, sleep, get_size):
get_size.return_value = 0
config_file = InMemoryConfigStore("test")
sample_config = {
"meta": {
- "config.version": 3
+ "config.version": config.Config.EARLIEST_SUPPORTED_VERSION
},
"system": {
"root.dir": "in-memory"
},
- "reporting": {
- "datastore.host": ""
+ "provisioning": {
+
},
"build": {
"maven.bin": "/usr/local/mvn"
},
"benchmarks": {
"metrics.stats.disk.device": "/dev/hdd1"
- }
- }
-
- config_file.store(sample_config)
- config.migrate(config_file, 3, 4, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("4", config_file.config["meta"]["config.version"])
- # Did not delete the section...
- self.assertTrue("build" in config_file.config)
- # ... but the key
- self.assertFalse("maven.bin" in config_file.config["build"])
- self.assertTrue("benchmarks" in config_file.config)
- self.assertFalse("metrics.stats.disk.device" in config_file.config["benchmarks"])
- self.assertEqual("in-memory", config_file.config["reporting"]["datastore.type"])
-
- def test_migrate_from_4_to_5(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 4
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 4, 5, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("5", config_file.config["meta"]["config.version"])
- self.assertTrue("tracks" in config_file.config)
- self.assertEqual("https://github.com/elastic/rally-tracks", config_file.config["tracks"]["default.url"])
-
- def test_migrate_from_5_to_6(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 5
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 5, 6, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("6", config_file.config["meta"]["config.version"])
- self.assertTrue("defaults" in config_file.config)
- self.assertEqual("False", config_file.config["defaults"]["preserve_benchmark_candidate"])
-
- def test_migrate_from_6_to_7(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 6
- },
- "system": {
- "log.root.dir": "logs"
- },
- "provisioning": {
- "local.install.dir": "install"
- },
- }
- config_file.store(sample_config)
- config.migrate(config_file, 6, 7, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("7", config_file.config["meta"]["config.version"])
- self.assertTrue("provisioning" not in config_file.config)
- self.assertTrue("log.root.dir" not in config_file.config["system"])
-
- def test_migrate_from_7_to_8(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 7
- },
- "system": {
- "root.dir": "~/.rally/benchmarks",
- "environment.name": "local"
- },
- "benchmarks": {
- "local.dataset.cache": "${system:root.dir}/data",
- "some.other.cache": "/data"
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 7, 8, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("8", config_file.config["meta"]["config.version"])
- self.assertTrue("root.dir" not in config_file.config["system"])
- self.assertEqual("~/.rally/benchmarks", config_file.config["node"]["root.dir"])
- self.assertEqual("local", config_file.config["system"]["environment.name"])
- self.assertEqual("${node:root.dir}/data", config_file.config["benchmarks"]["local.dataset.cache"])
- self.assertEqual("/data", config_file.config["benchmarks"]["some.other.cache"])
-
- def test_migrate_from_8_to_9(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 8
- },
- "system": {
- "root.dir": "~/.rally/benchmarks",
- "environment.name": "local"
},
- "benchmarks": {
- "local.dataset.cache": "${system:root.dir}/data",
- "some.other.cache": "/data"
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 8, 9, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("9", config_file.config["meta"]["config.version"])
- self.assertTrue("teams" in config_file.config)
- self.assertEqual("https://github.com/elastic/rally-teams", config_file.config["teams"]["default.url"])
-
- def test_migrate_from_9_to_10(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 9
- },
- "system": {
- "root.dir": "~/.rally/benchmarks",
- "environment.name": "local"
- },
- "benchmarks": {
- "local.dataset.cache": "${system:root.dir}/data",
- "some.other.cache": "/data"
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 9, 10, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("10", config_file.config["meta"]["config.version"])
- self.assertTrue("distributions" in config_file.config)
- self.assertEqual("https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{{VERSION}}.tar.gz",
- config_file.config["distributions"]["release.1.url"])
- self.assertEqual("https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/"
- "{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz",
- config_file.config["distributions"]["release.2.url"])
- self.assertEqual("https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz",
- config_file.config["distributions"]["release.url"])
- self.assertEqual("true",
- config_file.config["distributions"]["release.cache"])
-
- def test_migrate_from_10_to_11(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 10
+ "reporting": {
+ "report.base.dir": "/tests/rally/reporting",
+ "output.html.report.filename": "index.html"
},
"runtime": {
"java8.home": "/opt/jdk/8",
}
}
- config_file.store(sample_config)
- config.migrate(config_file, 10, 11, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("11", config_file.config["meta"]["config.version"])
- self.assertTrue("runtime" in config_file.config)
- self.assertFalse("java8.home" in config_file.config["runtime"])
- self.assertEqual("/opt/jdk/8", config_file.config["runtime"]["java.home"])
-
- @mock.patch("esrally.utils.io.exists")
- @mock.patch("os.rename")
- def test_migrate_from_11_to_12_with_default_src_config_repo_checked_out(self, path_rename, path_exists):
- path_exists.return_value = True
-
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 11
- },
- "node": {
- "root.dir": io.normalize_path("~/.rally/benchmarks")
- },
- "source": {
- "local.src.dir": io.normalize_path("~/.rally/benchmarks/src")
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 11, 12, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("12", config_file.config["meta"]["config.version"])
- self.assertEqual(io.normalize_path("~/.rally/benchmarks/src"), config_file.config["node"]["src.root.dir"])
- self.assertEqual("elasticsearch", config_file.config["source"]["elasticsearch.src.subdir"])
-
- path_rename.assert_has_calls(
- [
- mock.call(io.normalize_path("~/.rally/benchmarks/src"), io.normalize_path("~/.rally/benchmarks/tmp_src_mig")),
- mock.call(io.normalize_path("~/.rally/benchmarks/tmp_src_mig"),
- io.normalize_path("~/.rally/benchmarks/src/elasticsearch")),
- ]
- )
-
- @mock.patch("esrally.utils.io.exists")
- @mock.patch("os.rename")
- def test_migrate_from_11_to_12_with_default_src_config_repo_not_checked_out(self, path_rename, path_exists):
- path_exists.return_value = False
-
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 11
- },
- "node": {
- "root.dir": io.normalize_path("~/.rally/benchmarks")
- },
- "source": {
- "local.src.dir": io.normalize_path("~/.rally/benchmarks/src")
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 11, 12, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("12", config_file.config["meta"]["config.version"])
- self.assertEqual(io.normalize_path("~/.rally/benchmarks/src"), config_file.config["node"]["src.root.dir"])
- self.assertEqual("elasticsearch", config_file.config["source"]["elasticsearch.src.subdir"])
- # did all the migrations but nothing moved
- path_rename.assert_not_called()
-
- def test_migrate_from_11_to_12_without_src_config(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 11
- },
- "node": {
- "root.dir": "~/.rally/benchmarks"
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 11, 12, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("12", config_file.config["meta"]["config.version"])
- self.assertFalse("src.root.dir" in config_file.config["node"])
-
- def test_migrate_from_11_to_12_with_partial_src_config(self):
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 11
- },
- "node": {
- "root.dir": "~/.rally/benchmarks"
- },
- "source": {
- # a source config section without any keys should be treated like a missing source config section
- }
- }
- config_file.store(sample_config)
- config.migrate(config_file, 11, 12, out=null_output)
-
- self.assertTrue(config_file.backup_created)
- self.assertEqual("12", config_file.config["meta"]["config.version"])
- self.assertFalse("src.root.dir" in config_file.config["node"])
- self.assertFalse("elasticsearch.src.subdir" in config_file.config["source"])
- @mock.patch("esrally.utils.io.exists")
- @mock.patch("os.rename")
- def test_migrate_from_11_to_12_with_custom_src_config(self, path_rename, path_exists):
- path_exists.return_value = False
-
- config_file = InMemoryConfigStore("test")
- sample_config = {
- "meta": {
- "config.version": 11
- },
- "node": {
- "root.dir": io.normalize_path("~/.rally/benchmarks")
- },
- "source": {
- "local.src.dir": io.normalize_path("~/Projects/elasticsearch/master/es")
- }
- }
config_file.store(sample_config)
- config.migrate(config_file, 11, 12, out=null_output)
+ config.migrate(config_file, config.Config.EARLIEST_SUPPORTED_VERSION, config.Config.CURRENT_CONFIG_VERSION, out=null_output)
self.assertTrue(config_file.backup_created)
- self.assertEqual("12", config_file.config["meta"]["config.version"])
- self.assertEqual(io.normalize_path("~/Projects/elasticsearch/master"), config_file.config["node"]["src.root.dir"])
- self.assertEqual("es", config_file.config["source"]["elasticsearch.src.subdir"])
- # did all the migrations but nothing moved
- path_rename.assert_not_called()
+ self.assertEqual(str(config.Config.CURRENT_CONFIG_VERSION), config_file.config["meta"]["config.version"])
def test_migrate_from_12_to_13_without_gradle(self):
config_file = InMemoryConfigStore("test")
diff --git a/tests/driver/runner_test.py b/tests/driver/runner_test.py
index 55fc092c..8ad55c61 100644
--- a/tests/driver/runner_test.py
+++ b/tests/driver/runner_test.py
@@ -644,7 +644,7 @@ class QueryRunnerTests(TestCase):
self.assertEqual(1, results["pages"])
self.assertEqual(2, results["hits"])
self.assertEqual(4, results["took"])
- self.assertEqual("ops", results["unit"])
+ self.assertEqual("pages", results["unit"])
self.assertFalse(results["timed_out"])
self.assertFalse("error-type" in results)
@@ -692,7 +692,7 @@ class QueryRunnerTests(TestCase):
self.assertEqual(1, results["pages"])
self.assertEqual(2, results["hits"])
self.assertEqual(4, results["took"])
- self.assertEqual("ops", results["unit"])
+ self.assertEqual("pages", results["unit"])
self.assertFalse(results["timed_out"])
self.assertFalse("error-type" in results)
@@ -756,7 +756,7 @@ class QueryRunnerTests(TestCase):
self.assertEqual(2, results["pages"])
self.assertEqual(3, results["hits"])
self.assertEqual(79, results["took"])
- self.assertEqual("ops", results["unit"])
+ self.assertEqual("pages", results["unit"])
self.assertTrue(results["timed_out"])
self.assertFalse("error-type" in results)
@@ -812,7 +812,7 @@ class QueryRunnerTests(TestCase):
self.assertEqual(2, results["weight"])
self.assertEqual(2, results["pages"])
self.assertEqual(1, results["hits"])
- self.assertEqual("ops", results["unit"])
+ self.assertEqual("pages", results["unit"])
self.assertEqual(55, results["took"])
self.assertFalse("error-type" in results)
@@ -878,7 +878,7 @@ class QueryRunnerTests(TestCase):
self.assertEqual(2, results["pages"])
self.assertEqual(4, results["hits"])
self.assertEqual(900, results["took"])
- self.assertEqual("ops", results["unit"])
+ self.assertEqual("pages", results["unit"])
self.assertFalse(results["timed_out"])
self.assertFalse("error-type" in results)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip3 install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc python3-dev"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
elasticsearch==6.2.0
-e git+https://github.com/elastic/rally.git@fab6a504616af4b0de6584652e96bd30a466c246#egg=esrally
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- elasticsearch==6.2.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- tabulate==0.8.1
- thespian==3.9.2
- urllib3==1.22
prefix: /opt/conda/envs/rally
| [
"tests/config_test.py::ConfigMigrationTests::test_does_not_migrate_outdated_config",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_earliest_supported_to_latest",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_early_termination",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_only_one_page",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_only_one_page_only_request_body_defined",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_request_all_pages",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_with_explicit_number_of_pages"
]
| []
| [
"tests/config_test.py::ConfigTests::test_add_all_in_section",
"tests/config_test.py::ConfigTests::test_load_all_opts_in_section",
"tests/config_test.py::ConfigTests::test_load_existing_config",
"tests/config_test.py::ConfigTests::test_load_non_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_create_non_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_load_and_amend_existing_config",
"tests/config_test.py::AutoLoadConfigTests::test_can_migrate_outdated_config",
"tests/config_test.py::ConfigFactoryTests::test_create_advanced_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config_no_java_detected",
"tests/config_test.py::ConfigFactoryTests::test_create_simple_config_no_java_installed",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_ask_user_and_skip",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_ask_user_enter_valid",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk8_autodetect_jdk9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_with_gradle_and_jdk9",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_12_to_13_without_gradle",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk10",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_and_skip",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_ask_user_enter_valid",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_with_gradle_and_jdk8_autodetect_jdk10",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_13_to_14_without_gradle",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_14_to_15_with_gradle",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_14_to_15_with_source_plugin_definition",
"tests/config_test.py::ConfigMigrationTests::test_migrate_from_14_to_15_without_gradle",
"tests/driver/runner_test.py::RegisterRunnerTests::test_runner_class_should_be_wrapped",
"tests/driver/runner_test.py::RegisterRunnerTests::test_runner_class_with_context_manager_should_be_registered_as_is",
"tests/driver/runner_test.py::RegisterRunnerTests::test_runner_function_should_be_wrapped",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_error",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_missing_params",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_success_with_metadata",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_success_without_metadata",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_mixed_bulk_with_detailed_stats",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_mixed_bulk_with_simple_stats",
"tests/driver/runner_test.py::QueryRunnerTests::test_query_match_all",
"tests/driver/runner_test.py::QueryRunnerTests::test_query_match_only_request_body_defined",
"tests/driver/runner_test.py::PutPipelineRunnerTests::test_create_pipeline",
"tests/driver/runner_test.py::PutPipelineRunnerTests::test_param_body_mandatory",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_accepts_better_cluster_status",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_rejects_relocating_shards",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_rejects_unknown_cluster_status",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_waits_for_expected_cluster_status",
"tests/driver/runner_test.py::CreateIndexRunnerTests::test_creates_multiple_indices",
"tests/driver/runner_test.py::CreateIndexRunnerTests::test_param_indices_mandatory",
"tests/driver/runner_test.py::DeleteIndexRunnerTests::test_deletes_all_indices",
"tests/driver/runner_test.py::DeleteIndexRunnerTests::test_deletes_existing_indices",
"tests/driver/runner_test.py::CreateIndexTemplateRunnerTests::test_create_index_templates",
"tests/driver/runner_test.py::CreateIndexTemplateRunnerTests::test_param_templates_mandatory",
"tests/driver/runner_test.py::DeleteIndexTemplateRunnerTests::test_deletes_all_index_templates",
"tests/driver/runner_test.py::DeleteIndexTemplateRunnerTests::test_deletes_only_existing_index_templates",
"tests/driver/runner_test.py::DeleteIndexTemplateRunnerTests::test_param_templates_mandatory",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_create_index",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_delete_index",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_msearch",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_request_with_defaults",
"tests/driver/runner_test.py::RetryTests::test_assumes_success_if_runner_returns_non_dict",
"tests/driver/runner_test.py::RetryTests::test_does_not_retry_on_application_error_if_not_wanted",
"tests/driver/runner_test.py::RetryTests::test_does_not_retry_on_timeout_if_not_wanted",
"tests/driver/runner_test.py::RetryTests::test_is_does_not_retry_on_success",
"tests/driver/runner_test.py::RetryTests::test_is_transparent_on_application_error_when_no_retries",
"tests/driver/runner_test.py::RetryTests::test_is_transparent_on_exception_when_no_retries",
"tests/driver/runner_test.py::RetryTests::test_is_transparent_on_success_when_no_retries",
"tests/driver/runner_test.py::RetryTests::test_retries_mixed_timeout_and_application_errors",
"tests/driver/runner_test.py::RetryTests::test_retries_on_application_error_if_wanted",
"tests/driver/runner_test.py::RetryTests::test_retries_on_timeout_if_wanted_and_raises_if_no_recovery",
"tests/driver/runner_test.py::RetryTests::test_retries_on_timeout_if_wanted_and_returns_first_call"
]
| []
| Apache License 2.0 | 2,390 | [
"docs/install.rst",
"esrally/utils/git.py",
"esrally/utils/process.py",
"esrally/config.py",
"esrally/driver/runner.py"
]
| [
"docs/install.rst",
"esrally/utils/git.py",
"esrally/utils/process.py",
"esrally/config.py",
"esrally/driver/runner.py"
]
|
|
DOV-Vlaanderen__pydov-48 | f65f1848a17074280c5686eb7f106570bafe36fb | 2018-04-11 09:14:11 | f65f1848a17074280c5686eb7f106570bafe36fb | pjhaest: @Roel looks promising. You did not have any troubles with the filter attribute of owslib? This took ages about a year ago. | diff --git a/examples/boring_search.py b/examples/boring_search.py
index 09349e9..86053d8 100644
--- a/examples/boring_search.py
+++ b/examples/boring_search.py
@@ -104,6 +104,22 @@ def get_boreholes_in_bounding_box():
print(df)
+def get_deep_boreholes_in_bounding_box():
+ """Get all details of the boreholes with a depth of at least 2000m
+ within the given bounding box."""
+ from pydov.search import BoringSearch
+ from owslib.fes import PropertyIsGreaterThanOrEqualTo
+
+ b = BoringSearch()
+ query = PropertyIsGreaterThanOrEqualTo(
+ propertyname='diepte_boring_tot', literal='2000')
+ df = b.search(
+ location=(200000, 211000, 205000, 214000),
+ query=query
+ )
+ print(df)
+
+
if __name__ == '__main__':
# Comment out to skip these examples:
get_description()
@@ -116,3 +132,4 @@ if __name__ == '__main__':
# get_deep_boreholes()
# get_groundwater_related_boreholes_in_antwerp()
# get_boreholes_in_bounding_box()
+ # get_deep_boreholes_in_bounding_box()
diff --git a/pydov/search.py b/pydov/search.py
index 9c2e2e4..4283c81 100644
--- a/pydov/search.py
+++ b/pydov/search.py
@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
"""Module containing the search classes to retrieve DOV data."""
-import owslib
import pandas as pd
+
+import owslib
from owslib.etree import etree
from owslib.fes import (
FilterRequest,
)
from owslib.wfs import WebFeatureService
-
from pydov.types.boring import Boring
from pydov.util import owsutil
from pydov.util.errors import (
@@ -41,6 +41,7 @@ class AbstractSearch(object):
self._fields = None
self._wfs_fields = None
+ self._geometry_column = None
self._map_wfs_source_df = {}
self._map_df_wfs_source = {}
@@ -197,6 +198,7 @@ class AbstractSearch(object):
"""
fields = {}
self._wfs_fields = []
+ self._geometry_column = wfs_schema.get('geometry_column', None)
_map_wfs_datatypes = {
'int': 'integer',
@@ -251,7 +253,7 @@ class AbstractSearch(object):
Parameters
----------
- location : tuple<minx,maxx,miny,maxy>
+ location : tuple<minx,miny,maxx,maxy>
The bounding box limiting the features to retrieve.
query : owslib.fes.OgcExpression
OGC filter expression to use for searching. This can contain any
@@ -268,8 +270,6 @@ class AbstractSearch(object):
pydov.util.errors.InvalidSearchParameterError
When not one of `location` or `query` is provided.
- When both `location` and `query` are provided.
-
pydov.util.errors.InvalidFieldError
When at least one of the fields in `return_fields` is unknown.
@@ -285,11 +285,6 @@ class AbstractSearch(object):
'Provide either the location or the query parameter.'
)
- if location is not None and query is not None:
- raise InvalidSearchParameterError(
- 'Provide either the location or the query parameter, not both.'
- )
-
if query is not None:
if not isinstance(query, owslib.fes.OgcExpression):
raise InvalidSearchParameterError(
@@ -311,6 +306,9 @@ class AbstractSearch(object):
raise InvalidFieldError(
"Unknown query parameter: '%s'" % name)
+ if location is not None:
+ self._init_fields()
+
if return_fields is not None:
if type(return_fields) not in (list, tuple, set):
raise AttributeError('return_fields should be a list, '
@@ -332,19 +330,23 @@ class AbstractSearch(object):
"Field cannot be used as a return field: '%s'" % rf)
@staticmethod
- def _get_remote_wfs_feature(wfs, typename, bbox, filter, propertyname):
- """Perform the OWSLib call to get features from the remote service.
+ def _get_remote_wfs_feature(wfs, typename, bbox, filter, propertyname,
+ geometry_column):
+ """Perform the WFS GetFeature call to get features from the remote
+ service.
Parameters
----------
typename : str
Layername to query.
- bbox : tuple<minx,maxx,miny,maxy>
+ bbox : tuple<minx,miny,maxx,maxy>
The bounding box limiting the features to retrieve.
filter : owslib.fes.FilterRequest
Filter request to search on attribute values.
propertyname : list<str>
List of properties to return.
+ geometry_column : str
+ Name of the geometry column to use in the spatial filter.
Returns
-------
@@ -352,18 +354,26 @@ class AbstractSearch(object):
Response of the WFS service.
"""
- return wfs.getfeature(
+ wfs_getfeature_xml = owsutil.wfs_build_getfeature_request(
+ version=wfs.version,
+ geometry_column=geometry_column,
typename=typename,
bbox=bbox,
filter=filter,
- propertyname=propertyname).read().encode('utf-8')
+ propertyname=propertyname
+ )
+
+ return owsutil.wfs_get_feature(
+ baseurl=wfs.url,
+ get_feature_request=wfs_getfeature_xml
+ )
def _search(self, location=None, query=None, return_fields=None):
"""Perform the WFS search by issuing a GetFeature request.
Parameters
----------
- location : tuple<minx,maxx,miny,maxy>
+ location : tuple<minx,miny,maxx,maxy>
The bounding box limiting the features to retrieve.
query : owslib.fes.OgcExpression
OGC filter expression to use for searching. This can contain any
@@ -386,8 +396,6 @@ class AbstractSearch(object):
pydov.util.errors.InvalidSearchParameterError
When not one of `location` or `query` is provided.
- When both `location` and `query` are provided.
-
pydov.util.errors.InvalidFieldError
When at least one of the fields in `return_fields` is unknown.
@@ -436,11 +444,13 @@ class AbstractSearch(object):
if i in return_fields])
wfs_property_names = list(set(wfs_property_names))
- fts = self._get_remote_wfs_feature(wfs=self.__wfs,
- typename=self._layer,
- bbox=location,
- filter=filter_request,
- propertyname=wfs_property_names)
+ fts = self._get_remote_wfs_feature(
+ wfs=self.__wfs,
+ typename=self._layer,
+ bbox=location,
+ filter=filter_request,
+ propertyname=wfs_property_names,
+ geometry_column=self._geometry_column)
tree = etree.fromstring(fts)
@@ -542,7 +552,7 @@ class BoringSearch(AbstractSearch):
Parameters
----------
- location : tuple<minx,maxx,miny,maxy>
+ location : tuple<minx,miny,maxx,maxy>
The bounding box limiting the features to retrieve.
query : owslib.fes.OgcExpression
OGC filter expression to use for searching. This can contain any
@@ -564,8 +574,6 @@ class BoringSearch(AbstractSearch):
pydov.util.errors.InvalidSearchParameterError
When not one of `location` or `query` is provided.
- When both `location` and `query` are provided.
-
pydov.util.errors.InvalidFieldError
When at least one of the fields in `return_fields` is unknown.
diff --git a/pydov/util/owsutil.py b/pydov/util/owsutil.py
index 29df850..e2df59a 100644
--- a/pydov/util/owsutil.py
+++ b/pydov/util/owsutil.py
@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
"""Module grouping utility functions for OWS services."""
+import requests
+
from owslib.feature.schema import (
_get_describefeaturetype_url,
_get_elements,
- _construct_schema,
XS_NAMESPACE,
+ GML_NAMESPACES
)
try:
@@ -151,6 +153,7 @@ def get_csw_base_url(contentmetadata):
------
pydov.util.errors.MetadataNotFoundError
If the `contentmetadata` has no valid metadata URL associated with it.
+
"""
md_url = None
for md in contentmetadata.metadataUrls:
@@ -326,6 +329,72 @@ def get_namespace(wfs, layer):
return namespace
+def _construct_schema(elements, nsmap):
+ """Copy the owslib.feature.schema.get_schema method to be able to get
+ the geometry column name.
+
+ Parameters
+ ----------
+ elements : list<Element>
+ List of elements
+ nsmap : dict
+ Namespace map
+
+ Returns
+ -------
+ dict
+ Schema
+
+ """
+ schema = {
+ 'properties': {},
+ 'geometry': None
+ }
+
+ schema_key = None
+ gml_key = None
+
+ # if nsmap is defined, use it
+ if nsmap:
+ for key in nsmap:
+ if nsmap[key] == XS_NAMESPACE:
+ schema_key = key
+ if nsmap[key] in GML_NAMESPACES:
+ gml_key = key
+ # if no nsmap is defined, we have to guess
+ else:
+ gml_key = 'gml'
+ schema_key = 'xsd'
+
+ mappings = {
+ 'PointPropertyType': 'Point',
+ 'PolygonPropertyType': 'Polygon',
+ 'LineStringPropertyType': 'LineString',
+ 'MultiPointPropertyType': 'MultiPoint',
+ 'MultiLineStringPropertyType': 'MultiLineString',
+ 'MultiPolygonPropertyType': 'MultiPolygon',
+ 'MultiGeometryPropertyType': 'MultiGeometry',
+ 'GeometryPropertyType': 'GeometryCollection',
+ 'SurfacePropertyType': '3D Polygon',
+ 'MultiSurfacePropertyType': '3D MultiPolygon'
+ }
+
+ for element in elements:
+ data_type = element.attrib['type'].replace(gml_key + ':', '')
+ name = element.attrib['name']
+
+ if data_type in mappings:
+ schema['geometry'] = mappings[data_type]
+ schema['geometry_column'] = name
+ else:
+ schema['properties'][name] = data_type.replace(schema_key+':', '')
+
+ if schema['properties'] or schema['geometry']:
+ return schema
+ else:
+ return None
+
+
def get_remote_schema(url, typename, version='1.0.0'):
"""Copy the owslib.feature.schema.get_schema method to be able to
monkeypatch the openURL request in tests.
@@ -359,3 +428,119 @@ def get_remote_schema(url, typename, version='1.0.0'):
if hasattr(root, 'nsmap'):
nsmap = root.nsmap
return _construct_schema(elements, nsmap)
+
+
+def wfs_build_getfeature_request(typename, geometry_column=None, bbox=None,
+ filter=None, propertyname=None,
+ version='1.1.0'):
+ """Build a WFS GetFeature request in XML to be used as payload in a WFS
+ GetFeature request using POST.
+
+ Parameters
+ ----------
+ typename : str
+ Typename to query.
+ geometry_column : str, optional
+ Name of the geometry column to use in the spatial filter.
+ Required if the ``bbox`` parameter is supplied.
+ bbox : tuple<minx,miny,maxx,maxy>, optional
+ The bounding box limiting the features to retrieve.
+ Requires ``geometry_column`` to be supplied as well.
+ filter : owslib.fes.FilterRequest, optional
+ Filter request to search on attribute values.
+ propertyname : list<str>, optional
+ List of properties to return. Defaults to all properties.
+ version : str, optional
+ WFS version to use. Defaults to 1.1.0
+
+ Raises
+ ------
+ AttributeError
+ If ``bbox`` is given without ``geometry_column``.
+
+ Returns
+ -------
+ element : etree.Element
+ XML element representing the WFS GetFeature request.
+
+ """
+ if bbox is not None and geometry_column is None:
+ raise AttributeError('bbox requires geometry_column and it is None')
+
+ xml = etree.Element('{http://www.opengis.net/wfs}GetFeature')
+ xml.set('service', 'WFS')
+ xml.set('version', version)
+
+ xml.set('{http://www.w3.org/2001/XMLSchema-instance}schemaLocation',
+ 'http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/%s/wfs.xsd' % version)
+
+ query = etree.Element('{http://www.opengis.net/wfs}Query')
+ query.set('typeName', typename)
+
+ if propertyname and len(propertyname) > 0:
+ for property in propertyname:
+ propertyname_xml = etree.Element(
+ '{http://www.opengis.net/wfs}PropertyName')
+ propertyname_xml.text = property
+ query.append(propertyname_xml)
+
+ filter_xml = etree.Element('{http://www.opengis.net/ogc}Filter')
+ filter_parent = filter_xml
+
+ if filter is not None and bbox is not None:
+ # if both filter and bbox are specified, we wrap them inside an
+ # ogc:And
+ and_xml = etree.Element('{http://www.opengis.net/ogc}And')
+ filter_xml.append(and_xml)
+ filter_parent = and_xml
+
+ if filter is not None:
+ filterrequest = etree.fromstring(filter)
+ filter_parent.append(filterrequest[0])
+
+ if bbox is not None:
+ within = etree.Element('{http://www.opengis.net/ogc}Within')
+ geom = etree.Element('{http://www.opengis.net/ogc}PropertyName')
+ geom.text = geometry_column
+ within.append(geom)
+
+ envelope = etree.Element('{http://www.opengis.net/gml}Envelope')
+ envelope.set('srsDimension', '2')
+ envelope.set('srsName',
+ 'http://www.opengis.net/gml/srs/epsg.xml#31370')
+
+ lower_corner = etree.Element('{http://www.opengis.net/gml}lowerCorner')
+ lower_corner.text = '%0.3f %0.3f' % (bbox[0], bbox[1])
+ envelope.append(lower_corner)
+
+ upper_corner = etree.Element('{http://www.opengis.net/gml}upperCorner')
+ upper_corner.text = '%0.3f %0.3f' % (bbox[2], bbox[3])
+ envelope.append(upper_corner)
+ within.append(envelope)
+ filter_parent.append(within)
+
+ query.append(filter_xml)
+ xml.append(query)
+ return xml
+
+
+def wfs_get_feature(baseurl, get_feature_request):
+ """Perform a WFS request using POST.
+
+ Parameters
+ ----------
+ baseurl : str
+ Base URL of the WFS service.
+ get_feature_request : etree.Element
+ XML element representing the WFS GetFeature request.
+
+ Returns
+ -------
+ bytes
+ Response of the WFS service.
+
+ """
+ data = etree.tostring(get_feature_request)
+ request = requests.post(baseurl, data)
+ return request.text.encode('utf8')
diff --git a/requirements.txt b/requirements.txt
index 41672b2..6e0b593 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,4 @@ owslib
xmltodict
pandas
numpy
+requests
| Combine WFS attribute and location on search | DOV-Vlaanderen/pydov | diff --git a/tests/test_search.py b/tests/test_search.py
index dfd22fd..0f9d6ce 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -44,11 +44,11 @@ def mp_remote_wfs_feature(monkeypatch):
if sys.version_info[0] < 3:
monkeypatch.setattr(
- 'pydov.search.AbstractSearch._get_remote_wfs_feature',
+ 'pydov.util.owsutil.wfs_get_feature',
__get_remote_wfs_feature)
else:
monkeypatch.setattr(
- 'pydov.search.AbstractSearch._get_remote_wfs_feature',
+ 'pydov.util.owsutil.wfs_get_feature',
__get_remote_wfs_feature)
@@ -175,23 +175,32 @@ class TestBoringSearch(object):
with pytest.raises(InvalidSearchParameterError):
boringsearch.search(location=None, query=None)
- def test_search_both_location_query(self, boringsearch):
+ def test_search_both_location_query(self, mp_remote_describefeaturetype,
+ mp_remote_wfs_feature, boringsearch):
"""Test the search method providing both a location and a query.
- Test whether an InvalidSearchParameterError is raised.
+ Test whether a dataframe is returned.
Parameters
----------
+ mp_remote_describefeaturetype : pytest.fixture
+ Monkeypatch the call to a remote DescribeFeatureType of the
+ dov-pub:Boringen layer.
+ mp_remote_wfs_feature : pytest.fixture
+ Monkeypatch the call to get WFS features.
boringsearch : pytest.fixture returning pydov.search.BoringSearch
An instance of BoringSearch to perform search operations on the DOV
type 'Boring'.
"""
- with pytest.raises(InvalidSearchParameterError):
- query = PropertyIsEqualTo(propertyname='gemeente',
- literal='Blankenberge')
- boringsearch.search(location=(1, 2, 3, 4),
- query=query)
+ query = PropertyIsEqualTo(propertyname='gemeente',
+ literal='Blankenberge')
+
+ df = boringsearch.search(location=(1, 2, 3, 4),
+ query=query,
+ return_fields=('pkey_boring', 'boornummer'))
+
+ assert type(df) is DataFrame
def test_search_both_location_query_wrongquerytype(self, boringsearch):
"""Test the search method providing both a location and a query,
@@ -459,8 +468,9 @@ class TestBoringSearch(object):
with pytest.raises(InvalidFieldError):
boringsearch.search(query=query)
- def test_search_xmlresolving(self, mp_remote_wfs_feature, mp_boring_xml,
- boringsearch):
+ def test_search_xmlresolving(self, mp_remote_describefeaturetype,
+ mp_remote_wfs_feature, mp_boring_xml,
+ boringsearch):
"""Test the search method with return fields from XML but not from a
subtype.
@@ -468,6 +478,9 @@ class TestBoringSearch(object):
Parameters
----------
+ mp_remote_describefeaturetype : pytest.fixture
+ Monkeypatch the call to a remote DescribeFeatureType of the
+ dov-pub:Boringen layer.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_boring_xml : pytest.fixture
diff --git a/tests/test_util_owsutil.py b/tests/test_util_owsutil.py
index e23802f..f0b002e 100644
--- a/tests/test_util_owsutil.py
+++ b/tests/test_util_owsutil.py
@@ -1,15 +1,19 @@
"""Module grouping tests for the pydov.util.owsutil module."""
-
+import re
import sys
-import owslib
import pytest
from numpy.compat import unicode
+
+import owslib
from owslib.etree import etree
+from owslib.fes import (
+ PropertyIsEqualTo,
+ FilterRequest,
+)
from owslib.iso import MD_Metadata
from owslib.util import nspath_eval
from owslib.wfs import WebFeatureService
-
from pydov.util import owsutil
from pydov.util.errors import (
MetadataNotFoundError,
@@ -209,6 +213,36 @@ def mp_remote_describefeaturetype(monkeypatch):
__get_remote_describefeaturetype.__code__)
+def clean_xml(xml):
+ """Clean the given XML string of namespace definition, namespace
+ prefixes and syntactical but otherwise meaningless differences.
+
+ Parameters
+ ----------
+ xml : str
+ String representation of XML document.
+
+ Returns
+ -------
+ str
+ String representation of cleaned XML document.
+
+ """
+ # remove xmlns namespace definitions
+ r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml)
+
+ # remove namespace prefixes in tags
+ r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r)
+
+ # remove extra spaces in tags
+ r = re.sub(r'[ ]+/>', '/>', r)
+
+ # remove extra spaces between tags
+ r = re.sub(r'>[ ]+<', '><', r)
+
+ return r
+
+
class TestOwsutil(object):
"""Class grouping tests for the pydov.util.owsutil module."""
@@ -419,3 +453,198 @@ class TestOwsutil(object):
contentmetadata.metadataUrls = []
with pytest.raises(MetadataNotFoundError):
owsutil.get_remote_metadata(contentmetadata)
+
+ def test_wfs_build_getfeature_request_onlytypename(self):
+ """Test the owsutil.wfs_build_getfeature_request method with only a
+ typename specified.
+
+ Test whether the XML of the WFS GetFeature call is generated correctly.
+
+ """
+ xml = owsutil.wfs_build_getfeature_request('dov-pub:Boringen')
+ assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
+ '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
+ 'service="WFS" version="1.1.0" '
+ 'xsi:schemaLocation="http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query '
+ 'typeName="dov-pub:Boringen"><ogc:Filter '
+ 'xmlns:ogc="http://www.opengis.net/ogc"/></wfs:Query></wfs'
+ ':GetFeature>')
+
+ def test_wfs_build_getfeature_request_bbox_nogeometrycolumn(self):
+ """Test the owsutil.wfs_build_getfeature_request method with a bbox
+ argument but without the geometry_column argument.
+
+ Test whether an AttributeError is raised.
+
+ """
+ with pytest.raises(AttributeError):
+ xml = owsutil.wfs_build_getfeature_request(
+ 'dov-pub:Boringen', bbox=(151650, 214675, 151750, 214775))
+
+ def test_wfs_build_getfeature_request_bbox(self):
+ """Test the owsutil.wfs_build_getfeature_request method with a
+ typename, bbox and geometry_column.
+
+ Test whether the XML of the WFS GetFeature call is generated correctly.
+
+ """
+ xml = owsutil.wfs_build_getfeature_request(
+ 'dov-pub:Boringen', bbox=(151650, 214675, 151750, 214775),
+ geometry_column='geom')
+ assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
+ '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
+ 'service="WFS" version="1.1.0" '
+ 'xsi:schemaLocation="http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query '
+ 'typeName="dov-pub:Boringen"><ogc:Filter '
+ 'xmlns:ogc="http://www.opengis.net/ogc"><ogc:Within> '
+ '<ogc:PropertyName>geom</ogc:PropertyName><gml:Envelope '
+ 'xmlns:gml="http://www.opengis.net/gml" srsDimension="2" '
+ 'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
+ ':lowerCorner>151650.000 '
+ '214675.000</gml:lowerCorner><gml:upperCorner>151750.000 '
+ '214775.000</gml:upperCorner></gml:Envelope></ogc:Within></ogc'
+ ':Filter></wfs:Query></wfs:GetFeature>')
+
+ def test_wfs_build_getfeature_request_propertyname(self):
+ """Test the owsutil.wfs_build_getfeature_request method with a list
+ of propertynames.
+
+ Test whether the XML of the WFS GetFeature call is generated correctly.
+
+ """
+ xml = owsutil.wfs_build_getfeature_request(
+ 'dov-pub:Boringen', propertyname=['fiche', 'diepte_tot_m'])
+ assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
+ '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
+ 'service="WFS" version="1.1.0" '
+ 'xsi:schemaLocation="http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
+ 'typeName="dov-pub:Boringen"> '
+ '<wfs:PropertyName>fiche</wfs:PropertyName> '
+ '<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter/> '
+ '</wfs:Query> </wfs:GetFeature>')
+
+ def test_wfs_build_getfeature_request_filter(self):
+ """Test the owsutil.wfs_build_getfeature_request method with an
+ attribute filter.
+
+ Test whether the XML of the WFS GetFeature call is generated correctly.
+
+ """
+ query = PropertyIsEqualTo(propertyname='gemeente',
+ literal='Herstappe')
+ filter_request = FilterRequest()
+ filter_request = filter_request.setConstraint(query)
+ try:
+ filter_request = etree.tostring(filter_request,
+ encoding='unicode')
+ except LookupError:
+ # Python2.7 without lxml uses 'utf-8' instead.
+ filter_request = etree.tostring(filter_request,
+ encoding='utf-8')
+
+ xml = owsutil.wfs_build_getfeature_request(
+ 'dov-pub:Boringen', filter=filter_request)
+ assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
+ '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
+ 'service="WFS" version="1.1.0" '
+ 'xsi:schemaLocation="http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
+ 'typeName="dov-pub:Boringen"> <ogc:Filter> '
+ '<ogc:PropertyIsEqualTo> '
+ '<ogc:PropertyName>gemeente</ogc:PropertyName> '
+ '<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
+ '</ogc:Filter> </wfs:Query> </wfs:GetFeature>')
+
+ def test_wfs_build_getfeature_request_bbox_filter(self):
+ """Test the owsutil.wfs_build_getfeature_request method with an
+ attribute filter, a bbox and a geometry_column.
+
+ Test whether the XML of the WFS GetFeature call is generated correctly.
+
+ """
+ query = PropertyIsEqualTo(propertyname='gemeente',
+ literal='Herstappe')
+ filter_request = FilterRequest()
+ filter_request = filter_request.setConstraint(query)
+ try:
+ filter_request = etree.tostring(filter_request,
+ encoding='unicode')
+ except LookupError:
+ # Python2.7 without lxml uses 'utf-8' instead.
+ filter_request = etree.tostring(filter_request,
+ encoding='utf-8')
+
+ xml = owsutil.wfs_build_getfeature_request(
+ 'dov-pub:Boringen', filter=filter_request,
+ bbox=(151650, 214675, 151750, 214775),
+ geometry_column='geom')
+ assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
+ '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
+ 'service="WFS" version="1.1.0" '
+ 'xsi:schemaLocation="http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
+ 'typeName="dov-pub:Boringen"> <ogc:Filter> <ogc:And> '
+ '<ogc:PropertyIsEqualTo> '
+ '<ogc:PropertyName>gemeente</ogc:PropertyName> '
+ '<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
+ '<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> '
+ '<gml:Envelope xmlns:gml="http://www.opengis.net/gml" '
+ 'srsDimension="2" '
+ 'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> '
+ '<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> '
+ '<gml:upperCorner>151750.000 214775.000</gml:upperCorner> '
+ '</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> '
+ '</wfs:Query> </wfs:GetFeature>')
+
+ def test_wfs_build_getfeature_request_bbox_filter_propertyname(self):
+ """Test the owsutil.wfs_build_getfeature_request method with an
+ attribute filter, a bbox, a geometry_column and a list of
+ propertynames.
+
+ Test whether the XML of the WFS GetFeature call is generated correctly.
+
+ """
+ query = PropertyIsEqualTo(propertyname='gemeente',
+ literal='Herstappe')
+ filter_request = FilterRequest()
+ filter_request = filter_request.setConstraint(query)
+ try:
+ filter_request = etree.tostring(filter_request,
+ encoding='unicode')
+ except LookupError:
+ # Python2.7 without lxml uses 'utf-8' instead.
+ filter_request = etree.tostring(filter_request,
+ encoding='utf-8')
+
+ xml = owsutil.wfs_build_getfeature_request(
+ 'dov-pub:Boringen', filter=filter_request,
+ bbox=(151650, 214675, 151750, 214775),
+ geometry_column='geom', propertyname=['fiche', 'diepte_tot_m'])
+ assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
+ '<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
+ 'service="WFS" version="1.1.0" '
+ 'xsi:schemaLocation="http://www.opengis.net/wfs '
+ 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
+ 'typeName="dov-pub:Boringen"> '
+ '<wfs:PropertyName>fiche</wfs:PropertyName> '
+ '<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter> '
+ '<ogc:And> <ogc:PropertyIsEqualTo> '
+ '<ogc:PropertyName>gemeente</ogc:PropertyName> '
+ '<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
+ '<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> '
+ '<gml:Envelope xmlns:gml="http://www.opengis.net/gml" '
+ 'srsDimension="2" '
+ 'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> '
+ '<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> '
+ '<gml:upperCorner>151750.000 214775.000</gml:upperCorner> '
+ '</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> '
+ '</wfs:Query> </wfs:GetFeature>')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-runner"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
dataclasses==0.8
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
lxml==5.3.1
numpy==1.19.5
OWSLib==0.31.0
packaging==21.3
pandas==1.1.5
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/DOV-Vlaanderen/pydov.git@f65f1848a17074280c5686eb7f106570bafe36fb#egg=pydov
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-runner==5.3.2
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xmltodict==0.14.2
zipp==3.6.0
| name: pydov
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- dataclasses==0.8
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lxml==5.3.1
- numpy==1.19.5
- owslib==0.31.0
- packaging==21.3
- pandas==1.1.5
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-runner==5.3.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xmltodict==0.14.2
- zipp==3.6.0
prefix: /opt/conda/envs/pydov
| [
"tests/test_search.py::TestBoringSearch::test_search_both_location_query",
"tests/test_search.py::TestBoringSearch::test_search",
"tests/test_search.py::TestBoringSearch::test_search_returnfields",
"tests/test_search.py::TestBoringSearch::test_search_returnfields_order",
"tests/test_search.py::TestBoringSearch::test_search_xmlresolving",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_onlytypename",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_bbox",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_propertyname",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_filter",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_bbox_filter",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_bbox_filter_propertyname"
]
| []
| [
"tests/test_search.py::TestBoringSearch::test_get_description",
"tests/test_search.py::TestBoringSearch::test_get_fields",
"tests/test_search.py::TestBoringSearch::test_search_nolocation_noquery",
"tests/test_search.py::TestBoringSearch::test_search_both_location_query_wrongquerytype",
"tests/test_search.py::TestBoringSearch::test_search_wrongreturnfields",
"tests/test_search.py::TestBoringSearch::test_search_wrongreturnfields_queryfield",
"tests/test_search.py::TestBoringSearch::test_search_wrongreturnfieldstype",
"tests/test_search.py::TestBoringSearch::test_search_query_wrongfield",
"tests/test_search.py::TestBoringSearch::test_search_query_wrongtype",
"tests/test_search.py::TestBoringSearch::test_search_query_wrongfield_returnfield",
"tests/test_util_owsutil.py::TestOwsutil::test_get_csw_base_url",
"tests/test_util_owsutil.py::TestOwsutil::test_get_csw_base_url_nometadataurls",
"tests/test_util_owsutil.py::TestOwsutil::test_get_featurecatalogue_uuid",
"tests/test_util_owsutil.py::TestOwsutil::test_get_featurecatalogue_uuid_nocontentinfo",
"tests/test_util_owsutil.py::TestOwsutil::test_get_featurecatalogue_uuid_nouuidref",
"tests/test_util_owsutil.py::TestOwsutil::test_get_namespace",
"tests/test_util_owsutil.py::TestOwsutil::test_get_remote_featurecatalogue",
"tests/test_util_owsutil.py::TestOwsutil::test_get_remote_featurecataloge_baduuid",
"tests/test_util_owsutil.py::TestOwsutil::test_get_remote_metadata",
"tests/test_util_owsutil.py::TestOwsutil::test_get_remote_metadata_nometadataurls",
"tests/test_util_owsutil.py::TestOwsutil::test_wfs_build_getfeature_request_bbox_nogeometrycolumn"
]
| []
| MIT License | 2,391 | [
"requirements.txt",
"pydov/search.py",
"pydov/util/owsutil.py",
"examples/boring_search.py"
]
| [
"requirements.txt",
"pydov/search.py",
"pydov/util/owsutil.py",
"examples/boring_search.py"
]
|
google__mobly-433 | 02b9d84acfe775a6fe73e2b960ba7e47765184d6 | 2018-04-11 23:35:44 | 95286a01a566e056d44acfa9577a45bc7f37f51d | xpconanfan:
Review status: 0 of 4 files reviewed at latest revision, all discussions resolved.
---
*[mobly/logger.py, line 30 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rUD-H8deRk8kCggOb:-L9rUD-H8deRk8kCggOc:b-si4ejl) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/logger.py#L30)):*
> ```Python
> # The micro seconds are added by the format string above,
> # so the time format does not include ms.
> log_line_time_format = '%m-%d %H:%M:%S'
> ```
why are there so many changes needed?
can't you simply change this template to use `-` instead of `:` on win?
I'd expect this to be a fairly small change.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 4 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/logger.py, line 30 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rUD-H8deRk8kCggOb:-L9rVeGL144Q0V0yjdW8:b-ylpfxz) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/logger.py#L30)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
why are there so many changes needed?
can't you simply change this template to use `-` instead of `:` on win?
I'd expect this to be a fairly small change.
</blockquote></details>
Wouldn't changing that would change all logging output?
If you think that's an okay thing to change, then I can go with that approach instead.
Also, I don't really want to special case this for Windows because that would mean that anything that consumes this stuff would also need to special case for Windows, which seems really non-ideal.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 4 files reviewed at latest revision, all discussions resolved.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rY7nx5JR9tU-NmjVF:b-sujf6i) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
> ```Python
> begin_time_file_format = mobly_logger.log_line_to_log_file_timestamp(
> begin_time, year=year)
> out_name = f_name.replace('adblog,', '').replace('.txt', '')
> ```
Let's just add another line here to replace the `:` with `-`, non-platform-specific?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 4 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rYoFK7tUkGZS_O9Of:b-feviw3) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Let's just add another line here to replace the `:` with `-`, non-platform-specific?
</blockquote></details>
In that case, these excerpt log files won't match the general format and any format changes could be a bit haphazard?
Is that okay?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 4 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9raa-d54WwVx0DHq-I:bhtxqid) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
In that case, these excerpt log files won't match the general format and any format changes could be a bit haphazard?
Is that okay?
</blockquote></details>
what's the general format? do we have other files that have `:` in their names?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 4 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rbv9W4xRGxh8CWLxf:bcvkn56) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
what's the general format? do we have other files that have `:` in their names?
</blockquote></details>
I basically mean anything with get_log_file_timestamp, which seems to just be the logging directory and the sniffer client aside.
I'm not aware of any other filenames with ":" in their name atm.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rf-5v2dhh9z7pAkVX:b-tflkmf) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
I basically mean anything with get_log_file_timestamp, which seems to just be the logging directory and the sniffer client aside.
I'm not aware of any other filenames with ":" in their name atm.
</blockquote></details>
You mean the sniffer client would have the same problem on win?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rfz0CBFLiuW8syDME:b-t4khsn) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
> get_log_file_timestamp
No, I mean the design would be that anything that uses get_log_file_timestamp should be fine with any OS, which seems to be the case as is,
which makes for a more robust solution and standardized solution.
However, if that's not valuable, then I can go with the simpler solution.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rhRqi9nO2gbTCait2:b-3gkpqu) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
> get_log_file_timestamp
No, I mean the design would be that anything that uses get_log_file_timestamp should be fine with any OS, which seems to be the case as is,
which makes for a more robust solution and standardized solution.
However, if that's not valuable, then I can go with the simpler solution.
</blockquote></details>
The `get_log_file_timestamp` works as expected, which is to get the timestamp in the format used *inside* adb logcat files.
The mistake here is `cat_adb_log` uses that timestamp in its file names....
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rit0lEh-vWhiZ-LVu:b6uj05l) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
The `get_log_file_timestamp` works as expected, which is to get the timestamp in the format used *inside* adb logcat files.
The mistake here is `cat_adb_log` uses that timestamp in its file names....
</blockquote></details>
I'm confused.
I was under the impression that:
get_log_file_timestamp is used for human readable timestamps on log files
get_log_line_timestamp is used for logging inside of files and getting from them
I thought the mistake was using get_log_line_timestamp for the file name when get_log_file_timestamp is what should've been used;
hence all the logic for the conversion.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rk7Kx5a0qYq5wnsgc:bkonyb3) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
I'm confused.
I was under the impression that:
get_log_file_timestamp is used for human readable timestamps on log files
get_log_line_timestamp is used for logging inside of files and getting from them
I thought the mistake was using get_log_line_timestamp for the file name when get_log_file_timestamp is what should've been used;
hence all the logic for the conversion.
</blockquote></details>
Sorry I misread...
Where did the `:` come from then?
Seems like it was supplied by the caller?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rkW7dAA8m5K5LqMdT:b-mes44j) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Sorry I misread...
Where did the `:` come from then?
Seems like it was supplied by the caller?
</blockquote></details>
It was supplied by the caller via begin_time, which must match the format of get_log_line_timestamp.
The issue is that the format of get_log_line_timestamp includes a ":", which is not a valid character on Windows.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rl1q08d5p9FESjY67:bt97oki) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
It was supplied by the caller via begin_time, which must match the format of get_log_line_timestamp.
The issue is that the format of get_log_line_timestamp includes a ":", which is not a valid character on Windows.
</blockquote></details>
Ok, so the actual bug is we shouldn't let users supply part of file name really...
Why do you have to match the timestamp format
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rlJ698ZP3_p_dImhI:b-c95xsa) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Ok, so the actual bug is we shouldn't let users supply part of file name really...
Why do you have to match the timestamp format
</blockquote></details>
Because the logic/purpose of this function is to create a shortened version of the adblog which only contains log lines from begin_time to current.
In the current logic, begin_time is used with _is_timestamp_in_range to determine whether or not a particular line should be included.
The logic of _is_timestamp_in_range uses logline_timestamp_comparator, which expects the log line format.
Users supplying begin_time should be fine because the logic depends on that.
The issue is that begin_time's format is not compatible with file systems, so it needs to be changed.
If a user gives a malformed begin_time, then that's just user error, which I think should be okay.
But since the log line format is not a valid file name format, this isn't a user error issue.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 940 at r1](https://beta.reviewable.io/reviews/google/mobly/433#-L9rY7nx5JR9tU-NmjVE:-L9rnj7oB1DbQLYv4DR3:b-v9o03b) ([raw file](https://github.com/google/mobly/blob/ed7f9460e71e3f266925802c8007b4a044b64dab/mobly/controllers/android_device.py#L940)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
Because the logic/purpose of this function is to create a shortened version of the adblog which only contains log lines from begin_time to current.
In the current logic, begin_time is used with _is_timestamp_in_range to determine whether or not a particular line should be included.
The logic of _is_timestamp_in_range uses logline_timestamp_comparator, which expects the log line format.
Users supplying begin_time should be fine because the logic depends on that.
The issue is that begin_time's format is not compatible with file systems, so it needs to be changed.
If a user gives a malformed begin_time, then that's just user error, which I think should be okay.
But since the log line format is not a valid file name format, this isn't a user error issue.
</blockquote></details>
I see.
I really want to get rid of this function eventually...
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433)*
<!-- Sent from Reviewable.io -->
xpconanfan: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/433#-:-L9ro1ad95YVzZItkhZh:bnfp4nl)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py
index f1a4636..14828a4 100644
--- a/mobly/controllers/android_device.py
+++ b/mobly/controllers/android_device.py
@@ -436,9 +436,8 @@ class AndroidDevice(object):
self._log_path = os.path.join(self._log_path_base,
'AndroidDevice%s' % self._serial)
self._debug_tag = self._serial
- self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), {
- 'tag': self.debug_tag
- })
+ self.log = AndroidDeviceLoggerAdapter(logging.getLogger(),
+ {'tag': self.debug_tag})
self.sl4a = None
self.ed = None
self._adb_logcat_process = None
@@ -937,6 +936,7 @@ class AndroidDevice(object):
f_name = os.path.basename(self.adb_logcat_file_path)
out_name = f_name.replace('adblog,', '').replace('.txt', '')
out_name = ',%s,%s.txt' % (begin_time, out_name)
+ out_name = out_name.replace(':', '-')
tag_len = utils.MAX_FILENAME_LEN - len(out_name)
tag = tag[:tag_len]
out_name = tag + out_name
| AdbLogExcerpts filename is invalid on Windows
Windows filenames should not have colons in them:
https://support.microsoft.com/en-us/help/905231/information-about-the-characters-that-you-cannot-use-in-site-names-fol | google/mobly | diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py
index b1428ec..f175f17 100755
--- a/tests/mobly/controllers/android_device_test.py
+++ b/tests/mobly/controllers/android_device_test.py
@@ -630,7 +630,7 @@ class AndroidDeviceTest(unittest.TestCase):
ad.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
cat_file_path = os.path.join(
ad.log_path, 'AdbLogExcerpts',
- ('some_test,02-29 14:02:20.123,%s,%s.txt') % (ad.model, ad.serial))
+ ('some_test,02-29 14-02-20.123,%s,%s.txt') % (ad.model, ad.serial))
with open(cat_file_path, 'r') as f:
actual_cat = f.read()
self.assertEqual(actual_cat, ''.join(MOCK_ADB_LOGCAT_CAT_RESULT))
diff --git a/tests/mobly/logger_test.py b/tests/mobly/logger_test.py
index b1cf839..1ac9f1d 100755
--- a/tests/mobly/logger_test.py
+++ b/tests/mobly/logger_test.py
@@ -1,11 +1,11 @@
# Copyright 2016 Google Inc.
-#
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@02b9d84acfe775a6fe73e2b960ba7e47765184d6#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pyserial==3.5
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pyserial==3.5
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log"
]
| []
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_logpersist",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_all_logpersist",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_logpersist_start",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_logpersist_stop",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_no_log_exists",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_existing_file",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_service",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_device_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_fail_cleanup_also_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_failure",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_precheck_failure",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_start_app_fails",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_serial_is_valid",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_with_destination",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial_with_service_running",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_success_with_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads_skip_logcat",
"tests/mobly/logger_test.py::LoggerTest::test_epoch_to_log_line_timestamp"
]
| []
| Apache License 2.0 | 2,392 | [
"mobly/controllers/android_device.py"
]
| [
"mobly/controllers/android_device.py"
]
|
Stewori__pytypes-37 | 5736ecd2878aca5c78df44aeeaef13303cbfa5f2 | 2018-04-12 02:58:23 | 152a2218bfa1b96ae5d866542ee4ad148b1b7b5d | diff --git a/pytypes/type_util.py b/pytypes/type_util.py
index 13c991e..f6227c8 100644
--- a/pytypes/type_util.py
+++ b/pytypes/type_util.py
@@ -900,7 +900,7 @@ def resolve_fw_decl(in_type, module_name=None, globs=None, level=0):
for in_tp in args])
ret = resolve_fw_decl(res, None, globs)[1] or ret
return in_type, ret
- elif hasattr(in_type, '__args__'):
+ elif hasattr(in_type, '__args__') and in_type.__args__ is not None:
return in_type, any([resolve_fw_decl(in_tp, None, globs)[1] \
for in_tp in in_type.__args__])
return in_type, False
| NoneType' object is not iterable in resolve_fw_decl
```python
>>> import typing
>>> from pytypes import type_util
>>> T = typing.TypeVar('T')
>>> class Foo(typing.Generic[T]):
... pass
...
>>> type_util.resolve_fw_decl(Foo)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'NoneType' object is not iterable
``` | Stewori/pytypes | diff --git a/tests/test_typechecker.py b/tests/test_typechecker.py
index d6acaf9..ddd2745 100644
--- a/tests/test_typechecker.py
+++ b/tests/test_typechecker.py
@@ -23,7 +23,7 @@ from numbers import Real
import pytypes
from pytypes import typechecked, override, auto_override, no_type_check, get_types, \
get_type_hints, TypeCheckError, InputTypeError, ReturnTypeError, OverrideError, \
- TypeSyntaxError, check_argument_types, annotations, get_member_types
+ TypeSyntaxError, check_argument_types, annotations, get_member_types, resolve_fw_decl
pytypes.clean_traceback = False
try:
from backports import typing
@@ -4652,5 +4652,18 @@ class Test_check_argument_types_Python3_5(unittest.TestCase):
self.assertRaises(InputTypeError, lambda:
py3.test_inner_class_testf1_err())
+
+class Test_utils(unittest.TestCase):
+ # See: https://github.com/Stewori/pytypes/issues/36
+ def test_resolve_fw_decl(self):
+ T = typing.TypeVar('T')
+
+ class Foo(typing.Generic[T]):
+ pass
+
+ # No exception.
+ resolve_fw_decl(Foo)
+
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.03 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/Stewori/pytypes.git@5736ecd2878aca5c78df44aeeaef13303cbfa5f2#egg=pytypes
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pytypes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pytypes
| [
"tests/test_typechecker.py::Test_utils::test_resolve_fw_decl"
]
| [
"tests/test_typechecker.py::testfunc",
"tests/test_typechecker.py::testfunc_err",
"tests/test_typechecker.py::testfunc2",
"tests/test_typechecker.py::testfunc4",
"tests/test_typechecker.py::testfunc_None_ret",
"tests/test_typechecker.py::testfunc_None_ret_err",
"tests/test_typechecker.py::testfunc_None_arg",
"tests/test_typechecker.py::testfunc_Dict_arg",
"tests/test_typechecker.py::testfunc_Mapping_arg",
"tests/test_typechecker.py::testfunc_Dict_ret",
"tests/test_typechecker.py::testfunc_Dict_ret_err",
"tests/test_typechecker.py::testfunc_Seq_arg",
"tests/test_typechecker.py::testfunc_Seq_ret_List",
"tests/test_typechecker.py::testfunc_Seq_ret_Tuple",
"tests/test_typechecker.py::testfunc_Seq_ret_err",
"tests/test_typechecker.py::testfunc_Iter_arg",
"tests/test_typechecker.py::testfunc_Iter_str_arg",
"tests/test_typechecker.py::testfunc_Iter_ret_err",
"tests/test_typechecker.py::testfunc_Callable_arg",
"tests/test_typechecker.py::testfunc_Callable_call_err",
"tests/test_typechecker.py::testfunc_Callable_ret",
"tests/test_typechecker.py::testfunc_Callable_ret_err",
"tests/test_typechecker.py::testfunc_Generator_arg",
"tests/test_typechecker.py::testfunc_Generator_ret",
"tests/test_typechecker.py::testfunc_Generic_arg",
"tests/test_typechecker.py::testfunc_Generic_ret",
"tests/test_typechecker.py::testfunc_Generic_ret_err",
"tests/test_typechecker.py::testfunc_numeric_tower_float",
"tests/test_typechecker.py::testfunc_numeric_tower_complex",
"tests/test_typechecker.py::testfunc_numeric_tower_tuple",
"tests/test_typechecker.py::testfunc_numeric_tower_return",
"tests/test_typechecker.py::testfunc_numeric_tower_return_err",
"tests/test_typechecker.py::testfunc_custom_annotations_typechecked",
"tests/test_typechecker.py::testfunc_custom_annotations_typechecked_err",
"tests/test_typechecker.py::testfunc_varargs2",
"tests/test_typechecker.py::testfunc_varargs3",
"tests/test_typechecker.py::testfunc_varargs5",
"tests/test_typechecker.py::testfunc_varargs_err",
"tests/test_typechecker.py::testfunc_varargs_ca3"
]
| [
"tests/test_typechecker.py::testfunc_Iter_ret",
"tests/test_typechecker.py::testfunc_Generator",
"tests/test_typechecker.py::testfunc_varargs1",
"tests/test_typechecker.py::testfunc_varargs4",
"tests/test_typechecker.py::testClass2_defTimeCheck",
"tests/test_typechecker.py::testClass2_defTimeCheck2",
"tests/test_typechecker.py::testClass2_defTimeCheck3",
"tests/test_typechecker.py::testClass2_defTimeCheck4",
"tests/test_typechecker.py::testClass3_defTimeCheck",
"tests/test_typechecker.py::testClass2_defTimeCheck_init_ov",
"tests/test_typechecker.py::testfunc_check_argument_types_empty",
"tests/test_typechecker.py::testfunc_varargs_ca1",
"tests/test_typechecker.py::testfunc_varargs_ca4",
"tests/test_typechecker.py::TestTypecheck::test_abstract_override",
"tests/test_typechecker.py::TestTypecheck::test_annotations_from_typestring",
"tests/test_typechecker.py::TestTypecheck::test_callable",
"tests/test_typechecker.py::TestTypecheck::test_classmethod",
"tests/test_typechecker.py::TestTypecheck::test_custom_annotations",
"tests/test_typechecker.py::TestTypecheck::test_custom_generic",
"tests/test_typechecker.py::TestTypecheck::test_defaults_inferred_types",
"tests/test_typechecker.py::TestTypecheck::test_dict",
"tests/test_typechecker.py::TestTypecheck::test_empty",
"tests/test_typechecker.py::TestTypecheck::test_function",
"tests/test_typechecker.py::TestTypecheck::test_generator",
"tests/test_typechecker.py::TestTypecheck::test_get_generic_parameters",
"tests/test_typechecker.py::TestTypecheck::test_get_types",
"tests/test_typechecker.py::TestTypecheck::test_iterable",
"tests/test_typechecker.py::TestTypecheck::test_method",
"tests/test_typechecker.py::TestTypecheck::test_method_forward",
"tests/test_typechecker.py::TestTypecheck::test_numeric_tower",
"tests/test_typechecker.py::TestTypecheck::test_parent_typecheck_no_override",
"tests/test_typechecker.py::TestTypecheck::test_parent_typecheck_other_signature",
"tests/test_typechecker.py::TestTypecheck::test_property",
"tests/test_typechecker.py::TestTypecheck::test_sequence",
"tests/test_typechecker.py::TestTypecheck::test_staticmethod",
"tests/test_typechecker.py::TestTypecheck::test_subtype_class_extends_generic",
"tests/test_typechecker.py::TestTypecheck::test_typecheck_parent_type",
"tests/test_typechecker.py::TestTypecheck::test_typestring_varargs_syntax",
"tests/test_typechecker.py::TestTypecheck::test_typevar_class",
"tests/test_typechecker.py::TestTypecheck::test_typevar_func",
"tests/test_typechecker.py::TestTypecheck::test_unparameterized",
"tests/test_typechecker.py::TestTypecheck::test_varargs",
"tests/test_typechecker.py::TestTypecheck::test_varargs_check_argument_types",
"tests/test_typechecker.py::TestTypecheck::test_various",
"tests/test_typechecker.py::TestTypecheck_class::test_classmethod",
"tests/test_typechecker.py::TestTypecheck_class::test_method",
"tests/test_typechecker.py::TestTypecheck_class::test_staticmethod",
"tests/test_typechecker.py::TestTypecheck_module::test_function_py2",
"tests/test_typechecker.py::TestTypecheck_module::test_function_py3",
"tests/test_typechecker.py::Test_check_argument_types::test_function",
"tests/test_typechecker.py::Test_check_argument_types::test_inner_class",
"tests/test_typechecker.py::Test_check_argument_types::test_inner_method",
"tests/test_typechecker.py::Test_check_argument_types::test_methods",
"tests/test_typechecker.py::TestOverride::test_auto_override",
"tests/test_typechecker.py::TestOverride::test_override",
"tests/test_typechecker.py::TestOverride::test_override_at_definition_time",
"tests/test_typechecker.py::TestOverride::test_override_at_definition_time_with_forward_decl",
"tests/test_typechecker.py::TestOverride::test_override_diamond",
"tests/test_typechecker.py::TestOverride::test_override_typecheck",
"tests/test_typechecker.py::TestOverride::test_override_typecheck_class",
"tests/test_typechecker.py::TestOverride::test_override_vararg",
"tests/test_typechecker.py::TestStubfile::test_annotations_from_stubfile_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_annotations_from_stubfile_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_callable_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_custom_generic_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_defaults_inferred_types_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_defaults_inferred_types_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_dict_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_generator_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_iterable_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_override_diamond_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_override_diamond_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_property_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_property_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_sequence_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_typecheck_parent_type_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_typecheck_parent_type_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_check_argument_types_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_check_argument_types_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_plain_3_5_stub",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_abstract_override_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_callable_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_classmethod_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_custom_generic_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_inferred_types",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_dict_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_function_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_generator_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_get_types_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_iterable_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_method_forward_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_method_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_parent_typecheck_no_override_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_parent_typecheck_other_signature_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_property",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_sequence_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_staticmethod_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_typecheck_parent_type",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_typevar_func",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_varargs",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_varargs_check_argument_types",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_various_py3",
"tests/test_typechecker.py::TestOverride_Python3_5::test_auto_override",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_at_definition_time",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_at_definition_time_with_forward_decl",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_diamond",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_py3",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_typecheck",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_vararg",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_function",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_inner_class",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_inner_method",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_methods"
]
| []
| Apache License 2.0 | 2,393 | [
"pytypes/type_util.py"
]
| [
"pytypes/type_util.py"
]
|
|
elcaminoreal__elcaminoreal-10 | a76600014ed645b6f92b98bf491f2b49e5625d6d | 2018-04-12 03:32:11 | a76600014ed645b6f92b98bf491f2b49e5625d6d | diff --git a/src/elcaminoreal/_gather.py b/src/elcaminoreal/_gather.py
index 63f85a9..cbe7637 100644
--- a/src/elcaminoreal/_gather.py
+++ b/src/elcaminoreal/_gather.py
@@ -24,12 +24,13 @@ class Commands(object):
def command(self,
name=None,
parser=caparg.command(''),
- dependencies=pyrsistent.v()):
+ dependencies=pyrsistent.v(),
+ regular=False):
"""
Register as a command.
"""
- transform = gather.Wrapper.glue((dependencies, parser))
+ transform = gather.Wrapper.glue((dependencies, parser, regular))
ret = self._command_collector.register(name, transform=transform)
return ret
@@ -47,20 +48,27 @@ class Commands(object):
parsed = command.parse(args)
subcommand = ' '.join(parsed['__caparg_subcommand__'])
func = collection[subcommand].original
- dependencies, _ignored = collection[subcommand].extra
+ dependencies, _ignored, regular = collection[subcommand].extra
graph = self.mkgraph(dependencies)
graph.update(override_dependencies)
- return func(parsed, graph)
+ if not regular:
+ return func(parsed, graph)
+ args = {dependency: graph[dependency]
+ for dependency in dependencies}
+ args.update(parsed)
+ del args['__caparg_subcommand__']
+ return func(**args)
def dependency(self,
name=None,
dependencies=pyrsistent.v(),
- possible_dependencies=pyrsistent.v()):
+ possible_dependencies=pyrsistent.v(),
+ regular=False):
"""
Register as a dependency.
"""
- glue = (dependencies, possible_dependencies)
+ glue = (dependencies, possible_dependencies, regular)
transform = gather.Wrapper.glue(glue)
ret = self._collector.register(name, transform=transform)
return ret
@@ -83,14 +91,20 @@ class Commands(object):
on_route = on_route.add(thing)
plugin = collection[thing]
func = plugin.original
- dependencies, possible_dependencies = plugin.extra
+ dependencies, possible_dependencies, regular = plugin.extra
my_dependencies, my_possible_dependencies = {}, {}
for other_thing in dependencies:
my_dependencies[other_thing] = _build(other_thing, on_route)
for other_thing in possible_dependencies:
builder = functools.partial(_build, other_thing, on_route)
my_possible_dependencies[other_thing] = builder
- ret[thing] = func(my_dependencies, my_possible_dependencies)
+ if regular:
+ args = {'build_' + key: value
+ for key, value in my_possible_dependencies.items()}
+ args.update(my_dependencies)
+ ret[thing] = func(**args)
+ else:
+ ret[thing] = func(my_dependencies, my_possible_dependencies)
return ret[thing]
for thing in things:
_build(thing)
diff --git a/tox.ini b/tox.ini
index 0ef3c06..297cc23 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,7 +30,7 @@ commands =
# E0704 -- bare raise outside except (rare, when it's done I mean it)
# R0201 -- unused self in methods (methods can be used for polymorphism)
# R0903 -- too few public methods (attrs-based classes have implicit ones)
- py27-lint: pylint --disable=unsupported-assignment-operation --disable=no-member --disable=not-callable --disable=unsubscriptable-object --disable=E0704 --disable=R0903 --disable=R0201 src/elcaminoreal
+ py27-lint: pylint --disable=blacklisted-name --disable=unsupported-assignment-operation --disable=no-member --disable=not-callable --disable=unsubscriptable-object --disable=E0704 --disable=R0903 --disable=R0201 src/elcaminoreal
py27-lint: flake8 src/elcaminoreal
#{py27,pypy,py36,py35}-func: python -m elcaminoreal.example selftest
#{py27,pypy,py35}-func: python -m elcaminoreal.example selftest
| Allow regular arguments to commands and dependencies
The following should work:
```
@COMMANDS.dependency(
dependencies=['foo', 'bar'],
possible_dependencies=['baz'],
regular=True)
def thing(foo , bar, build_baz):
val = foo()
if val > 5:
val -= build_baz()
return val + bar()
@COMMANDS.command(
dependencies=['thing'],
parser=ca.command('',
ca.positional('lala', type=str)),
regular=True)
def do_stuff(thing, lala):
return str(thing) + lala
``` | elcaminoreal/elcaminoreal | diff --git a/src/elcaminoreal/test/some_plugins.py b/src/elcaminoreal/test/some_plugins.py
index a885594..9507b74 100644
--- a/src/elcaminoreal/test/some_plugins.py
+++ b/src/elcaminoreal/test/some_plugins.py
@@ -21,6 +21,18 @@ def a_foo(dependencies, _possible_dependencies):
return dict(bar=dependencies['bar'])
[email protected](dependencies=["bar", "quux"],
+ possible_dependencies=["foo"],
+ regular=True)
+def regular(bar, quux, build_foo):
+ """
+ Depend on bar, maybe on foo
+
+ Use regular arguments.
+ """
+ return dict(bar=bar, quux=quux, foo=build_foo())
+
+
@COMMANDS.dependency(possible_dependencies=["bar"])
def foo_2(_dependencies, possible_dependencies):
"""
@@ -37,6 +49,14 @@ def a_bar(_dependencies, _possible_dependencies):
return "I'm a bar"
[email protected](name="quux")
+def a_quux(_dependencies, _possible_dependencies):
+ """
+ Return a quux-like object.
+ """
+ return "I'm a quux"
+
+
@COMMANDS.dependency()
def rand(_dependencies, _possible_dependencies):
"""
@@ -83,6 +103,28 @@ def _print(_dependencies, _possible_dependencies):
return print
[email protected](name='output')
+def dummy_output(_dependencies, _possible_dependencies):
+ """
+ Literally do nothing.
+
+ This is designed for being overridden.
+ """
+
+
[email protected](dependencies=['foo', 'output'],
+ parser=ca.command('',
+ ca.positional('lili', type=str)),
+ regular=True)
+def regular_command(foo, lili, output):
+ """
+ Use regular arguments
+
+ Output results
+ """
+ output(foo, lili)
+
+
@COMMANDS.command(dependencies=['foo', 'print'],
parser=ca.command('',
ca.positional('lala', type=str)))
diff --git a/src/elcaminoreal/test/test_gathering.py b/src/elcaminoreal/test/test_gathering.py
index 8049cd1..7ec2028 100644
--- a/src/elcaminoreal/test/test_gathering.py
+++ b/src/elcaminoreal/test/test_gathering.py
@@ -46,6 +46,15 @@ class DependencyResolverTester(unittest.TestCase):
result = some_plugins.COMMANDS.mkgraph(['foo_2'])
self.assertEquals(result['foo_2'], dict(bar="I'm a bar"))
+ def test_mkgraph_regular(self):
+ """
+ mkgraph regular functions work
+ """
+ result = some_plugins.COMMANDS.mkgraph(['regular'])
+ self.assertEquals(result['regular']['bar'], result['bar'])
+ self.assertEquals(result['regular']['quux'], result['quux'])
+ self.assertEquals(result['regular']['foo'], result['foo'])
+
class RunnerResolverTester(unittest.TestCase):
@@ -100,3 +109,18 @@ class RunnerResolverTester(unittest.TestCase):
some_plugins.COMMANDS.run(['no-such-command'])
error_message = filep.getvalue().splitlines()
self.assertEquals(error_message.pop(0), 'Usage:')
+
+ def test_regular(self):
+ """
+ Asking for regular arguments calls functions with argument names
+ """
+ output = []
+
+ def _my_output(*args):
+ output.append(args)
+ dependencies = dict(output=_my_output)
+ some_plugins.COMMANDS.run(['regular-command', 'thing'],
+ override_dependencies=dependencies)
+ self.assertEquals(len(output), 1)
+ self.assertEquals(output[0][0]['bar'], "I'm a bar")
+ self.assertEquals(output[0][1], 'thing')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 18.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
caparg==19.11.0
coverage==7.8.0
-e git+https://github.com/elcaminoreal/elcaminoreal.git@a76600014ed645b6f92b98bf491f2b49e5625d6d#egg=elcaminoreal
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
gather==2023.1.20.1
incremental==24.7.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pyrsistent==0.20.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
venusian==3.1.1
| name: elcaminoreal
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- caparg==19.11.0
- coverage==7.8.0
- execnet==2.1.1
- gather==2023.1.20.1
- incremental==24.7.2
- pyrsistent==0.20.0
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- six==1.17.0
- typing-extensions==4.13.0
- venusian==3.1.1
prefix: /opt/conda/envs/elcaminoreal
| [
"src/elcaminoreal/test/test_gathering.py::RunnerResolverTester::test_required"
]
| [
"src/elcaminoreal/test/test_gathering.py::DependencyResolverTester::test_mkgraph",
"src/elcaminoreal/test/test_gathering.py::DependencyResolverTester::test_mkgraph_cycle",
"src/elcaminoreal/test/test_gathering.py::DependencyResolverTester::test_mkgraph_possible",
"src/elcaminoreal/test/test_gathering.py::DependencyResolverTester::test_mkgraph_random",
"src/elcaminoreal/test/test_gathering.py::DependencyResolverTester::test_mkgraph_regular",
"src/elcaminoreal/test/test_gathering.py::RunnerResolverTester::test_args",
"src/elcaminoreal/test/test_gathering.py::RunnerResolverTester::test_error_redirect",
"src/elcaminoreal/test/test_gathering.py::RunnerResolverTester::test_regular",
"src/elcaminoreal/test/test_gathering.py::RunnerResolverTester::test_run"
]
| []
| []
| MIT License | 2,394 | [
"src/elcaminoreal/_gather.py",
"tox.ini"
]
| [
"src/elcaminoreal/_gather.py",
"tox.ini"
]
|
|
elastic__rally-469 | a5408e0d0d07b271b509df8057a7c73303604c10 | 2018-04-12 11:53:51 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/docs/adding_tracks.rst b/docs/adding_tracks.rst
index f8bf7bad..5b0b43f5 100644
--- a/docs/adding_tracks.rst
+++ b/docs/adding_tracks.rst
@@ -578,10 +578,6 @@ The data set that is used in the http_logs track starts on 26-04-1998 but we wan
Custom parameter sources
^^^^^^^^^^^^^^^^^^^^^^^^
-.. note::
-
- This is a rather new feature and the API may change! However, the effort to use custom parameter sources is very low.
-
.. warning::
Your parameter source is on a performance-critical code-path so please double-check with :ref:`Rally's profiling support <clr_enable_driver_profiling>` that you did not introduce any bottlenecks.
@@ -644,7 +640,7 @@ Rally will recognize the parameter source and looks then for a file ``track.py``
},
"index": index_name,
"type": type_name,
- "use_request_cache": params.get("cache", False)
+ "cache": params.get("cache", False)
}
def register(registry):
@@ -712,7 +708,7 @@ If you need more control, you need to implement a class. The example above, impl
},
"index": self._index_name,
"type": self._type_name,
- "use_request_cache": self._cache
+ "cache": self._cache
}
diff --git a/docs/migrate.rst b/docs/migrate.rst
index 9050cfb4..067e1582 100644
--- a/docs/migrate.rst
+++ b/docs/migrate.rst
@@ -69,6 +69,21 @@ The example above also shows how to provide per-challenge index settings. If per
This behavior applies similarly to index templates as well.
+Custom Parameter Sources
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have aligned the internal names between parameter sources and runners with the ones that are specified by the user in the track file. If you have implemented custom parameter sources or runners, please adjust the parameter names as follows:
+
+============== ======================= =======================
+Operation type Old name New name
+============== ======================= =======================
+search use_request_cache cache
+search request_params request-params
+search items_per_page results-per-page
+bulk action_metadata_present action-metadata-present
+force-merge max_num_segments max-num-segments
+============== ======================= =======================
+
Migrating to Rally 0.9.0
------------------------
diff --git a/docs/track.rst b/docs/track.rst
index 0bea3c76..669955df 100644
--- a/docs/track.rst
+++ b/docs/track.rst
@@ -330,7 +330,7 @@ force-merge
With the operation type ``force-merge`` you can call the `force merge API <http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_. On older versions of Elasticsearch (prior to 2.1), Rally will use the ``optimize API`` instead. It supports the following parameter:
-* ``max_num_segments`` (optional) The number of segments the index should be merged into. Defaults to simply checking if a merge needs to execute, and if so, executes it.
+* ``max-num-segments`` (optional) The number of segments the index should be merged into. Defaults to simply checking if a merge needs to execute, and if so, executes it.
This is an administrative operation. Metrics are not reported by default. If reporting is forced by setting ``include-in-reporting`` to ``true``, then throughput is reported as the number of completed force-merge operations per second.
diff --git a/esrally/driver/runner.py b/esrally/driver/runner.py
index 4ba93457..42053c0a 100644
--- a/esrally/driver/runner.py
+++ b/esrally/driver/runner.py
@@ -254,7 +254,15 @@ class BulkIndex(Runner):
if "pipeline" in params:
bulk_params["pipeline"] = params["pipeline"]
- with_action_metadata = mandatory(params, "action_metadata_present", self)
+ # TODO: Remove this fallback logic with Rally 1.0
+ if "action_metadata_present" in params:
+ logger.warning("Your parameter source uses the deprecated name [action_metadata_present]. Please change it to "
+ "[action-metadata-present].")
+ action_meta_data_key = "action_metadata_present"
+ else:
+ action_meta_data_key = "action-metadata-present"
+
+ with_action_metadata = mandatory(params, action_meta_data_key, self)
bulk_size = mandatory(params, "bulk-size", self)
if with_action_metadata:
@@ -287,7 +295,15 @@ class BulkIndex(Runner):
for line_number, data in enumerate(params["body"]):
line_size = len(data.encode('utf-8'))
- if params["action_metadata_present"]:
+
+ # TODO: Remove this fallback logic with Rally 1.0
+ if "action_metadata_present" in params:
+ logger.warning("Your parameter source uses the deprecated name [action_metadata_present]. Please change it to "
+ "[action-metadata-present].")
+ action_meta_data_key = "action_metadata_present"
+ else:
+ action_meta_data_key = "action-metadata-present"
+ if params[action_meta_data_key]:
if line_number % 2 == 1:
total_document_size_bytes += line_size
else:
@@ -377,15 +393,23 @@ class ForceMerge(Runner):
logger.info("Force merging all indices.")
import elasticsearch
try:
- if "max_num_segments" in params:
- es.indices.forcemerge(index="_all", max_num_segments=params["max_num_segments"])
+ if "max-num-segments" in params:
+ max_num_segments = params["max-num-segments"]
+ elif "max_num_segments" in params:
+ logger.warning("Your parameter source uses the deprecated name [max_num_segments]. Please change it to [max-num-segments].")
+ max_num_segments = params["max_num_segments"]
+ else:
+ max_num_segments = None
+
+ if max_num_segments:
+ es.indices.forcemerge(index="_all", max_num_segments=max_num_segments)
else:
es.indices.forcemerge(index="_all")
except elasticsearch.TransportError as e:
# this is caused by older versions of Elasticsearch (< 2.1), fall back to optimize
if e.status_code == 400:
- if "max_num_segments" in params:
- es.transport.perform_request("POST", "/_optimize?max_num_segments=%s" % (params["max_num_segments"]))
+ if max_num_segments:
+ es.transport.perform_request("POST", "/_optimize?max_num_segments={}".format(max_num_segments))
else:
es.transport.perform_request("POST", "/_optimize")
else:
@@ -427,14 +451,14 @@ class Query(Runner):
* `index`: The index or indices against which to issue the query.
* `type`: See `index`
- * `use_request_cache`: True iff the request cache should be used.
+ * `cache`: True iff the request cache should be used.
* `body`: Query body
If the following parameters are present in addition, a scroll query will be issued:
* `pages`: Number of pages to retrieve at most for this scroll. If a scroll query does yield less results than the specified number of
pages we will terminate earlier.
- * `items_per_page`: Number of items to retrieve per page.
+ * `results-per-page`: Number of results to retrieve per page.
Returned meta data
@@ -457,14 +481,26 @@ class Query(Runner):
self.es = None
def __call__(self, es, params):
- if "pages" in params and "items_per_page" in params:
+ # TODO: Remove items_per_page with Rally 1.0.
+ if "pages" in params and ("results-per-page" in params or "items_per_page" in params):
return self.scroll_query(es, params)
else:
return self.request_body_query(es, params)
def request_body_query(self, es, params):
- request_params = params.get("request_params", {})
- if "use_request_cache" in params:
+ if "request-params" in params:
+ request_params = params["request-params"]
+ elif "request_params" in params:
+ # TODO: Remove with Rally 1.0.
+ logger.warning("Your parameter source uses the deprecated name [request_params]. Please change it to [request-params].")
+ request_params = params["request_params"]
+ else:
+ request_params = {}
+ if "cache" in params:
+ request_params["request_cache"] = params["cache"]
+ elif "use_request_cache" in params:
+ # TODO: Remove with Rally 1.0.
+ logger.warning("Your parameter source uses the deprecated name [use_request_cache]. Please change it to [cache].")
request_params["request_cache"] = params["use_request_cache"]
r = es.search(
index=params.get("index", "_all"),
@@ -481,7 +517,14 @@ class Query(Runner):
}
def scroll_query(self, es, params):
- request_params = params.get("request_params", {})
+ if "request-params" in params:
+ request_params = params["request-params"]
+ elif "request_params" in params:
+ # TODO: Remove with Rally 1.0.
+ logger.warning("Your parameter source uses the deprecated name [request_params]. Please change it to [request-params].")
+ request_params = params["request_params"]
+ else:
+ request_params = {}
hits = 0
retrieved_pages = 0
timed_out = False
@@ -489,6 +532,22 @@ class Query(Runner):
self.es = es
# explicitly convert to int to provoke an error otherwise
total_pages = sys.maxsize if params["pages"] == "all" else int(params["pages"])
+ if "cache" in params:
+ cache = params["cache"]
+ elif "use_request_cache" in params:
+ # TODO: Remove with Rally 1.0.
+ logger.warning("Your parameter source uses the deprecated name [use_request_cache]. Please change it to [cache].")
+ cache = params["use_request_cache"]
+ else:
+ cache = None
+ if "results-per-page" in params:
+ size = params["results-per-page"]
+ elif "items_per_page" in params:
+ # TODO: Remove with Rally 1.0.
+ logger.warning("Your parameter source uses the deprecated name [items_per_page]. Please change it to [results-per-page].")
+ size = params["items_per_page"]
+ else:
+ size = None
for page in range(total_pages):
if page == 0:
@@ -498,8 +557,8 @@ class Query(Runner):
body=mandatory(params, "body", self),
sort="_doc",
scroll="10s",
- size=params["items_per_page"],
- request_cache=params.get("use_request_cache"),
+ size=size,
+ request_cache=cache,
**request_params
)
# This should only happen if we concurrently create an index and start searching
@@ -615,7 +674,7 @@ class PutPipeline(Runner):
def __call__(self, es, params):
es.ingest.put_pipeline(id=mandatory(params, "id", self),
body=mandatory(params, "body", self),
- master_timeout=params.get("master_timeout"),
+ master_timeout=params.get("master-timeout"),
timeout=params.get("timeout"),
)
diff --git a/esrally/track/params.py b/esrally/track/params.py
index 70cd3ace..28683fe0 100644
--- a/esrally/track/params.py
+++ b/esrally/track/params.py
@@ -353,13 +353,17 @@ class SearchParamSource(ParamSource):
query_body = params.get("body", None)
query_body_params = params.get("body-params", None)
pages = params.get("pages", None)
- items_per_page = params.get("results-per-page", None)
+ results_per_page = params.get("results-per-page", None)
request_params = params.get("request-params", {})
self.query_params = {
"index": index_name,
"type": type_name,
+ "cache": request_cache,
+ # TODO: This is the old name, remove with Rally 1.0
"use_request_cache": request_cache,
+ "request-params": request_params,
+ # TODO: This is the old name, remove with Rally 1.0
"request_params": request_params,
"body": query_body
}
@@ -369,13 +373,15 @@ class SearchParamSource(ParamSource):
if pages:
self.query_params["pages"] = pages
- if items_per_page:
- self.query_params["items_per_page"] = items_per_page
+ if results_per_page:
+ self.query_params["results-per-page"] = results_per_page
+ # TODO: This is the old name, remove with Rally 1.0
+ self.query_params["items_per_page"] = results_per_page
self.query_body_params = []
if query_body_params:
for param, data in query_body_params.items():
- # TODO #365: Stricly check for allowed syntax. Be lenient in the pre-release and only interpret what's safely possible.
+ # TODO #365: Strictly check for allowed syntax. Be lenient in the pre-release and only interpret what's safely possible.
# build path based on param
# if not isinstance(data, list):
# raise exceptions.RallyError("%s in body-params defines %s but only lists are allowed. This may be a new syntax "
@@ -655,6 +661,8 @@ def bulk_generator(readers, client_index, pipeline, original_params):
"type": type,
# For our implementation it's always present. Either the original source file already contains this line or the generator
# has added it.
+ "action-metadata-present": True,
+ # TODO: This is the old name, remove with Rally 1.0
"action_metadata_present": True,
"body": bulk,
# This is not always equal to the bulk_size we get as parameter. The last bulk may be less than the bulk size.
| Align operation parameter names with runner implementation
For some operations, e.g. `search`, internal parameter names (e.g. `cache`), differ from the internal name (`use_request_cache`) which is confusing to advanced users that look at our internal runners.
Hence, we should align them to avoid this. | elastic/rally | diff --git a/tests/driver/runner_test.py b/tests/driver/runner_test.py
index 8ad55c61..dc7462ae 100644
--- a/tests/driver/runner_test.py
+++ b/tests/driver/runner_test.py
@@ -579,7 +579,7 @@ class QueryRunnerTests(TestCase):
params = {
"index": "unittest",
"type": "type",
- "use_request_cache": False,
+ "cache": False,
"body": {
"query": {
"match_all": {}
@@ -626,10 +626,10 @@ class QueryRunnerTests(TestCase):
params = {
"pages": 1,
- "items_per_page": 100,
+ "results-per-page": 100,
"index": "unittest",
"type": "type",
- "use_request_cache": False,
+ "cache": False,
"body": {
"query": {
"match_all": {}
@@ -677,7 +677,7 @@ class QueryRunnerTests(TestCase):
params = {
"pages": 1,
- "items_per_page": 100,
+ "results-per-page": 100,
"body": {
"query": {
"match_all": {}
@@ -738,10 +738,10 @@ class QueryRunnerTests(TestCase):
params = {
"pages": 2,
- "items_per_page": 100,
+ "results-per-page": 100,
"index": "unittest",
"type": "type",
- "use_request_cache": False,
+ "cache": False,
"body": {
"query": {
"match_all": {}
@@ -795,10 +795,10 @@ class QueryRunnerTests(TestCase):
params = {
"pages": 5,
- "items_per_page": 100,
+ "results-per-page": 100,
"index": "unittest",
"type": "type",
- "use_request_cache": False,
+ "cache": False,
"body": {
"query": {
"match_all": {}
@@ -860,10 +860,10 @@ class QueryRunnerTests(TestCase):
params = {
"pages": "all",
- "items_per_page": 100,
+ "results-per-page": 100,
"index": "unittest",
"type": "type",
- "use_request_cache": False,
+ "cache": False,
"body": {
"query": {
"match_all": {}
@@ -922,7 +922,7 @@ class PutPipelineRunnerTests(TestCase):
es.ingest.put_pipeline.assert_not_called()
@mock.patch("elasticsearch.Elasticsearch")
- def test_param_body_mandatory(self, es):
+ def test_param_id_mandatory(self, es):
r = runner.PutPipeline()
params = {
diff --git a/tests/track/params_test.py b/tests/track/params_test.py
index f4fa5498..eed38846 100644
--- a/tests/track/params_test.py
+++ b/tests/track/params_test.py
@@ -699,6 +699,7 @@ class BulkDataGeneratorTests(TestCase):
all_bulks = list(bulks)
self.assertEqual(2, len(all_bulks))
self.assertEqual({
+ "action-metadata-present": True,
"action_metadata_present": True,
"body": ["1", "2", "3", "4", "5"],
"bulk-id": "0-1",
@@ -710,6 +711,7 @@ class BulkDataGeneratorTests(TestCase):
}, all_bulks[0])
self.assertEqual({
+ "action-metadata-present": True,
"action_metadata_present": True,
"body": ["6", "7", "8"],
"bulk-id": "0-2",
@@ -755,6 +757,7 @@ class BulkDataGeneratorTests(TestCase):
all_bulks = list(bulks)
self.assertEqual(3, len(all_bulks))
self.assertEqual({
+ "action-metadata-present": True,
"action_metadata_present": True,
"body": ["1", "2", "3", "4", "5"],
"bulk-id": "0-1",
@@ -766,6 +769,7 @@ class BulkDataGeneratorTests(TestCase):
}, all_bulks[0])
self.assertEqual({
+ "action-metadata-present": True,
"action_metadata_present": True,
"body": ["1", "2", "3", "4", "5"],
"bulk-id": "0-2",
@@ -777,6 +781,7 @@ class BulkDataGeneratorTests(TestCase):
}, all_bulks[1])
self.assertEqual({
+ "action-metadata-present": True,
"action_metadata_present": True,
"body": ["1", "2", "3", "4", "5"],
"bulk-id": "0-3",
@@ -807,6 +812,7 @@ class BulkDataGeneratorTests(TestCase):
self.assertEqual(1, len(all_bulks))
# body must not contain 'foo'!
self.assertEqual({
+ "action-metadata-present": True,
"action_metadata_present": True,
"body": ["1", "2", "3"],
"bulk-id": "0-1",
@@ -1265,18 +1271,24 @@ class SearchParamSourceTests(TestCase):
})
p = source.params()
- self.assertEqual(5, len(p))
+ self.assertEqual(7, len(p))
self.assertEqual("index1", p["index"])
self.assertEqual("type1", p["type"])
self.assertEqual({
"_source_include": "some_field"
- }, p["request_params"])
- self.assertFalse(p["use_request_cache"])
+ }, p["request-params"])
+ self.assertFalse(p["cache"])
self.assertEqual({
"query": {
"match_all": {}
}
}, p["body"])
+ # backwards-compatibility options
+ self.assertFalse(p["use_request_cache"])
+ self.assertEqual({
+ "_source_include": "some_field"
+ }, p["request_params"])
+
def test_replaces_body_params(self):
import copy
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 5
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip3 install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc python3-dev"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
elasticsearch==6.2.0
-e git+https://github.com/elastic/rally.git@a5408e0d0d07b271b509df8057a7c73303604c10#egg=esrally
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- elasticsearch==6.2.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- tabulate==0.8.1
- thespian==3.9.2
- urllib3==1.22
prefix: /opt/conda/envs/rally
| [
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_early_termination",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_only_one_page",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_only_one_page_only_request_body_defined",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_request_all_pages",
"tests/driver/runner_test.py::QueryRunnerTests::test_scroll_query_with_explicit_number_of_pages",
"tests/track/params_test.py::BulkDataGeneratorTests::test_generate_bulks_from_multiple_corpora",
"tests/track/params_test.py::BulkDataGeneratorTests::test_generate_two_bulks",
"tests/track/params_test.py::BulkDataGeneratorTests::test_internal_params_take_precedence",
"tests/track/params_test.py::SearchParamSourceTests::test_passes_request_parameters"
]
| []
| [
"tests/driver/runner_test.py::RegisterRunnerTests::test_runner_class_should_be_wrapped",
"tests/driver/runner_test.py::RegisterRunnerTests::test_runner_class_with_context_manager_should_be_registered_as_is",
"tests/driver/runner_test.py::RegisterRunnerTests::test_runner_function_should_be_wrapped",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_error",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_missing_params",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_success_with_metadata",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_bulk_index_success_without_metadata",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_mixed_bulk_with_detailed_stats",
"tests/driver/runner_test.py::BulkIndexRunnerTests::test_mixed_bulk_with_simple_stats",
"tests/driver/runner_test.py::QueryRunnerTests::test_query_match_all",
"tests/driver/runner_test.py::QueryRunnerTests::test_query_match_only_request_body_defined",
"tests/driver/runner_test.py::PutPipelineRunnerTests::test_create_pipeline",
"tests/driver/runner_test.py::PutPipelineRunnerTests::test_param_body_mandatory",
"tests/driver/runner_test.py::PutPipelineRunnerTests::test_param_id_mandatory",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_accepts_better_cluster_status",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_rejects_relocating_shards",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_rejects_unknown_cluster_status",
"tests/driver/runner_test.py::ClusterHealthRunnerTests::test_waits_for_expected_cluster_status",
"tests/driver/runner_test.py::CreateIndexRunnerTests::test_creates_multiple_indices",
"tests/driver/runner_test.py::CreateIndexRunnerTests::test_param_indices_mandatory",
"tests/driver/runner_test.py::DeleteIndexRunnerTests::test_deletes_all_indices",
"tests/driver/runner_test.py::DeleteIndexRunnerTests::test_deletes_existing_indices",
"tests/driver/runner_test.py::CreateIndexTemplateRunnerTests::test_create_index_templates",
"tests/driver/runner_test.py::CreateIndexTemplateRunnerTests::test_param_templates_mandatory",
"tests/driver/runner_test.py::DeleteIndexTemplateRunnerTests::test_deletes_all_index_templates",
"tests/driver/runner_test.py::DeleteIndexTemplateRunnerTests::test_deletes_only_existing_index_templates",
"tests/driver/runner_test.py::DeleteIndexTemplateRunnerTests::test_param_templates_mandatory",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_create_index",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_delete_index",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_msearch",
"tests/driver/runner_test.py::RawRequestRunnerTests::test_issue_request_with_defaults",
"tests/driver/runner_test.py::RetryTests::test_assumes_success_if_runner_returns_non_dict",
"tests/driver/runner_test.py::RetryTests::test_does_not_retry_on_application_error_if_not_wanted",
"tests/driver/runner_test.py::RetryTests::test_does_not_retry_on_timeout_if_not_wanted",
"tests/driver/runner_test.py::RetryTests::test_is_does_not_retry_on_success",
"tests/driver/runner_test.py::RetryTests::test_is_transparent_on_application_error_when_no_retries",
"tests/driver/runner_test.py::RetryTests::test_is_transparent_on_exception_when_no_retries",
"tests/driver/runner_test.py::RetryTests::test_is_transparent_on_success_when_no_retries",
"tests/driver/runner_test.py::RetryTests::test_retries_mixed_timeout_and_application_errors",
"tests/driver/runner_test.py::RetryTests::test_retries_on_application_error_if_wanted",
"tests/driver/runner_test.py::RetryTests::test_retries_on_timeout_if_wanted_and_raises_if_no_recovery",
"tests/driver/runner_test.py::RetryTests::test_retries_on_timeout_if_wanted_and_returns_first_call",
"tests/track/params_test.py::SliceTests::test_slice_with_slice_larger_than_source",
"tests/track/params_test.py::SliceTests::test_slice_with_source_larger_than_slice",
"tests/track/params_test.py::ConflictingIdsBuilderTests::test_no_id_conflicts",
"tests/track/params_test.py::ConflictingIdsBuilderTests::test_random_conflicts",
"tests/track/params_test.py::ConflictingIdsBuilderTests::test_sequential_conflicts",
"tests/track/params_test.py::ActionMetaDataTests::test_generate_action_meta_data_with_id_conflicts",
"tests/track/params_test.py::ActionMetaDataTests::test_generate_action_meta_data_without_id_conflicts",
"tests/track/params_test.py::ActionMetaDataTests::test_source_file_action_meta_data",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_larger_than_number_of_docs",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_smaller_than_number_of_docs",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_smaller_than_number_of_docs_and_multiple_clients",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulk_with_offset",
"tests/track/params_test.py::IndexDataReaderTests::test_read_bulks_and_assume_metadata_line_in_source_file",
"tests/track/params_test.py::InvocationGeneratorTests::test_build_conflicting_ids",
"tests/track/params_test.py::InvocationGeneratorTests::test_calculate_bounds",
"tests/track/params_test.py::InvocationGeneratorTests::test_calculate_non_multiple_bounds",
"tests/track/params_test.py::InvocationGeneratorTests::test_calculate_number_of_bulks",
"tests/track/params_test.py::InvocationGeneratorTests::test_iterator_chaining_respects_context_manager",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_valid_param_source",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_fraction_larger_batch_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_fraction_smaller_batch_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_ingest_percentage_not_numeric",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_ingest_percentage_too_high",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_ingest_percentage_too_low",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_metadata_in_source_file_but_conflicts",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_negative_bulk_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_non_numeric_bulk_size",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_with_unknown_id_conflicts",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_create_without_params",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_filters_corpora",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_ingests_all_documents_by_default",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_passes_all_corpora_by_default",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_raises_exception_if_no_corpus_matches",
"tests/track/params_test.py::BulkIndexParamSourceTests::test_restricts_number_of_bulks_if_required",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_class_as_param_source",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_function_as_param_source",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_legacy_class_as_param_source",
"tests/track/params_test.py::ParamsRegistrationTests::test_can_register_legacy_function_as_param_source",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_from_track_with_settings",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_from_track_without_settings",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_inline_with_body",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_create_index_inline_without_body",
"tests/track/params_test.py::CreateIndexParamSourceTests::test_filter_index",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_delete_index_by_name",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_delete_index_from_track",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_delete_no_index",
"tests/track/params_test.py::DeleteIndexParamSourceTests::test_filter_index_from_track",
"tests/track/params_test.py::CreateIndexTemplateParamSourceTests::test_create_index_template_from_track",
"tests/track/params_test.py::CreateIndexTemplateParamSourceTests::test_create_index_template_inline",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_by_name",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_by_name_and_matching_indices",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_by_name_and_matching_indices_missing_index_pattern",
"tests/track/params_test.py::DeleteIndexTemplateParamSourceTests::test_delete_index_template_from_track",
"tests/track/params_test.py::SearchParamSourceTests::test_replaces_body_params"
]
| []
| Apache License 2.0 | 2,395 | [
"esrally/track/params.py",
"docs/track.rst",
"docs/adding_tracks.rst",
"esrally/driver/runner.py",
"docs/migrate.rst"
]
| [
"esrally/track/params.py",
"docs/track.rst",
"docs/adding_tracks.rst",
"esrally/driver/runner.py",
"docs/migrate.rst"
]
|
|
python-tap__tappy-70 | 13a0ed000f4b5e423869fa6a63a7540be17ab375 | 2018-04-12 13:29:10 | 13a0ed000f4b5e423869fa6a63a7540be17ab375 | arewm: What is AppVeyor? I do not know why `ModuleNotFoundError` is not defined, isn't that part of the standard library?
codecov-io: # [Codecov](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=h1) Report
> Merging [#70](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=desc) into [master](https://codecov.io/gh/python-tap/tappy/commit/540a1cb8d37192bc455408a15e1a331cac135065?src=pr&el=desc) will **decrease** coverage by `32.44%`.
> The diff coverage is `13.63%`.
[](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #70 +/- ##
===========================================
- Coverage 100% 67.55% -32.45%
===========================================
Files 12 11 -1
Lines 483 490 +7
===========================================
- Hits 483 331 -152
- Misses 0 159 +159
```
| [Impacted Files](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [tap/line.py](https://codecov.io/gh/python-tap/tappy/pull/70/diff?src=pr&el=tree#diff-dGFwL2xpbmUucHk=) | `85.71% <100%> (-14.29%)` | :arrow_down: |
| [tap/parser.py](https://codecov.io/gh/python-tap/tappy/pull/70/diff?src=pr&el=tree#diff-dGFwL3BhcnNlci5weQ==) | `8.51% <11.62%> (-91.49%)` | :arrow_down: |
| [tap/loader.py](https://codecov.io/gh/python-tap/tappy/pull/70/diff?src=pr&el=tree#diff-dGFwL2xvYWRlci5weQ==) | `7.27% <0%> (-92.73%)` | :arrow_down: |
| [tap/adapter.py](https://codecov.io/gh/python-tap/tappy/pull/70/diff?src=pr&el=tree#diff-dGFwL2FkYXB0ZXIucHk=) | `76.92% <0%> (-23.08%)` | :arrow_down: |
| [tap/rules.py](https://codecov.io/gh/python-tap/tappy/pull/70/diff?src=pr&el=tree#diff-dGFwL3J1bGVzLnB5) | `88.88% <0%> (-11.12%)` | :arrow_down: |
| [tap/main.py](https://codecov.io/gh/python-tap/tappy/pull/70/diff?src=pr&el=tree#diff-dGFwL21haW4ucHk=) | | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=footer). Last update [540a1cb...5d3e356](https://codecov.io/gh/python-tap/tappy/pull/70?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
arewm: All tests are finally passing. Code coverage reduction is due to inability to test the import statement failing.
arewm: FYI, this implementation does break the concept of parsing output "line-by-line", however, I do not think there is any way to get around it. When parsing a result line, if there is a following yaml block, all of those lines will be parsed.
I do not know if this change needs to be documented anywhere.
arewm: Sounds good @mblayman. I will be happy to look at the final merge. I updated the version but did not put in a release date.
FYI, I despise unicode issues.
mblayman: @arewm sorry for the long delay. Somehow I missed that you addressed issues. I'll give this another look when I have time. It will probably be some time next week when I'm at PyCon. | diff --git a/.gitignore b/.gitignore
index 39ec5ab..7755710 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ pip-log.txt
nosetests.xml
htmlcov
.cache
+.pytest_cache
# Translations
.transifex.ini
@@ -38,8 +39,9 @@ htmlcov
.project
.pydevproject
-# Vim
+# Dev
*.swp
+.vscode
# TAP
*.tap
diff --git a/.travis.yml b/.travis.yml
index de124bc..3835e8a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,6 +25,9 @@ matrix:
- brew upgrade python
- python3 -m venv venv
- source venv/bin/activate
+ - os: linux
+ python: 3.6
+ env: TOX_ENV=with_optional
- os: linux
python: 2.7
env: TOX_ENV=runner
diff --git a/Pipfile b/Pipfile
index 0b7bf1d..8499c0f 100644
--- a/Pipfile
+++ b/Pipfile
@@ -16,7 +16,8 @@ Sphinx = "*"
tox = "*"
twine = "*"
pytest = "*"
-
+more-itertools = "*"
+pyyaml = "*"
[packages]
diff --git a/Pipfile.lock b/Pipfile.lock
index 85019cc..bbc729b 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,20 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "f8c87d38088c4cd0c495b52eeeb100c09fa7d526e11f4ed07d203b6391bd7bed"
- },
- "host-environment-markers": {
- "implementation_name": "cpython",
- "implementation_version": "3.6.2",
- "os_name": "posix",
- "platform_machine": "x86_64",
- "platform_python_implementation": "CPython",
- "platform_release": "15.6.0",
- "platform_system": "Darwin",
- "platform_version": "Darwin Kernel Version 15.6.0: Mon Nov 13 21:58:35 PST 2017; root:xnu-3248.72.11~1/RELEASE_X86_64",
- "python_full_version": "3.6.2",
- "python_version": "3.6",
- "sys_platform": "darwin"
+ "sha256": "ab46296ff06c5478af07e61bc3f32d5688d7917c73249df8c89e6413e5473d3a"
},
"pipfile-spec": 6,
"requires": {},
@@ -37,130 +24,104 @@
},
"attrs": {
"hashes": [
- "sha256:a17a9573a6f475c99b551c0e0a812707ddda1ec9653bed04c13841404ed6f450",
- "sha256:1c7960ccfd6a005cd9f7ba884e6316b5e430a3f1a6c37c5f87d8b43f83b54ec9"
+ "sha256:1c7960ccfd6a005cd9f7ba884e6316b5e430a3f1a6c37c5f87d8b43f83b54ec9",
+ "sha256:a17a9573a6f475c99b551c0e0a812707ddda1ec9653bed04c13841404ed6f450"
],
"version": "==17.4.0"
},
"babel": {
"hashes": [
- "sha256:f20b2acd44f587988ff185d8949c3e208b4b3d5d20fcab7d91fe481ffa435528",
- "sha256:6007daf714d0cd5524bbe436e2d42b3c20e68da66289559341e48d2cd6d25811"
+ "sha256:8ce4cb6fdd4393edd323227cba3a077bceb2a6ce5201c902c65e730046f41f14",
+ "sha256:ad209a68d7162c4cff4b29cdebe3dec4cef75492df501b0049a9433c96ce6f80"
],
- "version": "==2.5.1"
+ "index": "pypi",
+ "version": "==2.5.3"
},
"certifi": {
"hashes": [
- "sha256:244be0d93b71e93fc0a0a479862051414d0e00e16435707e5bf5000f92e04694",
- "sha256:5ec74291ca1136b40f0379e1128ff80e866597e4e2c1e755739a913bbc3613c0"
+ "sha256:14131608ad2fd56836d33a71ee60fa1c82bc9d2c8d98b7bdbc631fe1b3cd1296",
+ "sha256:edbc3f203427eef571f79a7692bb160a2b0f7ccaa31953e99bd17e307cf63f7d"
],
- "version": "==2017.11.5"
+ "version": "==2018.1.18"
},
"chardet": {
"hashes": [
- "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691",
- "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"
+ "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
+ "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
],
"version": "==3.0.4"
},
- "configparser": {
- "hashes": [
- "sha256:5308b47021bc2340965c371f0f058cc6971a04502638d4244225c49d80db273a"
- ],
- "markers": "python_version < '3.2'",
- "version": "==3.5.0"
- },
"coverage": {
"hashes": [
- "sha256:d1ee76f560c3c3e8faada866a07a32485445e16ed2206ac8378bd90dadffb9f0",
- "sha256:007eeef7e23f9473622f7d94a3e029a45d55a92a1f083f0f3512f5ab9a669b05",
- "sha256:17307429935f96c986a1b1674f78079528833410750321d22b5fb35d1883828e",
- "sha256:845fddf89dca1e94abe168760a38271abfc2e31863fbb4ada7f9a99337d7c3dc",
- "sha256:3f4d0b3403d3e110d2588c275540649b1841725f5a11a7162620224155d00ba2",
- "sha256:4c4f368ffe1c2e7602359c2c50233269f3abe1c48ca6b288dcd0fb1d1c679733",
- "sha256:f8c55dd0f56d3d618dfacf129e010cbe5d5f94b6951c1b2f13ab1a2f79c284da",
- "sha256:cdd92dd9471e624cd1d8c1a2703d25f114b59b736b0f1f659a98414e535ffb3d",
- "sha256:2ad357d12971e77360034c1596011a03f50c0f9e1ecd12e081342b8d1aee2236",
- "sha256:e9a0e1caed2a52f15c96507ab78a48f346c05681a49c5b003172f8073da6aa6b",
- "sha256:eea9135432428d3ca7ee9be86af27cb8e56243f73764a9b6c3e0bda1394916be",
- "sha256:700d7579995044dc724847560b78ac786f0ca292867447afda7727a6fbaa082e",
- "sha256:66f393e10dd866be267deb3feca39babba08ae13763e0fc7a1063cbe1f8e49f6",
- "sha256:5ff16548492e8a12e65ff3d55857ccd818584ed587a6c2898a9ebbe09a880674",
- "sha256:d00e29b78ff610d300b2c37049a41234d48ea4f2d2581759ebcf67caaf731c31",
- "sha256:87d942863fe74b1c3be83a045996addf1639218c2cb89c5da18c06c0fe3917ea",
- "sha256:358d635b1fc22a425444d52f26287ae5aea9e96e254ff3c59c407426f44574f4",
- "sha256:81912cfe276e0069dca99e1e4e6be7b06b5fc8342641c6b472cb2fed7de7ae18",
- "sha256:079248312838c4c8f3494934ab7382a42d42d5f365f0cf7516f938dbb3f53f3f",
- "sha256:b0059630ca5c6b297690a6bf57bf2fdac1395c24b7935fd73ee64190276b743b",
- "sha256:493082f104b5ca920e97a485913de254cbe351900deed72d4264571c73464cd0",
- "sha256:e3ba9b14607c23623cf38f90b23f5bed4a3be87cbfa96e2e9f4eabb975d1e98b",
- "sha256:82cbd3317320aa63c65555aa4894bf33a13fb3a77f079059eb5935eea415938d",
- "sha256:9721f1b7275d3112dc7ccf63f0553c769f09b5c25a26ee45872c7f5c09edf6c1",
- "sha256:bd4800e32b4c8d99c3a2c943f1ac430cbf80658d884123d19639bcde90dad44a",
- "sha256:f29841e865590af72c4b90d7b5b8e93fd560f5dea436c1d5ee8053788f9285de",
- "sha256:f3a5c6d054c531536a83521c00e5d4004f1e126e2e2556ce399bef4180fbe540",
- "sha256:dd707a21332615108b736ef0b8513d3edaf12d2a7d5fc26cd04a169a8ae9b526",
- "sha256:2e1a5c6adebb93c3b175103c2f855eda957283c10cf937d791d81bef8872d6ca",
- "sha256:f87f522bde5540d8a4b11df80058281ac38c44b13ce29ced1e294963dd51a8f8",
- "sha256:a7cfaebd8f24c2b537fa6a271229b051cdac9c1734bb6f939ccfc7c055689baa",
- "sha256:309d91bd7a35063ec7a0e4d75645488bfab3f0b66373e7722f23da7f5b0f34cc",
- "sha256:0388c12539372bb92d6dde68b4627f0300d948965bbb7fc104924d715fdc0965",
- "sha256:ab3508df9a92c1d3362343d235420d08e2662969b83134f8a97dc1451cbe5e84",
- "sha256:43a155eb76025c61fc20c3d03b89ca28efa6f5be572ab6110b2fb68eda96bfea",
- "sha256:f98b461cb59f117887aa634a66022c0bd394278245ed51189f63a036516e32de",
- "sha256:b6cebae1502ce5b87d7c6f532fa90ab345cfbda62b95aeea4e431e164d498a3d",
- "sha256:a4497faa4f1c0fc365ba05eaecfb6b5d24e3c8c72e95938f9524e29dadb15e76",
- "sha256:2b4d7f03a8a6632598cbc5df15bbca9f778c43db7cf1a838f4fa2c8599a8691a",
- "sha256:1afccd7e27cac1b9617be8c769f6d8a6d363699c9b86820f40c74cfb3328921c"
- ],
- "version": "==4.4.2"
+ "sha256:03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba",
+ "sha256:0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed",
+ "sha256:104ab3934abaf5be871a583541e8829d6c19ce7bde2923b2751e0d3ca44db60a",
+ "sha256:15b111b6a0f46ee1a485414a52a7ad1d703bdf984e9ed3c288a4414d3871dcbd",
+ "sha256:198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640",
+ "sha256:1c383d2ef13ade2acc636556fd544dba6e14fa30755f26812f54300e401f98f2",
+ "sha256:28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162",
+ "sha256:2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508",
+ "sha256:337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249",
+ "sha256:3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694",
+ "sha256:3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a",
+ "sha256:3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287",
+ "sha256:3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1",
+ "sha256:4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000",
+ "sha256:56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1",
+ "sha256:5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e",
+ "sha256:69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5",
+ "sha256:6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062",
+ "sha256:701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba",
+ "sha256:7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc",
+ "sha256:76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc",
+ "sha256:7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99",
+ "sha256:7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653",
+ "sha256:7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c",
+ "sha256:8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558",
+ "sha256:9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f",
+ "sha256:9e112fcbe0148a6fa4f0a02e8d58e94470fc6cb82a5481618fea901699bf34c4",
+ "sha256:ac4fef68da01116a5c117eba4dd46f2e06847a497de5ed1d64bb99a5fda1ef91",
+ "sha256:b8815995e050764c8610dbc82641807d196927c3dbed207f0a079833ffcf588d",
+ "sha256:be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9",
+ "sha256:c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd",
+ "sha256:de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d",
+ "sha256:e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6",
+ "sha256:e4d96c07229f58cb686120f168276e434660e4358cc9cf3b0464210b04913e77",
+ "sha256:f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80",
+ "sha256:f8a923a85cb099422ad5a2e345fe877bbc89a8a8b23235824a93488150e45f6e"
+ ],
+ "index": "pypi",
+ "version": "==4.5.1"
},
"docutils": {
"hashes": [
- "sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6",
"sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6",
- "sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274"
+ "sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274",
+ "sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6"
],
"version": "==0.14"
},
- "enum34": {
- "hashes": [
- "sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79",
- "sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a",
- "sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
- "sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850"
- ],
- "markers": "python_version < '3.4'",
- "version": "==1.1.6"
- },
"flake8": {
"hashes": [
- "sha256:c7841163e2b576d435799169b78703ad6ac1bbb0f199994fc05f700b2a90ea37",
- "sha256:7253265f7abd8b313e3892944044a365e3f4ac3fcdcfb4298f55ee9ddf188ba0"
+ "sha256:7253265f7abd8b313e3892944044a365e3f4ac3fcdcfb4298f55ee9ddf188ba0",
+ "sha256:c7841163e2b576d435799169b78703ad6ac1bbb0f199994fc05f700b2a90ea37"
],
+ "index": "pypi",
"version": "==3.5.0"
},
- "funcsigs": {
- "hashes": [
- "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca",
- "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"
- ],
- "markers": "python_version < '3.3'",
- "version": "==1.0.2"
- },
"idna": {
"hashes": [
- "sha256:8c7309c718f94b3a625cb648ace320157ad16ff131ae0af362c9f21b80ef6ec4",
- "sha256:2c6a5de3089009e3da7c5dde64a141dbc8551d5b7f6cf4ed7c2568d0cc520a8f"
+ "sha256:2c6a5de3089009e3da7c5dde64a141dbc8551d5b7f6cf4ed7c2568d0cc520a8f",
+ "sha256:8c7309c718f94b3a625cb648ace320157ad16ff131ae0af362c9f21b80ef6ec4"
],
"version": "==2.6"
},
"imagesize": {
"hashes": [
- "sha256:6ebdc9e0ad188f9d1b2cdd9bc59cbe42bf931875e829e7a595e6b3abdc05cdfb",
- "sha256:0ab2c62b87987e3252f89d30b7cedbec12a01af9274af9ffa48108f2c13c6062"
+ "sha256:3620cc0cadba3f7475f9940d22431fc4d407269f1be59ec9b8edcca26440cf18",
+ "sha256:5b326e4678b6925158ccc66a9fa3122b6106d7c876ee32d7de6ce59385b96315"
],
- "version": "==0.7.1"
+ "version": "==1.0.0"
},
"jinja2": {
"hashes": [
@@ -187,21 +148,38 @@
"sha256:5ce3c71c5545b472da17b72268978914d0252980348636840bd34a00b5cc96c1",
"sha256:b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba"
],
+ "index": "pypi",
"version": "==2.0.0"
},
+ "more-itertools": {
+ "hashes": [
+ "sha256:0dd8f72eeab0d2c3bd489025bb2f6a1b8342f9b198f6fc37b52d15cfa4531fea",
+ "sha256:11a625025954c20145b37ff6309cd54e39ca94f72f6bb9576d1195db6fa2442e",
+ "sha256:c9ce7eccdcb901a2c75d326ea134e0886abfbea5f93e91cc95de9507c0816c44"
+ ],
+ "index": "pypi",
+ "version": "==4.1.0"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:e9215d2d2535d3ae866c3d6efc77d5b24a0192cce0ff20e42896cc0664f889c0",
+ "sha256:f019b770dd64e585a99714f1fd5e01c7a8f11b45635aa953fd41c689a657375b"
+ ],
+ "version": "==17.1"
+ },
"pbr": {
"hashes": [
- "sha256:60c25b7dfd054ef9bb0ae327af949dd4676aa09ac3a9471cdc871d8a9213f9ac",
- "sha256:05f61c71aaefc02d8e37c0a3eeb9815ff526ea28b3b76324769e6158d7f95be1"
+ "sha256:4e8a0ed6a8705a26768f4c3da26026013b157821fe5f95881599556ea9d91c19",
+ "sha256:dae4aaa78eafcad10ce2581fc34d694faa616727837fd8e55c1a00951ad6744f"
],
- "version": "==3.1.1"
+ "version": "==4.0.2"
},
"pkginfo": {
"hashes": [
- "sha256:31a49103180ae1518b65d3f4ce09c784e2bc54e338197668b4fb7dc539521024",
- "sha256:bb1a6aeabfc898f5df124e7e00303a5b3ec9a489535f346bfbddb081af93f89e"
+ "sha256:5878d542a4b3f237e359926384f1dde4e099c9f5525d236b1840cf704fa8d474",
+ "sha256:a39076cb3eb34c333a0dd390b568e9e1e881c7bf2cc0aee12120636816f55aee"
],
- "version": "==1.4.1"
+ "version": "==1.4.2"
},
"pluggy": {
"hashes": [
@@ -211,15 +189,17 @@
},
"py": {
"hashes": [
- "sha256:8cca5c229d225f8c1e3085be4fcf306090b00850fefad892f9d96c7b6e2f310f",
- "sha256:ca18943e28235417756316bfada6cd96b23ce60dd532642690dcfdaba988a76d"
+ "sha256:29c9fab495d7528e80ba1e343b958684f4ace687327e6f789a94bf3d1915f881",
+ "sha256:983f77f3331356039fdd792e9220b7b8ee1aa6bd2b25f567a963ff1de5a64f6a"
],
- "version": "==1.5.2"
+ "version": "==1.5.3"
},
"pycodestyle": {
"hashes": [
- "sha256:6c4245ade1edfad79c3446fadfc96b0de2759662dc29d07d80a6f27ad1ca6ba9",
- "sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766"
+ "sha256:1ec08a51c901dfe44921576ed6e4c1f5b7ecbad403f871397feedb5eb8e4fa14",
+ "sha256:5ff2fbcbab997895ba9ead77e1b38b3ebc2e5c3b8a6194ef918666e4c790a00e",
+ "sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766",
+ "sha256:6c4245ade1edfad79c3446fadfc96b0de2759662dc29d07d80a6f27ad1ca6ba9"
],
"version": "==2.3.1"
},
@@ -237,32 +217,59 @@
],
"version": "==2.2.0"
},
+ "pyparsing": {
+ "hashes": [
+ "sha256:0832bcf47acd283788593e7a0f542407bd9550a55a8a8435214a1960e04bcb04",
+ "sha256:281683241b25fe9b80ec9d66017485f6deff1af5cde372469134b56ca8447a07",
+ "sha256:8f1e18d3fd36c6795bb7e02a39fd05c611ffc2596c1e0d995d34d67630426c18",
+ "sha256:9e8143a3e15c13713506886badd96ca4b579a87fbdf49e550dbfc057d6cb218e",
+ "sha256:b8b3117ed9bdf45e14dcc89345ce638ec7e0e29b2b579fa1ecf32ce45ebac8a5",
+ "sha256:e4d45427c6e20a59bf4f88c639dcc03ce30d193112047f94012102f235853a58",
+ "sha256:fee43f17a9c4087e7ed1605bd6df994c6173c1e977d7ade7b651292fab2bd010"
+ ],
+ "version": "==2.2.0"
+ },
"pytest": {
"hashes": [
- "sha256:b84878865558194630c6147f44bdaef27222a9f153bbd4a08908b16bf285e0b1",
- "sha256:53548280ede7818f4dc2ad96608b9f08ae2cc2ca3874f2ceb6f97e3583f25bc4"
+ "sha256:6266f87ab64692112e5477eba395cfedda53b1933ccd29478e671e73b420c19c",
+ "sha256:fae491d1874f199537fd5872b5e1f0e74a009b979df9d53d1553fd03da1703e1"
],
- "version": "==3.3.2"
+ "index": "pypi",
+ "version": "==3.5.0"
},
"pytz": {
"hashes": [
- "sha256:80af0f3008046b9975242012a985f04c5df1f01eed4ec1633d56cc47a75a6a48",
- "sha256:feb2365914948b8620347784b6b6da356f31c9d03560259070b2f30cff3d469d",
- "sha256:59707844a9825589878236ff2f4e0dc9958511b7ffaae94dc615da07d4a68d33",
- "sha256:d0ef5ef55ed3d37854320d4926b04a4cb42a2e88f71da9ddfdacfde8e364f027",
- "sha256:c41c62827ce9cafacd6f2f7018e4f83a6f1986e87bfd000b8cfbd4ab5da95f1a",
- "sha256:8cc90340159b5d7ced6f2ba77694d946fc975b09f1a51d93f3ce3bb399396f94",
- "sha256:dd2e4ca6ce3785c8dd342d1853dd9052b19290d5bf66060846e5dc6b8d6667f7",
- "sha256:699d18a2a56f19ee5698ab1123bbcc1d269d061996aeb1eda6d89248d3542b82",
- "sha256:fae4cffc040921b8a2d60c6cf0b5d662c1190fe54d718271db4eb17d44a185b7"
+ "sha256:65ae0c8101309c45772196b21b74c46b2e5d11b6275c45d251b150d5da334555",
+ "sha256:c06425302f2cf668f1bba7a0a03f3c1d34d4ebeef2c72003da308b3947c7f749"
],
- "version": "==2017.3"
+ "version": "==2018.4"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:0c507b7f74b3d2dd4d1322ec8a94794927305ab4cebbe89cc47fe5e81541e6e8",
+ "sha256:16b20e970597e051997d90dc2cddc713a2876c47e3d92d59ee198700c5427736",
+ "sha256:3262c96a1ca437e7e4763e2843746588a965426550f3797a79fca9c6199c431f",
+ "sha256:326420cbb492172dec84b0f65c80942de6cedb5233c413dd824483989c000608",
+ "sha256:4474f8ea030b5127225b8894d626bb66c01cda098d47a2b0d3429b6700af9fd8",
+ "sha256:592766c6303207a20efc445587778322d7f73b161bd994f227adaa341ba212ab",
+ "sha256:5ac82e411044fb129bae5cfbeb3ba626acb2af31a8d17d175004b70862a741a7",
+ "sha256:5f84523c076ad14ff5e6c037fe1c89a7f73a3e04cf0377cb4d017014976433f3",
+ "sha256:827dc04b8fa7d07c44de11fabbc888e627fa8293b695e0f99cb544fdfa1bf0d1",
+ "sha256:b4c423ab23291d3945ac61346feeb9a0dc4184999ede5e7c43e1ffb975130ae6",
+ "sha256:bc6bced57f826ca7cb5125a10b23fd0f2fff3b7c4701d64c439a300ce665fff8",
+ "sha256:c01b880ec30b5a6e6aa67b09a2fe3fb30473008c85cd6a67359a1b15ed6d83a4",
+ "sha256:ca233c64c6e40eaa6c66ef97058cdc80e8d0157a443655baa1b2966e812807ca",
+ "sha256:e863072cdf4c72eebf179342c94e6989c67185842d9997960b3e69290b2fa269"
+ ],
+ "index": "pypi",
+ "version": "==3.12"
},
"requests": {
"hashes": [
"sha256:6a1b267aa90cac58ac3a765d067950e7dbbf75b1da07e895d1f594193a40a38b",
"sha256:9c443e7324ba5b85070c4a818ade28bfabedf16ea10206da1132edaa6dda237e"
],
+ "index": "pypi",
"version": "==2.18.4"
},
"requests-toolbelt": {
@@ -274,61 +281,55 @@
},
"six": {
"hashes": [
- "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
- "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
+ "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
+ "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
],
"version": "==1.11.0"
},
"snowballstemmer": {
"hashes": [
- "sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89",
- "sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128"
+ "sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128",
+ "sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89"
],
"version": "==1.2.1"
},
"sphinx": {
"hashes": [
- "sha256:fdf77f4f30d84a314c797d67fe7d1b46665e6c48a25699d7bf0610e05a2221d4",
- "sha256:c6de5dbdbb7a0d7d2757f4389cc00e8f6eb3c49e1772378967a12cfcf2cfe098"
+ "sha256:5a1c9a0fec678c24b9a2f5afba240c04668edb7f45c67ce2ed008996b3f21ae2",
+ "sha256:7a606d77618a753adb79e13605166e3cf6a0e5678526e044236fc1ac43650910"
],
- "version": "==1.6.5"
+ "index": "pypi",
+ "version": "==1.7.2"
},
"sphinxcontrib-websupport": {
"hashes": [
- "sha256:f4932e95869599b89bf4f80fc3989132d83c9faa5bf633e7b5e0c25dffb75da2",
- "sha256:7a85961326aa3a400cd4ad3c816d70ed6f7c740acd7ce5d78cd0a67825072eb9"
+ "sha256:7a85961326aa3a400cd4ad3c816d70ed6f7c740acd7ce5d78cd0a67825072eb9",
+ "sha256:f4932e95869599b89bf4f80fc3989132d83c9faa5bf633e7b5e0c25dffb75da2"
],
"version": "==1.0.1"
},
"tox": {
"hashes": [
- "sha256:8af30fd835a11f3ff8e95176ccba5a4e60779df4d96a9dfefa1a1704af263225",
- "sha256:752f5ec561c6c08c5ecb167d3b20f4f4ffc158c0ab78855701a75f5cef05f4b8"
+ "sha256:96efa09710a3daeeb845561ebbe1497641d9cef2ee0aea30db6969058b2bda2f",
+ "sha256:9ee7de958a43806402a38c0d2aa07fa8553f4d2c20a15b140e9f771c2afeade0"
],
- "version": "==2.9.1"
+ "index": "pypi",
+ "version": "==3.0.0"
},
"tqdm": {
"hashes": [
- "sha256:4c041f8019f7be65b8028ddde9a836f7ccc51c4637f1ff2ba9b5813d38d19d5a",
- "sha256:df32e6f127dc0ccbc675eadb33f749abbcb8f174c5cb9ec49c0cdb73aa737377"
+ "sha256:059e7dd579f2c1a2b9103a8ec76fb0bc32cc16904d7d65977edfed6b5745dc48",
+ "sha256:a180389a780f6b52268c30f40fdcb0443ab3d925574579d987eadf10c59ff90c"
],
- "version": "==4.19.5"
+ "version": "==4.22.0"
},
"twine": {
"hashes": [
- "sha256:d3ce5c480c22ccfb761cd358526e862b32546d2fe4bc93d46b5cf04ea3cc46ca",
- "sha256:caa45b7987fc96321258cd7668e3be2ff34064f5c66d2d975b641adca659c1ab"
+ "sha256:08eb132bbaec40c6d25b358f546ec1dc96ebd2638a86eea68769d9e67fe2b129",
+ "sha256:2fd9a4d9ff0bcacf41fdc40c8cb0cfaef1f1859457c9653fd1b92237cc4e9f25"
],
- "version": "==1.9.1"
- },
- "typing": {
- "hashes": [
- "sha256:349b1f9c109c84b53ac79ac1d822eaa68fc91d63b321bd9392df15098f746f53",
- "sha256:63a8255fe7c6269916baa440eb9b6a67139b0b97a01af632e7bd2842e1e02f15",
- "sha256:d514bd84b284dd3e844f0305ac07511f097e325171f6cc4a20878d11ad771849"
- ],
- "markers": "python_version < '3.5'",
- "version": "==3.6.2"
+ "index": "pypi",
+ "version": "==1.11.0"
},
"urllib3": {
"hashes": [
@@ -339,11 +340,10 @@
},
"virtualenv": {
"hashes": [
- "sha256:39d88b533b422825d644087a21e78c45cf5af0ef7a99a1fc9fbb7b481e5c85b0",
- "sha256:02f8102c2436bb03b3ee6dede1919d1dac8a427541652e5ec95171ec8adbc93a"
+ "sha256:1d7e241b431e7afce47e77f8843a276f652699d1fa4f93b9d8ce0076fd7b0b54",
+ "sha256:e8e05d4714a1c51a2f5921e62f547fcb0f713ebbe959e0a7f585cc8bef71d11f"
],
- "markers": "python_version != '3.2'",
- "version": "==15.1.0"
+ "version": "==15.2.0"
}
}
}
diff --git a/docs/consumers.rst b/docs/consumers.rst
index 91389ad..08f614c 100644
--- a/docs/consumers.rst
+++ b/docs/consumers.rst
@@ -112,6 +112,24 @@ The API specifics are listed below.
.. autoclass:: tap.parser.Parser
:members:
+TAP version 13
+~~~~~~~~~~~~~~
+
+The specification for version 13 adds support for `yaml blocks <https://testanything.org/tap-version-13-specification.html#yaml-blocks>`_
+to provide additional information about the preceding test. In order to consume
+yaml blocks, ``tappy`` requires `pyyaml <https://pypi.org/project/PyYAML/>`_ and
+`more-itertools <https://pypi.org/project/more-itertools/>`_ to be installed.
+
+These dependencies are optional. If they are not installed, TAP output will still
+be consumed, but any yaml blocks will be parsed as :class:`tap.line.Unknown`. If a
+:class:`tap.line.Result` object has an associated yaml block, :attr:`~tap.line.Result.yaml_block`
+will return the block converted to a ``dict``. Otherwise, it will return ``None``.
+
+``tappy`` provides a strict interpretation of the specification. A yaml block will
+only be associated with a result if it immediately follows that result. Any
+:class:`diagnostic <tap.line.Diagnostic>` between a :class:`result <tap.line.Result>` and a yaml
+block will result in the block lines being parsed as :class:`tap.line.Unknown`.
+
Line Categories
~~~~~~~~~~~~~~~
diff --git a/docs/releases.rst b/docs/releases.rst
index 4eeb635..2eda995 100644
--- a/docs/releases.rst
+++ b/docs/releases.rst
@@ -4,6 +4,8 @@ Releases
Version 2.3, To Be Released
---------------------------
+* Make tappy version 13 compliant by adding
+ support for parsing yaml blocks.
* `unittest.expectedFailure` now uses a TODO directive to better align
with the specification.
diff --git a/tap/__init__.py b/tap/__init__.py
index e90a1e8..9894059 100644
--- a/tap/__init__.py
+++ b/tap/__init__.py
@@ -3,4 +3,4 @@
from .runner import TAPTestRunner
__all__ = ['TAPTestRunner']
-__version__ = '2.2'
+__version__ = '2.3'
diff --git a/tap/line.py b/tap/line.py
index b38fa31..352bc1b 100644
--- a/tap/line.py
+++ b/tap/line.py
@@ -1,4 +1,9 @@
# Copyright (c) 2018, Matt Layman and contributors
+try:
+ import yaml
+ LOAD_YAML = True
+except ImportError: # pragma: no cover
+ LOAD_YAML = False
class Line(object):
@@ -16,7 +21,7 @@ class Result(Line):
def __init__(
self, ok, number=None, description='', directive=None,
- diagnostics=None):
+ diagnostics=None, raw_yaml_block=None):
self._ok = ok
if number:
self._number = int(number)
@@ -26,6 +31,7 @@ class Result(Line):
self._description = description
self.directive = directive
self.diagnostics = diagnostics
+ self._yaml_block = raw_yaml_block
@property
def category(self):
@@ -69,6 +75,25 @@ class Result(Line):
"""
return self.directive.todo
+ @property
+ def yaml_block(self):
+ """Lazy load a yaml_block.
+
+ If yaml support is not available,
+ there is an error in parsing the yaml block,
+ or no yaml is associated with this result,
+ ``None`` will be returned.
+
+ :rtype: dict
+ """
+ if LOAD_YAML and self._yaml_block is not None:
+ try:
+ yaml_dict = yaml.load(self._yaml_block)
+ return yaml_dict
+ except yaml.error.YAMLError:
+ print('Error parsing yaml block. Check formatting.')
+ return None
+
def __str__(self):
is_not = ''
if not self.ok:
diff --git a/tap/parser.py b/tap/parser.py
index e28872b..16b3e3c 100644
--- a/tap/parser.py
+++ b/tap/parser.py
@@ -1,6 +1,7 @@
# Copyright (c) 2018, Matt Layman and contributors
from io import StringIO
+import itertools
import re
import sys
@@ -8,6 +9,13 @@ from tap.directive import Directive
from tap.i18n import _
from tap.line import Bail, Diagnostic, Plan, Result, Unknown, Version
+try:
+ from more_itertools import peekable
+ import yaml # noqa
+ ENABLE_VERSION_13 = True
+except ImportError: # pragma: no cover
+ ENABLE_VERSION_13 = False
+
class Parser(object):
"""A parser for TAP files and lines."""
@@ -40,8 +48,14 @@ class Parser(object):
""", re.VERBOSE)
version = re.compile(r'^TAP version (?P<version>\d+)$')
+ yaml_block_start = re.compile(r'^(?P<indent>\s+)-')
+ yaml_block_end = re.compile(r'^\s+\.\.\.')
+
TAP_MINIMUM_DECLARED_VERSION = 13
+ def __init__(self):
+ self._try_peeking = False
+
def parse_file(self, filename):
"""Parse a TAP file to an iterable of tap.line.Line objects.
@@ -73,18 +87,35 @@ class Parser(object):
stripped from the input lines.
"""
with fh:
- for line in fh:
- yield self.parse_line(line.rstrip())
-
- def parse_line(self, text):
+ try:
+ first_line = next(fh)
+ except StopIteration:
+ return
+ first_parsed = self.parse_line(first_line.rstrip())
+ fh_new = itertools.chain([first_line], fh)
+ if first_parsed.category == 'version' and \
+ first_parsed.version >= 13:
+ if ENABLE_VERSION_13:
+ fh_new = peekable(itertools.chain([first_line], fh))
+ self._try_peeking = True
+ else: # pragma no cover
+ print("""
+WARNING: Optional imports not found, TAP 13 output will be
+ ignored. To parse yaml, see requirements in docs:
+ https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13""")
+
+ for line in fh_new:
+ yield self.parse_line(line.rstrip(), fh_new)
+
+ def parse_line(self, text, fh=None):
"""Parse a line into whatever TAP category it belongs."""
match = self.ok.match(text)
if match:
- return self._parse_result(True, match)
+ return self._parse_result(True, match, fh)
match = self.not_ok.match(text)
if match:
- return self._parse_result(False, match)
+ return self._parse_result(False, match, fh)
if self.diagnostic.match(text):
return Diagnostic(text)
@@ -114,11 +145,46 @@ class Parser(object):
return Plan(expected_tests, directive)
- def _parse_result(self, ok, match):
+ def _parse_result(self, ok, match, fh=None):
"""Parse a matching result line into a result instance."""
+ peek_match = None
+ try:
+ if fh is not None and self._try_peeking:
+ peek_match = self.yaml_block_start.match(fh.peek())
+ except StopIteration:
+ pass
+ if peek_match is None:
+ return Result(
+ ok,
+ number=match.group('number'),
+ description=match.group('description').strip(),
+ directive=Directive(match.group('directive'))
+ )
+ indent = peek_match.group('indent')
+ concat_yaml = self._extract_yaml_block(indent, fh)
return Result(
- ok, match.group('number'), match.group('description').strip(),
- Directive(match.group('directive')))
+ ok,
+ number=match.group('number'),
+ description=match.group('description').strip(),
+ directive=Directive(match.group('directive')),
+ raw_yaml_block=concat_yaml
+ )
+
+ def _extract_yaml_block(self, indent, fh):
+ """Extract a raw yaml block from a file handler"""
+ raw_yaml = []
+ indent_match = re.compile(r'^{}'.format(indent))
+ try:
+ fh.next()
+ while indent_match.match(fh.peek()):
+ raw_yaml.append(fh.next().replace(indent, '', 1))
+ # check for the end and stop adding yaml if encountered
+ if self.yaml_block_end.match(fh.peek()):
+ fh.next()
+ break
+ except StopIteration:
+ pass
+ return '\n'.join(raw_yaml)
def _parse_version(self, match):
version = int(match.group('version'))
diff --git a/tox.ini b/tox.ini
index 1c3d62d..7551011 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,6 +27,14 @@ deps =
pytest
commands = pytest {envsitepackagesdir}/tap
+[testenv:with_optional]
+deps =
+ Babel
+ mock
+ pyyaml
+ more-itertools
+commands = python tap/tests/run.py
+
[testenv:runner]
deps =
Babel
@@ -58,6 +66,8 @@ deps =
coverage
mock
codecov
+ pyyaml
+ more-itertools
commands =
coverage run tap/tests/run.py
coverage report -m --include "*/tap/*" --omit "*/tests/*"
| Make parser understand YAML diagnostics for TAP version 13
TAP version 13 added optional YAML blocks to tests. Everything else about the spec stayed the same. If the TAP parser was updated to support YAML blocks, then tappy could be considered version 13 "compliant" (even though YAML blocks never had any formally defined schema).
| python-tap/tappy | diff --git a/tap/tests/test_loader.py b/tap/tests/test_loader.py
index d90315f..8dcd5e1 100644
--- a/tap/tests/test_loader.py
+++ b/tap/tests/test_loader.py
@@ -1,6 +1,7 @@
# Copyright (c) 2018, Matt Layman and contributors
import inspect
+from io import StringIO
import os
import tempfile
import unittest
@@ -116,8 +117,8 @@ class TestLoader(TestCase):
self.assertEqual(
'Skipping this test file.', suite._tests[0]._line.description)
- @mock.patch('tap.parser.sys')
- def test_loads_from_stream(self, mock_sys):
+ @mock.patch('tap.parser.sys.stdin', StringIO(u''))
+ def test_loads_from_stream(self):
loader = Loader()
suite = loader.load_suite_from_stdin()
self.assertTrue(isinstance(suite, unittest.TestSuite))
diff --git a/tap/tests/test_parser.py b/tap/tests/test_parser.py
index 92e7aa0..4e919d1 100644
--- a/tap/tests/test_parser.py
+++ b/tap/tests/test_parser.py
@@ -1,6 +1,9 @@
# Copyright (c) 2018, Matt Layman and contributors
+from contextlib import contextmanager
import inspect
+from io import BytesIO, StringIO
+import sys
import tempfile
import unittest
@@ -12,6 +15,20 @@ except ImportError:
from tap.parser import Parser
+@contextmanager
+def captured_output():
+ if sys.version_info[0] < 3:
+ new_out, new_err = BytesIO(), BytesIO()
+ else:
+ new_out, new_err = StringIO(), StringIO()
+ old_out, old_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout, sys.stderr = new_out, new_err
+ yield sys.stdout, sys.stderr
+ finally:
+ sys.stdout, sys.stderr = old_out, old_err
+
+
class TestParser(unittest.TestCase):
"""Tests for tap.parser.Parser"""
@@ -196,18 +213,215 @@ class TestParser(unittest.TestCase):
self.assertEqual('plan', lines[0].category)
self.assertEqual('test', lines[1].category)
self.assertTrue(lines[1].ok)
+ self.assertIsNone(lines[1].yaml_block)
self.assertEqual('test', lines[2].category)
self.assertFalse(lines[2].ok)
- @mock.patch('tap.parser.sys')
- def test_parses_stdin(self, mock_sys):
- mock_sys.stdin.__iter__.return_value = iter([
- '1..2\n',
- 'ok 1 A passing test\n',
- 'not ok 2 A failing test\n',
- ])
- mock_sys.stdin.__enter__.return_value = None
- mock_sys.stdin.__exit__.return_value = None
+ def test_parses_yaml(self):
+ sample = inspect.cleandoc(
+ u"""TAP version 13
+ 1..2
+ ok 1 A passing test
+ ---
+ test: sample yaml
+ ...
+ not ok 2 A failing test""")
+ parser = Parser()
+ lines = []
+
+ for line in parser.parse_text(sample):
+ lines.append(line)
+
+ try:
+ import yaml
+ from more_itertools import peekable # noqa
+ converted_yaml = yaml.load(u"""test: sample yaml""")
+ self.assertEqual(4, len(lines))
+ self.assertEqual(13, lines[0].version)
+ self.assertEqual(converted_yaml, lines[2].yaml_block)
+ self.assertEqual('test', lines[3].category)
+ self.assertIsNone(lines[3].yaml_block)
+ except ImportError:
+ self.assertEqual(7, len(lines))
+ self.assertEqual(13, lines[0].version)
+ for l in list(range(3, 6)):
+ self.assertEqual('unknown', lines[l].category)
+ self.assertEqual('test', lines[6].category)
+
+ def test_parses_yaml_no_end(self):
+ sample = inspect.cleandoc(
+ u"""TAP version 13
+ 1..2
+ ok 1 A passing test
+ ---
+ test: sample yaml
+ not ok 2 A failing test""")
+ parser = Parser()
+ lines = []
+
+ for line in parser.parse_text(sample):
+ lines.append(line)
+
+ try:
+ import yaml
+ from more_itertools import peekable # noqa
+ converted_yaml = yaml.load(u"""test: sample yaml""")
+ self.assertEqual(4, len(lines))
+ self.assertEqual(13, lines[0].version)
+ self.assertEqual(converted_yaml, lines[2].yaml_block)
+ self.assertEqual('test', lines[3].category)
+ self.assertIsNone(lines[3].yaml_block)
+ except ImportError:
+ self.assertEqual(6, len(lines))
+ self.assertEqual(13, lines[0].version)
+ for l in list(range(3, 5)):
+ self.assertEqual('unknown', lines[l].category)
+ self.assertEqual('test', lines[5].category)
+
+ def test_parses_yaml_more_complex(self):
+ sample = inspect.cleandoc(
+ u"""TAP version 13
+ 1..2
+ ok 1 A passing test
+ ---
+ message: test
+ severity: fail
+ data:
+ got:
+ - foo
+ expect:
+ - bar""")
+ parser = Parser()
+ lines = []
+
+ for line in parser.parse_text(sample):
+ lines.append(line)
+
+ try:
+ import yaml
+ from more_itertools import peekable # noqa
+ converted_yaml = yaml.load(u"""
+ message: test
+ severity: fail
+ data:
+ got:
+ - foo
+ expect:
+ - bar""")
+ self.assertEqual(3, len(lines))
+ self.assertEqual(13, lines[0].version)
+ self.assertEqual(converted_yaml, lines[2].yaml_block)
+ except ImportError:
+ self.assertEqual(11, len(lines))
+ self.assertEqual(13, lines[0].version)
+ for l in list(range(3, 11)):
+ self.assertEqual('unknown', lines[l].category)
+
+ def test_parses_yaml_no_association(self):
+ sample = inspect.cleandoc(
+ u"""TAP version 13
+ 1..2
+ ok 1 A passing test
+ # Diagnostic line
+ ---
+ test: sample yaml
+ ...
+ not ok 2 A failing test""")
+ parser = Parser()
+ lines = []
+
+ for line in parser.parse_text(sample):
+ lines.append(line)
+
+ self.assertEqual(8, len(lines))
+ self.assertEqual(13, lines[0].version)
+ self.assertIsNone(lines[2].yaml_block)
+ self.assertEqual('diagnostic', lines[3].category)
+ for l in list(range(4, 7)):
+ self.assertEqual('unknown', lines[l].category)
+ self.assertEqual('test', lines[7].category)
+
+ def test_parses_yaml_no_start(self):
+ sample = inspect.cleandoc(
+ u"""TAP version 13
+ 1..2
+ ok 1 A passing test
+ test: sample yaml
+ ...
+ not ok 2 A failing test""")
+ parser = Parser()
+ lines = []
+
+ for line in parser.parse_text(sample):
+ lines.append(line)
+
+ self.assertEqual(6, len(lines))
+ self.assertEqual(13, lines[0].version)
+ self.assertIsNone(lines[2].yaml_block)
+ for l in list(range(3, 5)):
+ self.assertEqual('unknown', lines[l].category)
+ self.assertEqual('test', lines[5].category)
+
+ def test_malformed_yaml(self):
+ self.maxDiff = None
+ sample = inspect.cleandoc(
+ u"""TAP version 13
+ 1..2
+ ok 1 A passing test
+ ---
+ test: sample yaml
+ \tfail: tabs are not allowed!
+ ...
+ not ok 2 A failing test""")
+ yaml_err = inspect.cleandoc(
+ u"""
+WARNING: Optional imports not found, TAP 13 output will be
+ ignored. To parse yaml, see requirements in docs:
+ https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13""")
+ parser = Parser()
+ lines = []
+
+ with captured_output() as (parse_out, _):
+ for line in parser.parse_text(sample):
+ lines.append(line)
+
+ try:
+ import yaml # noqa
+ from more_itertools import peekable # noqa
+ self.assertEqual(4, len(lines))
+ self.assertEqual(13, lines[0].version)
+ with captured_output() as (out, _):
+ self.assertIsNone(lines[2].yaml_block)
+ self.assertEqual(
+ 'Error parsing yaml block. Check formatting.',
+ out.getvalue().strip())
+ self.assertEqual('test', lines[3].category)
+ self.assertIsNone(lines[3].yaml_block)
+ except ImportError:
+ self.assertEqual(8, len(lines))
+ self.assertEqual(13, lines[0].version)
+ for l in list(range(3, 7)):
+ self.assertEqual('unknown', lines[l].category)
+ self.assertEqual('test', lines[7].category)
+ self.assertEqual(
+ yaml_err, parse_out.getvalue().strip())
+
+ def test_parse_empty_file(self):
+ temp = tempfile.NamedTemporaryFile(delete=False)
+ temp.close()
+ parser = Parser()
+ lines = []
+
+ for line in parser.parse_file(temp.name):
+ lines.append(line)
+
+ self.assertEqual(0, len(lines))
+
+ @mock.patch('tap.parser.sys.stdin',
+ StringIO(u"""1..2
+ok 1 A passing test
+not ok 2 A failing test"""))
+ def test_parses_stdin(self):
parser = Parser()
lines = []
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 10
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/python-tap/tappy.git@13a0ed000f4b5e423869fa6a63a7540be17ab375#egg=tap.py
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tappy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/tappy
| [
"tap/tests/test_parser.py::TestParser::test_malformed_yaml",
"tap/tests/test_parser.py::TestParser::test_parses_file",
"tap/tests/test_parser.py::TestParser::test_parses_yaml_no_association",
"tap/tests/test_parser.py::TestParser::test_parses_yaml_no_start"
]
| []
| [
"tap/tests/test_loader.py::TestLoader::test_errors_with_multiple_version_lines",
"tap/tests/test_loader.py::TestLoader::test_errors_with_version_not_on_first_line",
"tap/tests/test_loader.py::TestLoader::test_file_does_not_exist",
"tap/tests/test_loader.py::TestLoader::test_handles_directory",
"tap/tests/test_loader.py::TestLoader::test_handles_file",
"tap/tests/test_loader.py::TestLoader::test_loads_from_stream",
"tap/tests/test_loader.py::TestLoader::test_skip_plan_aborts_loading",
"tap/tests/test_parser.py::TestParser::test_after_hash_is_not_description",
"tap/tests/test_parser.py::TestParser::test_bail_out_line",
"tap/tests/test_parser.py::TestParser::test_diagnostic_line",
"tap/tests/test_parser.py::TestParser::test_errors_on_old_version",
"tap/tests/test_parser.py::TestParser::test_finds_description",
"tap/tests/test_parser.py::TestParser::test_finds_directive",
"tap/tests/test_parser.py::TestParser::test_finds_not_ok",
"tap/tests/test_parser.py::TestParser::test_finds_number",
"tap/tests/test_parser.py::TestParser::test_finds_ok",
"tap/tests/test_parser.py::TestParser::test_finds_plan",
"tap/tests/test_parser.py::TestParser::test_finds_plan_with_skip",
"tap/tests/test_parser.py::TestParser::test_finds_skip",
"tap/tests/test_parser.py::TestParser::test_finds_todo",
"tap/tests/test_parser.py::TestParser::test_finds_version",
"tap/tests/test_parser.py::TestParser::test_ignores_plan_with_any_non_skip_directive",
"tap/tests/test_parser.py::TestParser::test_parse_empty_file",
"tap/tests/test_parser.py::TestParser::test_parses_stdin",
"tap/tests/test_parser.py::TestParser::test_parses_text",
"tap/tests/test_parser.py::TestParser::test_parses_yaml",
"tap/tests/test_parser.py::TestParser::test_parses_yaml_more_complex",
"tap/tests/test_parser.py::TestParser::test_parses_yaml_no_end",
"tap/tests/test_parser.py::TestParser::test_unrecognizable_line"
]
| []
| BSD 2-Clause "Simplified" License | 2,396 | [
"docs/releases.rst",
"tap/line.py",
".gitignore",
".travis.yml",
"Pipfile.lock",
"tap/parser.py",
"Pipfile",
"tox.ini",
"docs/consumers.rst",
"tap/__init__.py"
]
| [
"docs/releases.rst",
"tap/line.py",
".gitignore",
".travis.yml",
"Pipfile.lock",
"tap/parser.py",
"Pipfile",
"tox.ini",
"docs/consumers.rst",
"tap/__init__.py"
]
|
SAP__cf-python-logging-support-14 | 396ba738098024745205cbff22b2646a5337d1d1 | 2018-04-12 14:25:16 | 396ba738098024745205cbff22b2646a5337d1d1 | diff --git a/.gitignore b/.gitignore
index e6ff992..4a314cd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ __pycache__/
# Distribution / packaging
.Python
+.env
env/
env3/
venv/
@@ -25,6 +26,7 @@ lib/
dist/
lib64/
parts/
+.pytest_cache/
sdist/
var/
*.egg-info/
diff --git a/sap/cf_logging/formatters/json_formatter.py b/sap/cf_logging/formatters/json_formatter.py
index 9df5474..1e941d0 100644
--- a/sap/cf_logging/formatters/json_formatter.py
+++ b/sap/cf_logging/formatters/json_formatter.py
@@ -2,22 +2,26 @@
import json
import logging
import sys
+from sap.cf_logging.record.simple_log_record import SimpleLogRecord
+
+def _default_serializer(obj):
+ return str(obj)
if sys.version_info[0] == 3:
def _encode(obj):
- return json.dumps(obj)
+ return json.dumps(obj, default=_default_serializer)
else:
def _encode(obj):
- return unicode(json.dumps(obj)) # pylint: disable=undefined-variable
+ return unicode(json.dumps(obj, default=_default_serializer)) # pylint: disable=undefined-variable
class JsonFormatter(logging.Formatter):
"""
- Formatter for non-web application log
+ Format application log in JSON format
"""
def format(self, record):
- """ Format the log record into a JSON object """
- if hasattr(record, 'format'):
+ """ Format the known log records in JSON format """
+ if isinstance(record, SimpleLogRecord):
return _encode(record.format())
- return _encode(record.__dict__)
+ return super(JsonFormatter, self).format(record)
| Problem with object logging
Hello,
we run into trouble using this package in combination with the pika package (http://pika.readthedocs.io/en/0.10.0/). After initialization of the logger using cf_logging.init() the pika packages produces exceptions while trying to perform the following log operation:
LOGGER.info('Closing channel (%s): %r on %s',
reply_code, reply_text, self)
Here 'reply_code' is an int, 'reply_text' a string and 'self' is the channel object which has a working __repr__ method.
Best
Jan | SAP/cf-python-logging-support | diff --git a/tests/unit/formatters/test_json_formatter.py b/tests/unit/formatters/test_json_formatter.py
new file mode 100644
index 0000000..70ffe60
--- /dev/null
+++ b/tests/unit/formatters/test_json_formatter.py
@@ -0,0 +1,24 @@
+""" Tests json log formatting """
+import json
+import logging
+from sap.cf_logging.record.simple_log_record import SimpleLogRecord
+from sap.cf_logging.formatters.json_formatter import JsonFormatter
+
+lvl, fn, lno, func, exc_info = logging.INFO, "(unknown file)", 0, "(unknown function)", None
+formatter = JsonFormatter()
+
+
+def test_unknown_records_format():
+ """ test unknown log records will be delegated to logging.Formatter """
+ log_record = logging.LogRecord('name', lvl, fn, lno, 'msg', [], exc_info)
+ assert formatter.format(log_record) == 'msg'
+
+
+def test_non_json_serializable():
+ """ test json formatter handles non JSON serializable object """
+ class MyClass(object): pass
+ extra = { 'cls': MyClass() }
+ log_record = SimpleLogRecord(extra, None, 'name', lvl, fn, lno, 'msg', [], exc_info)
+ record_object = json.loads(formatter.format(log_record))
+ assert record_object.get('cls') is not None
+ assert 'MyClass' in record_object.get('cls')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 3.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock",
"pytest-asyncio",
"Flask",
"sanic"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiofiles==0.8.0
attrs==22.2.0
certifi==2021.5.30
click==8.0.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
Flask==2.0.3
h11==0.9.0
httpcore==0.11.1
httptools==0.6.0
httpx==0.15.4
idna==3.10
immutables==0.19
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
MarkupSafe==2.0.1
multidict==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
rfc3986==1.5.0
sanic==20.12.7
-e git+https://github.com/SAP/cf-python-logging-support.git@396ba738098024745205cbff22b2646a5337d1d1#egg=sap_cf_logging
sniffio==1.2.0
tomli==1.2.3
typing_extensions==4.1.1
ujson==4.3.0
uvloop==0.14.0
websockets==9.1
Werkzeug==2.0.3
zipp==3.6.0
| name: cf-python-logging-support
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiofiles==0.8.0
- attrs==22.2.0
- click==8.0.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- flask==2.0.3
- h11==0.9.0
- httpcore==0.11.1
- httptools==0.6.0
- httpx==0.15.4
- idna==3.10
- immutables==0.19
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- markupsafe==2.0.1
- multidict==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- rfc3986==1.5.0
- sanic==20.12.7
- sniffio==1.2.0
- tomli==1.2.3
- typing-extensions==4.1.1
- ujson==4.3.0
- uvloop==0.14.0
- websockets==9.1
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/cf-python-logging-support
| [
"tests/unit/formatters/test_json_formatter.py::test_unknown_records_format",
"tests/unit/formatters/test_json_formatter.py::test_non_json_serializable"
]
| []
| []
| []
| Apache License 2.0 | 2,397 | [
".gitignore",
"sap/cf_logging/formatters/json_formatter.py"
]
| [
".gitignore",
"sap/cf_logging/formatters/json_formatter.py"
]
|
|
python-useful-helpers__exec-helpers-20 | 2882885d5bf491fee3894b634d6cce5a8eb4288b | 2018-04-12 14:31:54 | 5f107c01eb0223d63a8ba5ad28d2bedecea4a7cd | diff --git a/README.rst b/README.rst
index 3651ff0..97bdb6c 100644
--- a/README.rst
+++ b/README.rst
@@ -181,6 +181,7 @@ Execution result object has a set of useful properties:
* `cmd` - Command
* `exit_code` - Command return code. If possible to decode using enumerators for Linux -> it used.
+* `stdin` -> `str`. Text representation of stdin.
* `stdout` -> `typing.Tuple[bytes]`. Raw stdout output.
* `stderr` -> `typing.Tuple[bytes]`. Raw stderr output.
* `stdout_bin` -> `bytearray`. Binary stdout output.
diff --git a/doc/source/ExecResult.rst b/doc/source/ExecResult.rst
index 6330ae3..05ced46 100644
--- a/doc/source/ExecResult.rst
+++ b/doc/source/ExecResult.rst
@@ -10,10 +10,12 @@ API: ExecResult
Command execution result.
- .. py:method:: __init__(cmd, stdout=None, stderr=None, exit_code=ExitCodes.EX_INVALID)
+ .. py:method:: __init__(cmd, stdin=None, stdout=None, stderr=None, exit_code=ExitCodes.EX_INVALID)
:param cmd: command
:type cmd: ``str``
+ :param stdin: STDIN
+ :type stdin: ``typing.Optional[str]``
:param stdout: binary STDOUT
:type stdout: ``typing.Optional[typing.Iterable[bytes]]``
:param stderr: binary STDERR
@@ -36,6 +38,11 @@ API: ExecResult
``str``
Command
+ .. py:attribute:: stdin
+
+ ``str``
+ Stdin input as string.
+
.. py:attribute:: stdout
``typing.Tuple[bytes]``
diff --git a/doc/source/SSHClient.rst b/doc/source/SSHClient.rst
index 73d3bee..28f030d 100644
--- a/doc/source/SSHClient.rst
+++ b/doc/source/SSHClient.rst
@@ -101,7 +101,7 @@ API: SSHClient and SSHAuth.
:param enforce: Enforce sudo enabled or disabled. By default: None
:type enforce: ``typing.Optional[bool]``
- .. py:method:: execute_async(command, get_pty=False, open_stdout=True, open_stderr=True, **kwargs)
+ .. py:method:: execute_async(command, get_pty=False, open_stdout=True, open_stderr=True, stdin=None, **kwargs)
Execute command in async mode and return channel with IO objects.
@@ -109,6 +109,8 @@ API: SSHClient and SSHAuth.
:type command: ``str``
:param get_pty: open PTY on remote machine
:type get_pty: ``bool``
+ :param stdin: pass STDIN text to the process
+ :type stdin: ``typing.Union[six.text_type, six.binary_type, None]``
:param open_stdout: open STDOUT stream for read
:type open_stdout: bool
:param open_stderr: open STDERR stream for read
@@ -116,6 +118,7 @@ API: SSHClient and SSHAuth.
:rtype: ``typing.Tuple[paramiko.Channel, paramiko.ChannelFile, paramiko.ChannelFile, paramiko.ChannelFile]``
.. versionchanged:: 1.2.0 open_stdout and open_stderr flags
+ .. versionchanged:: 1.2.0 stdin data
.. py:method:: execute(command, verbose=False, timeout=1*60*60, **kwargs)
diff --git a/doc/source/Subprocess.rst b/doc/source/Subprocess.rst
index f883e06..9aed138 100644
--- a/doc/source/Subprocess.rst
+++ b/doc/source/Subprocess.rst
@@ -45,6 +45,8 @@ API: Subprocess
:param command: Command for execution
:type command: ``str``
+ :param stdin: STDIN passed to execution
+ :type stdin: ``typing.Union[six.text_type, six.binary_type, None]``
:param verbose: Produce log.info records for command call and output
:type verbose: ``bool``
:param timeout: Timeout for command execution.
@@ -52,11 +54,11 @@ API: Subprocess
:rtype: ExecResult
:raises ExecHelperTimeoutError: Timeout exceeded
+ .. note:: stdin channel is closed after the input processing
.. versionchanged:: 1.1.0 make method
- .. versionchanged:: 1.2.0
-
- open_stdout and open_stderr flags
- default timeout 1 hour
+ .. versionchanged:: 1.2.0 open_stdout and open_stderr flags
+ .. versionchanged:: 1.2.0 default timeout 1 hour
+ .. versionchanged:: 1.2.0 stdin data
.. py:method:: check_call(command, verbose=False, timeout=1*60*60, error_info=None, expected=None, raise_on_err=True, **kwargs)
diff --git a/exec_helpers/_ssh_client_base.py b/exec_helpers/_ssh_client_base.py
index 8015cba..074d4ae 100644
--- a/exec_helpers/_ssh_client_base.py
+++ b/exec_helpers/_ssh_client_base.py
@@ -489,6 +489,7 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
self,
command, # type: str
get_pty=False, # type: bool
+ stdin=None, # type: typing.Union[six.text_type, six.binary_type, None]
open_stdout=True, # type: bool
open_stderr=True, # type: bool
**kwargs
@@ -499,6 +500,8 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
:type command: str
:param get_pty: open PTY on remote machine
:type get_pty: bool
+ :param stdin: pass STDIN text to the process
+ :type stdin: typing.Union[six.text_type, six.binary_type, None]
:param open_stdout: open STDOUT stream for read
:type open_stdout: bool
:param open_stderr: open STDERR stream for read
@@ -511,6 +514,7 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
]
.. versionchanged:: 1.2.0 open_stdout and open_stderr flags
+ .. versionchanged:: 1.2.0 stdin data
"""
cmd_for_log = self._mask_command(
cmd=command,
@@ -532,7 +536,7 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
width_pixels=0, height_pixels=0
)
- stdin = chan.makefile('wb')
+ _stdin = chan.makefile('wb')
stdout = chan.makefile('rb') if open_stdout else None
stderr = chan.makefile_stderr('rb') if open_stderr else None
cmd = "{command}\n".format(command=command)
@@ -545,11 +549,17 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
)
chan.exec_command(cmd) # nosec # Sanitize on caller side
if stdout.channel.closed is False:
- self.auth.enter_password(stdin)
- stdin.flush()
+ self.auth.enter_password(_stdin)
+ _stdin.flush()
else:
chan.exec_command(cmd) # nosec # Sanitize on caller side
- return chan, stdin, stderr, stdout
+ if stdin is not None:
+ if not isinstance(stdin, six.binary_type):
+ stdin = stdin.encode(encoding='utf-8')
+ _stdin.write('{}\n'.format(stdin))
+ _stdin.flush()
+
+ return chan, _stdin, stderr, stdout
def __exec_command(
self,
diff --git a/exec_helpers/exec_result.py b/exec_helpers/exec_result.py
index a82c8fa..924497d 100644
--- a/exec_helpers/exec_result.py
+++ b/exec_helpers/exec_result.py
@@ -42,7 +42,7 @@ class ExecResult(object):
"""Execution result."""
__slots__ = [
- '__cmd', '__stdout', '__stderr', '__exit_code',
+ '__cmd', '__stdin', '__stdout', '__stderr', '__exit_code',
'__timestamp',
'__stdout_str', '__stderr_str', '__stdout_brief', '__stderr_brief',
'__lock'
@@ -51,6 +51,7 @@ class ExecResult(object):
def __init__(
self,
cmd, # type: str
+ stdin=None, # type: typing.Union[six.text_type, six.binary_type, None]
stdout=None, # type: typing.Optional[typing.Iterable[bytes]]
stderr=None, # type: typing.Optional[typing.Iterable[bytes]]
exit_code=proc_enums.ExitCodes.EX_INVALID # type: _type_exit_codes
@@ -59,6 +60,8 @@ class ExecResult(object):
:param cmd: command
:type cmd: str
+ :param stdin: string STDIN
+ :type stdin: typing.Union[six.text_type, six.binary_type, None]
:param stdout: binary STDOUT
:type stdout: typing.Optional[typing.Iterable[bytes]]
:param stderr: binary STDERR
@@ -69,6 +72,9 @@ class ExecResult(object):
self.__lock = threading.RLock()
self.__cmd = cmd
+ if stdin is not None and not isinstance(stdin, six.text_type):
+ stdin = self._get_str_from_bin(stdin)
+ self.__stdin = stdin
self.__stdout = tuple(stdout) if stdout is not None else ()
self.__stderr = tuple(stderr) if stderr is not None else ()
@@ -141,6 +147,14 @@ class ExecResult(object):
"""
return self.__cmd
+ @property
+ def stdin(self): # type: () -> str
+ """Stdin input as string.
+
+ :rtype: str
+ """
+ return self.__stdin
+
@property
def stdout(self): # type: () -> typing.Tuple[bytes]
"""Stdout output as list of binaries.
diff --git a/exec_helpers/subprocess_runner.py b/exec_helpers/subprocess_runner.py
index 4730fab..49492f2 100644
--- a/exec_helpers/subprocess_runner.py
+++ b/exec_helpers/subprocess_runner.py
@@ -169,6 +169,7 @@ class Subprocess(six.with_metaclass(SingletonMeta, _api.ExecHelper)):
timeout=constants.DEFAULT_TIMEOUT, # type: typing.Optional[int]
verbose=False, # type: bool
log_mask_re=None, # type: typing.Optional[str]
+ stdin=None, # type: typing.Union[six.text_type, six.binary_type, None]
open_stdout=True, # type: bool
open_stderr=True, # type: bool
):
@@ -183,6 +184,7 @@ class Subprocess(six.with_metaclass(SingletonMeta, _api.ExecHelper)):
:param log_mask_re: regex lookup rule to mask command for logger.
all MATCHED groups will be replaced by '<*masked*>'
:type log_mask_re: typing.Optional[str]
+ :type stdin: typing.Union[six.text_type, six.binary_type, None]
:param open_stdout: open STDOUT stream for read
:type open_stdout: bool
:param open_stderr: open STDERR stream for read
@@ -277,12 +279,19 @@ class Subprocess(six.with_metaclass(SingletonMeta, _api.ExecHelper)):
# Run
self.__process = subprocess.Popen(
args=[command],
- stdin=subprocess.PIPE,
stdout=subprocess.PIPE if open_stdout else devnull,
stderr=subprocess.PIPE if open_stderr else devnull,
- shell=True, cwd=cwd, env=env,
+ stdin=subprocess.PIPE,
+ shell=True,
+ cwd=cwd,
+ env=env,
universal_newlines=False,
)
+ if stdin is not None:
+ if not isinstance(stdin, six.binary_type):
+ stdin = stdin.encode(encoding='utf-8')
+ self.__process.stdin.write(stdin)
+ self.__process.stdin.close()
# Poll output
| Missing support to non-interactive STDIN (only 1 input string)
It can be useful providing a string to stdin, in particular with Subprocess. | python-useful-helpers/exec-helpers | diff --git a/test/test_ssh_client.py b/test/test_ssh_client.py
index 010b83d..1b2372a 100644
--- a/test/test_ssh_client.py
+++ b/test/test_ssh_client.py
@@ -61,6 +61,7 @@ stderr_str = b''.join(stderr_list).strip().decode('utf-8')
encoded_cmd = base64.b64encode(
"{}\n".format(command).encode('utf-8')
).decode('utf-8')
+print_stdin = 'read line; echo "$line"'
@mock.patch('exec_helpers._ssh_client_base.logger', autospec=True)
@@ -1046,6 +1047,99 @@ class TestExecute(unittest.TestCase):
command, verbose, timeout=None,
error_info=None, raise_on_err=raise_on_err)
+ @mock.patch('exec_helpers.ssh_client.SSHClient.check_call')
+ def test_check_stdin_str(self, check_call, client, policy, logger):
+ stdin = u'this is a line'
+
+ return_value = exec_result.ExecResult(
+ cmd=print_stdin,
+ stdin=stdin,
+ stdout=[stdin],
+ stderr=[],
+ exit_code=0
+ )
+ check_call.return_value = return_value
+
+ verbose = False
+ raise_on_err = True
+
+ # noinspection PyTypeChecker
+ result = self.get_ssh().check_call(
+ command=print_stdin,
+ stdin=stdin,
+ verbose=verbose,
+ timeout=None,
+ raise_on_err=raise_on_err)
+ check_call.assert_called_once_with(
+ command=print_stdin,
+ stdin=stdin,
+ verbose=verbose,
+ timeout=None,
+ raise_on_err=raise_on_err)
+ self.assertEqual(result, return_value)
+
+ @mock.patch('exec_helpers.ssh_client.SSHClient.check_call')
+ def test_check_stdin_bytes(self, check_call, client, policy, logger):
+ stdin = b'this is a line'
+
+ return_value = exec_result.ExecResult(
+ cmd=print_stdin,
+ stdin=stdin,
+ stdout=[stdin],
+ stderr=[],
+ exit_code=0
+ )
+ check_call.return_value = return_value
+
+ verbose = False
+ raise_on_err = True
+
+ # noinspection PyTypeChecker
+ result = self.get_ssh().check_call(
+ command=print_stdin,
+ stdin=stdin,
+ verbose=verbose,
+ timeout=None,
+ raise_on_err=raise_on_err)
+ check_call.assert_called_once_with(
+ command=print_stdin,
+ stdin=stdin,
+ verbose=verbose,
+ timeout=None,
+ raise_on_err=raise_on_err)
+ self.assertEqual(result, return_value)
+
+ @mock.patch('exec_helpers.ssh_client.SSHClient.check_call')
+ def test_check_stdin_bytearray(self, check_call, client, policy, logger):
+ stdin = bytearray(b'this is a line')
+
+ return_value = exec_result.ExecResult(
+ cmd=print_stdin,
+ stdin=stdin,
+ stdout=[stdin],
+ stderr=[],
+ exit_code=0
+ )
+ check_call.return_value = return_value
+
+ verbose = False
+ raise_on_err = True
+
+ # noinspection PyTypeChecker
+ result = self.get_ssh().check_call(
+ command=print_stdin,
+ stdin=stdin,
+ verbose=verbose,
+ timeout=None,
+ raise_on_err=raise_on_err)
+ check_call.assert_called_once_with(
+ command=print_stdin,
+ stdin=stdin,
+ verbose=verbose,
+ timeout=None,
+ raise_on_err=raise_on_err)
+ self.assertEqual(result, return_value)
+
@mock.patch('exec_helpers._ssh_client_base.logger', autospec=True)
@mock.patch(
diff --git a/test/test_subprocess_runner.py b/test/test_subprocess_runner.py
index 11c3d85..28f0b6d 100644
--- a/test/test_subprocess_runner.py
+++ b/test/test_subprocess_runner.py
@@ -33,6 +33,7 @@ command = 'ls ~\nline 2\nline 3\nline с кирилицей'
command_log = u"Executing command:\n{!s}\n".format(command.rstrip())
stdout_list = [b' \n', b'2\n', b'3\n', b' \n']
stderr_list = [b' \n', b'0\n', b'1\n', b' \n']
+print_stdin = 'read line; echo "$line"'
class FakeFileStream(object):
@@ -577,3 +578,96 @@ class TestSubprocessRunnerHelpers(unittest.TestCase):
check_call.assert_called_once_with(
command, verbose, timeout=None,
error_info=None, raise_on_err=raise_on_err)
+
+ @mock.patch('exec_helpers.subprocess_runner.Subprocess.check_call')
+ def test_check_stdin_str(self, check_call, logger):
+ stdin = u'this is a line'
+
+ expected_result = exec_helpers.ExecResult(
+ cmd=print_stdin,
+ stdin=stdin,
+ stdout=[stdin],
+ stderr=[b''],
+ exit_code=0,
+ )
+ check_call.return_value = expected_result
+
+ verbose = False
+
+ runner = exec_helpers.Subprocess()
+
+ # noinspection PyTypeChecker
+ result = runner.check_call(
+ command=print_stdin,
+ verbose=verbose,
+ timeout=None,
+ stdin=stdin)
+ check_call.assert_called_once_with(
+ command=print_stdin,
+ verbose=verbose,
+ timeout=None,
+ stdin=stdin)
+ self.assertEqual(result, expected_result)
+ assert result == expected_result
+
+ @mock.patch('exec_helpers.subprocess_runner.Subprocess.check_call')
+ def test_check_stdin_bytes(self, check_call, logger):
+ stdin = b'this is a line'
+
+ expected_result = exec_helpers.ExecResult(
+ cmd=print_stdin,
+ stdin=stdin,
+ stdout=[stdin],
+ stderr=[b''],
+ exit_code=0,
+ )
+ check_call.return_value = expected_result
+
+ verbose = False
+
+ runner = exec_helpers.Subprocess()
+
+ # noinspection PyTypeChecker
+ result = runner.check_call(
+ command=print_stdin,
+ verbose=verbose,
+ timeout=None,
+ stdin=stdin)
+ check_call.assert_called_once_with(
+ command=print_stdin,
+ verbose=verbose,
+ timeout=None,
+ stdin=stdin)
+ self.assertEqual(result, expected_result)
+ assert result == expected_result
+
+ @mock.patch('exec_helpers.subprocess_runner.Subprocess.check_call')
+ def test_check_stdin_bytearray(self, check_call, logger):
+ stdin = bytearray(b'this is a line')
+
+ expected_result = exec_helpers.ExecResult(
+ cmd=print_stdin,
+ stdin=stdin,
+ stdout=[stdin],
+ stderr=[b''],
+ exit_code=0,
+ )
+ check_call.return_value = expected_result
+
+ verbose = False
+
+ runner = exec_helpers.Subprocess()
+
+ # noinspection PyTypeChecker
+ result = runner.check_call(
+ command=print_stdin,
+ verbose=verbose,
+ timeout=None,
+ stdin=stdin)
+ check_call.assert_called_once_with(
+ command=print_stdin,
+ verbose=verbose,
+ timeout=None,
+ stdin=stdin)
+ self.assertEqual(result, expected_result)
+ assert result == expected_result
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 7
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-html",
"pytest-sugar",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | advanced-descriptors==4.0.3
bcrypt==4.3.0
cffi==1.17.1
coverage==7.8.0
cryptography==44.0.2
exceptiongroup==1.2.2
-e git+https://github.com/python-useful-helpers/exec-helpers.git@2882885d5bf491fee3894b634d6cce5a8eb4288b#egg=exec_helpers
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
paramiko==3.5.1
pluggy==1.5.0
pycparser==2.22
PyNaCl==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-html==4.1.1
pytest-metadata==3.1.1
pytest-sugar==1.0.0
PyYAML==6.0.2
six==1.17.0
tenacity==9.0.0
termcolor==2.5.0
threaded==4.2.0
tomli==2.2.1
| name: exec-helpers
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- advanced-descriptors==4.0.3
- bcrypt==4.3.0
- cffi==1.17.1
- coverage==7.8.0
- cryptography==44.0.2
- exceptiongroup==1.2.2
- exec-helpers==1.1.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- paramiko==3.5.1
- pluggy==1.5.0
- pycparser==2.22
- pynacl==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-html==4.1.1
- pytest-metadata==3.1.1
- pytest-sugar==1.0.0
- pyyaml==6.0.2
- six==1.17.0
- tenacity==9.0.0
- termcolor==2.5.0
- threaded==4.2.0
- tomli==2.2.1
prefix: /opt/conda/envs/exec-helpers
| [
"test/test_ssh_client.py::TestExecute::test_check_stdin_bytearray",
"test/test_ssh_client.py::TestExecute::test_check_stdin_bytes",
"test/test_ssh_client.py::TestExecute::test_check_stdin_str",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_check_stdin_bytearray",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_check_stdin_bytes",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_check_stdin_str"
]
| []
| [
"test/test_ssh_client.py::TestExecute::test_check_call",
"test/test_ssh_client.py::TestExecute::test_check_call_expected",
"test/test_ssh_client.py::TestExecute::test_check_stderr",
"test/test_ssh_client.py::TestExecute::test_execute",
"test/test_ssh_client.py::TestExecute::test_execute_async",
"test/test_ssh_client.py::TestExecute::test_execute_async_mask_command",
"test/test_ssh_client.py::TestExecute::test_execute_async_no_stdout_stderr",
"test/test_ssh_client.py::TestExecute::test_execute_async_pty",
"test/test_ssh_client.py::TestExecute::test_execute_async_sudo",
"test/test_ssh_client.py::TestExecute::test_execute_async_sudo_password",
"test/test_ssh_client.py::TestExecute::test_execute_async_verbose",
"test/test_ssh_client.py::TestExecute::test_execute_async_with_no_sudo_enforce",
"test/test_ssh_client.py::TestExecute::test_execute_async_with_none_enforce",
"test/test_ssh_client.py::TestExecute::test_execute_async_with_sudo_enforce",
"test/test_ssh_client.py::TestExecute::test_execute_mask_command",
"test/test_ssh_client.py::TestExecute::test_execute_no_stderr",
"test/test_ssh_client.py::TestExecute::test_execute_no_stdout",
"test/test_ssh_client.py::TestExecute::test_execute_no_stdout_stderr",
"test/test_ssh_client.py::TestExecute::test_execute_timeout",
"test/test_ssh_client.py::TestExecute::test_execute_timeout_fail",
"test/test_ssh_client.py::TestExecute::test_execute_together",
"test/test_ssh_client.py::TestExecute::test_execute_together_exceptions",
"test/test_ssh_client.py::TestExecute::test_execute_verbose",
"test/test_ssh_client.py::TestExecuteThrowHost::test_execute_through_host_auth",
"test/test_ssh_client.py::TestExecuteThrowHost::test_execute_through_host_no_creds",
"test/test_ssh_client.py::TestSftp::test_download",
"test/test_ssh_client.py::TestSftp::test_exists",
"test/test_ssh_client.py::TestSftp::test_isdir",
"test/test_ssh_client.py::TestSftp::test_isfile",
"test/test_ssh_client.py::TestSftp::test_mkdir",
"test/test_ssh_client.py::TestSftp::test_open",
"test/test_ssh_client.py::TestSftp::test_rm_rf",
"test/test_ssh_client.py::TestSftp::test_stat",
"test/test_ssh_client.py::TestSftp::test_upload_dir",
"test/test_ssh_client.py::TestSftp::test_upload_file",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_call",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_call_verbose",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_context_manager",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_execute_mask_global",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_execute_mask_local",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_execute_no_stderr",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_execute_no_stdout",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_execute_no_stdout_stderr",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_execute_timeout_fail",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_check_call",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_check_call_expected",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_check_stderr"
]
| []
| Apache License 2.0 | 2,398 | [
"README.rst",
"doc/source/Subprocess.rst",
"doc/source/SSHClient.rst",
"exec_helpers/subprocess_runner.py",
"doc/source/ExecResult.rst",
"exec_helpers/_ssh_client_base.py",
"exec_helpers/exec_result.py"
]
| [
"README.rst",
"doc/source/Subprocess.rst",
"doc/source/SSHClient.rst",
"exec_helpers/subprocess_runner.py",
"doc/source/ExecResult.rst",
"exec_helpers/_ssh_client_base.py",
"exec_helpers/exec_result.py"
]
|
|
mkdocs__mkdocs-1467 | ee37665418a8c1a20eff6041fe17d0d107dde07c | 2018-04-12 14:50:34 | 27f06517db4d8b73b162f2a2af65826ddcc8db54 | waylan: Apparently [unittest.TestCase.assertLogs][1] was added in Python 3.4. A PY2.7 backport can be found [here]. The [Unittest2][3] lib also contains a backport, however, I don;t see the need for that entire lib for one feature.
There is also the [testfixtures][4] lib, which includes some [logging capture tools][5], along with a bunch of other stuff that could simplify a bunch of our tests.
[1]: https://docs.python.org/3.4/library/unittest.html#unittest.TestCase.assertLogs
[2]: https://stackoverflow.com/a/29938398/866026
[3]: https://pypi.python.org/pypi/unittest2
[4]: http://testfixtures.readthedocs.io/en/latest/index.html
[5]: http://testfixtures.readthedocs.io/en/latest/logging.html | diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md
index 6f0f3518..7afca02a 100644
--- a/docs/about/release-notes.md
+++ b/docs/about/release-notes.md
@@ -66,6 +66,7 @@ authors should review how [search and themes] interact.
### Other Changes and Additions to Development Version
+* Add MkDocs version check to gh-deploy script (#640).
* Improve Markdown extension error messages. (#782).
* Drop official support for Python 3.3 and set `tornado>=5.0` (#1427).
* Add support for GitLab edit links (#1435).
diff --git a/mkdocs/__main__.py b/mkdocs/__main__.py
index 8c231180..a3f7cdae 100644
--- a/mkdocs/__main__.py
+++ b/mkdocs/__main__.py
@@ -89,6 +89,7 @@ remote_branch_help = ("The remote branch to commit to for Github Pages. This "
remote_name_help = ("The remote name to commit to for Github Pages. This "
"overrides the value specified in config")
force_help = "Force the push to the repository."
+ignore_version_help = "Ignore check that build is not being deployed with an older version of MkDocs."
pgk_dir = os.path.dirname(os.path.abspath(__file__))
@@ -172,8 +173,9 @@ def build_command(clean, config_file, strict, theme, theme_dir, site_dir):
@click.option('-b', '--remote-branch', help=remote_branch_help)
@click.option('-r', '--remote-name', help=remote_name_help)
@click.option('--force', is_flag=True, help=force_help)
[email protected]('--ignore-version', is_flag=True, help=ignore_version_help)
@common_options
-def gh_deploy_command(config_file, clean, message, remote_branch, remote_name, force):
+def gh_deploy_command(config_file, clean, message, remote_branch, remote_name, force, ignore_version):
"""Deploy your documentation to GitHub Pages"""
try:
cfg = config.load_config(
@@ -182,7 +184,7 @@ def gh_deploy_command(config_file, clean, message, remote_branch, remote_name, f
remote_name=remote_name
)
build.build(cfg, dirty=not clean)
- gh_deploy.gh_deploy(cfg, message=message, force=force)
+ gh_deploy.gh_deploy(cfg, message=message, force=force, ignore_version=ignore_version)
except exceptions.ConfigurationError as e: # pragma: no cover
# Avoid ugly, unhelpful traceback
raise SystemExit('\n' + str(e))
diff --git a/mkdocs/commands/gh_deploy.py b/mkdocs/commands/gh_deploy.py
index ccadab6d..b3d0548f 100644
--- a/mkdocs/commands/gh_deploy.py
+++ b/mkdocs/commands/gh_deploy.py
@@ -2,6 +2,8 @@ from __future__ import unicode_literals
import logging
import subprocess
import os
+import re
+from pkg_resources import parse_version
import mkdocs
from mkdocs.utils import ghp_import
@@ -49,20 +51,49 @@ def _get_remote_url(remote_name):
return host, path
-def gh_deploy(config, message=None, force=False):
+def _check_version(branch):
+
+ proc = subprocess.Popen(['git', 'show', '-s', '--format=%s', 'refs/heads/{}'.format(branch)],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout, _ = proc.communicate()
+ msg = stdout.decode('utf-8').strip()
+ m = re.search(r'\d+(\.\d+)+', msg, re.X | re.I)
+ previousv = parse_version(m.group()) if m else None
+ currentv = parse_version(mkdocs.__version__)
+ if not previousv:
+ log.warn('Version check skipped: No version specificed in previous deployment.')
+ elif currentv > previousv:
+ log.info(
+ 'Previous deployment was done with MkDocs version {}; '
+ 'you are deploying with a newer version ({})'.format(previousv, currentv)
+ )
+ elif currentv < previousv:
+ log.error(
+ 'Deployment terminated: Previous deployment was made with MkDocs version {}; '
+ 'you are attempting to deploy with an older version ({}). Use --ignore-version '
+ 'to deploy anyway.'.format(previousv, currentv)
+ )
+ raise SystemExit(1)
+
+
+def gh_deploy(config, message=None, force=False, ignore_version=False):
if not _is_cwd_git_repo():
log.error('Cannot deploy - this directory does not appear to be a git '
'repository')
+ remote_branch = config['remote_branch']
+ remote_name = config['remote_name']
+
+ if not ignore_version:
+ _check_version(remote_branch)
+
if message is None:
message = default_message
sha = _get_current_sha(os.path.dirname(config.config_file_path))
message = message.format(version=mkdocs.__version__, sha=sha)
- remote_branch = config['remote_branch']
- remote_name = config['remote_name']
-
log.info("Copying '%s' to '%s' branch and pushing to GitHub.",
config['site_dir'], config['remote_branch'])
| Warn users if they are using an old/different version
pip has a nice feature where they warn users if they are using an old version and prompt them to update. I think it would be neat to add something like this. The code they use for it is here: https://github.com/pypa/pip/blob/7.0.3/pip/utils/outdated.py#L95
| mkdocs/mkdocs | diff --git a/mkdocs/tests/base.py b/mkdocs/tests/base.py
index 20ba63ec..a17fc2e0 100644
--- a/mkdocs/tests/base.py
+++ b/mkdocs/tests/base.py
@@ -2,9 +2,14 @@ from __future__ import unicode_literals
import textwrap
import markdown
import os
+import logging
+import collections
+import unittest
+
from mkdocs import toc
from mkdocs import config
+from mkdocs import utils
def dedent(text):
@@ -37,3 +42,97 @@ def load_config(**cfg):
errors_warnings = conf.validate()
assert(errors_warnings == ([], [])), errors_warnings
return conf
+
+
+# Backport unittest.TestCase.assertLogs for Python 2.7
+# see https://github.com/python/cpython/blob/3.6/Lib/unittest/case.py
+
+if not utils.PY3:
+ _LoggingWatcher = collections.namedtuple("_LoggingWatcher",
+ ["records", "output"])
+
+ class _CapturingHandler(logging.Handler):
+ """
+ A logging handler capturing all (raw and formatted) logging output.
+ """
+
+ def __init__(self):
+ logging.Handler.__init__(self)
+ self.watcher = _LoggingWatcher([], [])
+
+ def flush(self):
+ pass
+
+ def emit(self, record):
+ self.watcher.records.append(record)
+ msg = self.format(record)
+ self.watcher.output.append(msg)
+
+ class _AssertLogsContext(object):
+ """A context manager used to implement TestCase.assertLogs()."""
+
+ LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
+
+ def __init__(self, test_case, logger_name, level):
+ self.test_case = test_case
+ self.logger_name = logger_name
+ if level:
+ self.level = logging._levelNames.get(level, level)
+ else:
+ self.level = logging.INFO
+ self.msg = None
+
+ def __enter__(self):
+ if isinstance(self.logger_name, logging.Logger):
+ logger = self.logger = self.logger_name
+ else:
+ logger = self.logger = logging.getLogger(self.logger_name)
+ formatter = logging.Formatter(self.LOGGING_FORMAT)
+ handler = _CapturingHandler()
+ handler.setFormatter(formatter)
+ self.watcher = handler.watcher
+ self.old_handlers = logger.handlers[:]
+ self.old_level = logger.level
+ self.old_propagate = logger.propagate
+ logger.handlers = [handler]
+ logger.setLevel(self.level)
+ logger.propagate = False
+ return handler.watcher
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.logger.handlers = self.old_handlers
+ self.logger.propagate = self.old_propagate
+ self.logger.setLevel(self.old_level)
+ if exc_type is not None:
+ # let unexpected exceptions pass through
+ return False
+ if len(self.watcher.records) == 0:
+ self._raiseFailure(
+ "no logs of level {} or higher triggered on {}"
+ .format(logging.getLevelName(self.level), self.logger.name))
+
+ def _raiseFailure(self, standardMsg):
+ msg = self.test_case._formatMessage(self.msg, standardMsg)
+ raise self.test_case.failureException(msg)
+
+ class LogTestCase(unittest.TestCase):
+ def assertLogs(self, logger=None, level=None):
+ """Fail unless a log message of level *level* or higher is emitted
+ on *logger_name* or its children. If omitted, *level* defaults to
+ INFO and *logger* defaults to the root logger.
+ This method must be used as a context manager, and will yield
+ a recording object with two attributes: `output` and `records`.
+ At the end of the context manager, the `output` attribute will
+ be a list of the matching formatted log messages and the
+ `records` attribute will be a list of the corresponding LogRecord
+ objects.
+ Example::
+ with self.assertLogs('foo', level='INFO') as cm:
+ logging.getLogger('foo').info('first message')
+ logging.getLogger('foo.bar').error('second message')
+ self.assertEqual(cm.output, ['INFO:foo:first message',
+ 'ERROR:foo.bar:second message'])
+ """
+ return _AssertLogsContext(self, logger, level)
+else:
+ LogTestCase = unittest.TestCase
diff --git a/mkdocs/tests/cli_tests.py b/mkdocs/tests/cli_tests.py
index 57f12845..939096f7 100644
--- a/mkdocs/tests/cli_tests.py
+++ b/mkdocs/tests/cli_tests.py
@@ -346,6 +346,8 @@ class CLITests(unittest.TestCase):
self.assertEqual(g_kwargs['message'], None)
self.assertTrue('force' in g_kwargs)
self.assertEqual(g_kwargs['force'], False)
+ self.assertTrue('ignore_version' in g_kwargs)
+ self.assertEqual(g_kwargs['ignore_version'], False)
self.assertEqual(mock_build.call_count, 1)
b_args, b_kwargs = mock_build.call_args
self.assertTrue('dirty' in b_kwargs)
@@ -471,3 +473,19 @@ class CLITests(unittest.TestCase):
self.assertEqual(g_kwargs['force'], True)
self.assertEqual(mock_build.call_count, 1)
self.assertEqual(mock_load_config.call_count, 1)
+
+ @mock.patch('mkdocs.config.load_config', autospec=True)
+ @mock.patch('mkdocs.commands.build.build', autospec=True)
+ @mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
+ def test_gh_deploy_ognore_version(self, mock_gh_deploy, mock_build, mock_load_config):
+
+ result = self.runner.invoke(
+ cli.cli, ['gh-deploy', '--ignore-version'], catch_exceptions=False)
+
+ self.assertEqual(result.exit_code, 0)
+ self.assertEqual(mock_gh_deploy.call_count, 1)
+ g_args, g_kwargs = mock_gh_deploy.call_args
+ self.assertTrue('ignore_version' in g_kwargs)
+ self.assertEqual(g_kwargs['ignore_version'], True)
+ self.assertEqual(mock_build.call_count, 1)
+ self.assertEqual(mock_load_config.call_count, 1)
diff --git a/mkdocs/tests/gh_deploy_tests.py b/mkdocs/tests/gh_deploy_tests.py
index 7b8793bc..495a6e90 100644
--- a/mkdocs/tests/gh_deploy_tests.py
+++ b/mkdocs/tests/gh_deploy_tests.py
@@ -3,8 +3,9 @@ from __future__ import unicode_literals
import unittest
import mock
-from mkdocs.tests.base import load_config
+from mkdocs.tests.base import load_config, LogTestCase
from mkdocs.commands import gh_deploy
+from mkdocs import __version__
class TestGitHubDeploy(unittest.TestCase):
@@ -99,6 +100,32 @@ class TestGitHubDeploy(unittest.TestCase):
)
gh_deploy.gh_deploy(config)
+ @mock.patch('mkdocs.commands.gh_deploy._is_cwd_git_repo', return_value=True)
+ @mock.patch('mkdocs.commands.gh_deploy._get_current_sha', return_value='shashas')
+ @mock.patch('mkdocs.commands.gh_deploy._get_remote_url', return_value=(None, None))
+ @mock.patch('mkdocs.commands.gh_deploy._check_version')
+ @mock.patch('mkdocs.commands.gh_deploy.ghp_import.ghp_import', return_value=(True, ''))
+ def test_deploy_ignore_version_default(self, mock_import, check_version, get_remote, get_sha, is_repo):
+
+ config = load_config(
+ remote_branch='test',
+ )
+ gh_deploy.gh_deploy(config)
+ check_version.assert_called_once()
+
+ @mock.patch('mkdocs.commands.gh_deploy._is_cwd_git_repo', return_value=True)
+ @mock.patch('mkdocs.commands.gh_deploy._get_current_sha', return_value='shashas')
+ @mock.patch('mkdocs.commands.gh_deploy._get_remote_url', return_value=(None, None))
+ @mock.patch('mkdocs.commands.gh_deploy._check_version')
+ @mock.patch('mkdocs.commands.gh_deploy.ghp_import.ghp_import', return_value=(True, ''))
+ def test_deploy_ignore_version(self, mock_import, check_version, get_remote, get_sha, is_repo):
+
+ config = load_config(
+ remote_branch='test',
+ )
+ gh_deploy.gh_deploy(config, ignore_version=True)
+ check_version.assert_not_called()
+
@mock.patch('mkdocs.utils.ghp_import.ghp_import')
@mock.patch('mkdocs.commands.gh_deploy.log')
def test_deploy_error(self, mock_log, mock_import):
@@ -112,3 +139,43 @@ class TestGitHubDeploy(unittest.TestCase):
self.assertRaises(SystemExit, gh_deploy.gh_deploy, config)
mock_log.error.assert_called_once_with('Failed to deploy to GitHub with error: \n%s',
error_string)
+
+
+class TestGitHubDeployLogs(LogTestCase):
+
+ @mock.patch('subprocess.Popen')
+ def test_mkdocs_newer(self, mock_popeno):
+
+ mock_popeno().communicate.return_value = (b'Deployed 12345678 with MkDocs version: 0.1.2\n', b'')
+
+ with self.assertLogs('mkdocs', level='INFO') as cm:
+ gh_deploy._check_version('gh-pages')
+ self.assertEqual(
+ cm.output, ['INFO:mkdocs.commands.gh_deploy:Previous deployment was done with MkDocs '
+ 'version 0.1.2; you are deploying with a newer version ({})'.format(__version__)]
+ )
+
+ @mock.patch('subprocess.Popen')
+ def test_mkdocs_older(self, mock_popeno):
+
+ mock_popeno().communicate.return_value = (b'Deployed 12345678 with MkDocs version: 10.1.2\n', b'')
+
+ with self.assertLogs('mkdocs', level='ERROR') as cm:
+ self.assertRaises(SystemExit, gh_deploy._check_version, 'gh-pages')
+ self.assertEqual(
+ cm.output, ['ERROR:mkdocs.commands.gh_deploy:Deployment terminated: Previous deployment was made with '
+ 'MkDocs version 10.1.2; you are attempting to deploy with an older version ({}). Use '
+ '--ignore-version to deploy anyway.'.format(__version__)]
+ )
+
+ @mock.patch('subprocess.Popen')
+ def test_version_unknown(self, mock_popeno):
+
+ mock_popeno().communicate.return_value = (b'No version specified\n', b'')
+
+ with self.assertLogs('mkdocs', level='WARNING') as cm:
+ gh_deploy._check_version('gh-pages')
+ self.assertEqual(
+ cm.output,
+ ['WARNING:mkdocs.commands.gh_deploy:Version check skipped: No version specificed in previous deployment.']
+ )
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | 0.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-mock",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
exceptiongroup==1.2.2
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
mdx-gh-links==0.4
-e git+https://github.com/mkdocs/mkdocs.git@ee37665418a8c1a20eff6041fe17d0d107dde07c#egg=mkdocs
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-mock==3.14.0
PyYAML==6.0.2
tomli==2.2.1
tornado==6.4.2
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- exceptiongroup==1.2.2
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- mdx-gh-links==0.4
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-mock==3.14.0
- pyyaml==6.0.2
- tomli==2.2.1
- tornado==6.4.2
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_defaults",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_ognore_version",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_ignore_version",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_ignore_version_default",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeployLogs::test_mkdocs_newer",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeployLogs::test_mkdocs_older",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeployLogs::test_version_unknown"
]
| []
| [
"mkdocs/tests/cli_tests.py::CLITests::test_build_clean",
"mkdocs/tests/cli_tests.py::CLITests::test_build_config_file",
"mkdocs/tests/cli_tests.py::CLITests::test_build_defaults",
"mkdocs/tests/cli_tests.py::CLITests::test_build_dirty",
"mkdocs/tests/cli_tests.py::CLITests::test_build_quiet",
"mkdocs/tests/cli_tests.py::CLITests::test_build_site_dir",
"mkdocs/tests/cli_tests.py::CLITests::test_build_strict",
"mkdocs/tests/cli_tests.py::CLITests::test_build_theme",
"mkdocs/tests/cli_tests.py::CLITests::test_build_theme_dir",
"mkdocs/tests/cli_tests.py::CLITests::test_build_verbose",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_clean",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_config_file",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_dirty",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_force",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_message",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_remote_branch",
"mkdocs/tests/cli_tests.py::CLITests::test_gh_deploy_remote_name",
"mkdocs/tests/cli_tests.py::CLITests::test_new",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_config_file",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_default",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_dev_addr",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_dirtyreload",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_livereload",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_no_livereload",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_strict",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_theme",
"mkdocs/tests/cli_tests.py::CLITests::test_serve_theme_dir",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_error",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_hostname",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_deploy_no_cname",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_current_sha",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_remote_url_enterprise",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_remote_url_http",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_get_remote_url_ssh",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_is_cwd_git_repo",
"mkdocs/tests/gh_deploy_tests.py::TestGitHubDeploy::test_is_cwd_not_git_repo"
]
| []
| BSD 2-Clause "Simplified" License | 2,399 | [
"mkdocs/commands/gh_deploy.py",
"docs/about/release-notes.md",
"mkdocs/__main__.py"
]
| [
"mkdocs/commands/gh_deploy.py",
"docs/about/release-notes.md",
"mkdocs/__main__.py"
]
|
numpy__numpydoc-172 | 40b3733b4bf4604ff7622b5eab592edcef750591 | 2018-04-12 22:31:17 | 1f197e32a31db2280b71be183e6724f9457ce78e | pvanmulbregt: Under Py3.6 nosetests passes, but the latexpdf fails with `Error: Unicode char \u8: not set up for use with LaTeX.` [On my local machine the message is slightly different: `Package inputenc Error: Unicode char (U+200B) (inputenc) not set up for use with LaTeX.`]
Under Py 2.7, one nosetest test fails with `UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 71: ordinal not in range(128)`, inside jinja2's `Template.render()`, presumably from encountering the UTF-8 for the ZERO WIDTH SPACE, '\xe2\x80\x8b'.
Implication: Inserting a Unicode character into the output, either as Unicode or UTF-8, breaks downstream processing.
@jnothman suggests using ".." instead and that seems to behave better.
ev-br: Also closes gh-28
pvanmulbregt: Summary:
1. Using `".."` instead of unicode zero-width space allows multiple functions on a line, works under both Py2 and Py3, and keeps the latexpdf builds working.
2. If any line is missing the description field, then the rendering of the See Also block misaligns the subsequent description fields, the yellow div block is too short, and the last line in the See Also block may interfere with the next section. That appears to be a style-sheet issue, independent of this change.
larsoner: This seems like a useful extension/bugfix to me, and it sounds like using `..` instead of unicode fixed the build problems. @jnothman what's the `needs-decision` point here? I think we just need to make sure that everything works properly, and have another set of eyes on the code.
I rebuilt MNE doc with this and things still looked okay. I also rebuilt SciPy and things looked okay. And when I modified this:
```
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
...
```
To be this:
```
See Also
--------
lfiltic, lfilter_zi : Construct initial conditions or steady state, respectively, for `lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
...
```
It looked good:

| diff --git a/doc/format.rst b/doc/format.rst
index 87f5ff7..cdeec0b 100644
--- a/doc/format.rst
+++ b/doc/format.rst
@@ -252,13 +252,21 @@ The sections of a function's docstring are:
Support for the **Yields** section was added in `numpydoc
<https://github.com/numpy/numpydoc>`_ version 0.6.
-7. **Other Parameters**
+7. **Receives**
+
+ Explanation of parameters passed to a generator's ``.send()`` method,
+ formatted as for Parameters, above. Since, like for Yields and Returns, a
+ single object is always passed to the method, this may describe either the
+ single parameter, or positional arguments passed as a tuple. If a docstring
+ includes Receives it must also include Yields.
+
+8. **Other Parameters**
An optional section used to describe infrequently used parameters.
It should only be used if a function has a large number of keyword
parameters, to prevent cluttering the **Parameters** section.
-8. **Raises**
+9. **Raises**
An optional section detailing which errors get raised and under
what conditions::
@@ -271,16 +279,16 @@ The sections of a function's docstring are:
This section should be used judiciously, i.e., only for errors
that are non-obvious or have a large chance of getting raised.
-9. **Warns**
+10. **Warns**
An optional section detailing which warnings get raised and
under what conditions, formatted similarly to Raises.
-10. **Warnings**
+11. **Warnings**
An optional section with cautions to the user in free text/reST.
-11. **See Also**
+12. **See Also**
An optional section used to refer to related code. This section
can be very useful, but should be used judiciously. The goal is to
@@ -319,7 +327,7 @@ The sections of a function's docstring are:
func_b, func_c_, func_d
func_e
-12. **Notes**
+13. **Notes**
An optional section that provides additional information about the
code, possibly including a discussion of the algorithm. This
@@ -364,7 +372,7 @@ The sections of a function's docstring are:
where filename is a path relative to the reference guide source
directory.
-13. **References**
+14. **References**
References cited in the **notes** section may be listed here,
e.g. if you cited the article below using the text ``[1]_``,
@@ -397,7 +405,7 @@ The sections of a function's docstring are:
.. highlight:: pycon
-14. **Examples**
+15. **Examples**
An optional section for examples, using the `doctest
<http://docs.python.org/library/doctest.html>`_ format.
diff --git a/numpydoc/docscrape.py b/numpydoc/docscrape.py
index f3453c6..02afd88 100644
--- a/numpydoc/docscrape.py
+++ b/numpydoc/docscrape.py
@@ -16,6 +16,7 @@ except ImportError:
import copy
import sys
+from sphinx.ext.autodoc import ALL
def strip_blank_lines(l):
"Remove leading and trailing blank lines from a list of lines"
@@ -127,6 +128,7 @@ class NumpyDocString(Mapping):
'Parameters': [],
'Returns': [],
'Yields': [],
+ 'Receives': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
@@ -236,9 +238,41 @@ class NumpyDocString(Mapping):
return params
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):"
- r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+ # See also supports the following formats.
+ #
+ # <FUNCNAME>
+ # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
+ # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE*
+ # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*
+
+ # <FUNCNAME> is one of
+ # <PLAIN_FUNCNAME>
+ # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
+ # where
+ # <PLAIN_FUNCNAME> is a legal function name, and
+ # <ROLE> is any nonempty sequence of word characters.
+ # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
+ # <DESC> is a string describing the function.
+
+ _role = r":(?P<role>\w+):"
+ _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_.-]+)`"
+ _funcplain = r"(?P<name2>[a-zA-Z0-9_.-]+)"
+ _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
+ _funcnamenext = _funcname.replace('role', 'rolenext')
+ _funcnamenext = _funcnamenext.replace('name', 'namenext')
+ _description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
+ _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
+ _line_rgx = re.compile(
+ r"^\s*" +
+ r"(?P<allfuncs>" + # group for all function names
+ _funcname +
+ r"(?P<morefuncs>([,]\s+" + _funcnamenext + r")*)" +
+ r")" + # end of "allfuncs"
+ r"(?P<trailing>\s*,)?" + # Some function lists have a trailing comma
+ _description)
+
+ # Empty <DESC> elements are replaced with '..'
+ empty_description = '..'
def _parse_see_also(self, content):
"""
@@ -248,52 +282,49 @@ class NumpyDocString(Mapping):
func_name1, func_name2, :meth:`func_name`, func_name3
"""
+
items = []
def parse_item_name(text):
- """Match ':role:`name`' or 'name'"""
- m = self._name_rgx.match(text)
- if m:
- g = m.groups()
- if g[1] is None:
- return g[3], None
- else:
- return g[2], g[1]
- raise ParseError("%s is not a item name" % text)
+ """Match ':role:`name`' or 'name'."""
+ m = self._func_rgx.match(text)
+ if not m:
+ raise ParseError("%s is not a item name" % text)
+ role = m.group('role')
+ name = m.group('name') if role else m.group('name2')
+ return name, role, m.end()
- def push_item(name, rest):
- if not name:
- return
- name, role = parse_item_name(name)
- items.append((name, list(rest), role))
- del rest[:]
-
- current_func = None
rest = []
-
for line in content:
if not line.strip():
continue
- m = self._name_rgx.match(line)
- if m and line[m.end():].strip().startswith(':'):
- push_item(current_func, rest)
- current_func, line = line[:m.end()], line[m.end():]
- rest = [line.split(':', 1)[1].strip()]
- if not rest[0]:
- rest = []
- elif not line.startswith(' '):
- push_item(current_func, rest)
- current_func = None
- if ',' in line:
- for func in line.split(','):
- if func.strip():
- push_item(func, [])
- elif line.strip():
- current_func = line
- elif current_func is not None:
+ line_match = self._line_rgx.match(line)
+ description = None
+ if line_match:
+ description = line_match.group('desc')
+ if line_match.group('trailing'):
+ self._error_location(
+ 'Unexpected comma after function list at index %d of '
+ 'line "%s"' % (line_match.end('trailing'), line),
+ error=False)
+ if not description and line.startswith(' '):
rest.append(line.strip())
- push_item(current_func, rest)
+ elif line_match:
+ funcs = []
+ text = line_match.group('allfuncs')
+ while True:
+ if not text.strip():
+ break
+ name, role, match_end = parse_item_name(text)
+ funcs.append((name, role))
+ text = text[match_end:].strip()
+ if text and text[0] == ',':
+ text = text[1:].strip()
+ rest = list(filter(None, [description]))
+ items.append((funcs, rest))
+ else:
+ raise ParseError("%s is not a item name" % line)
return items
def _parse_index(self, section, content):
@@ -350,6 +381,9 @@ class NumpyDocString(Mapping):
if has_returns and has_yields:
msg = 'Docstring contains both a Returns and Yields section.'
raise ValueError(msg)
+ if not has_yields and 'Receives' in section_names:
+ msg = 'Docstring contains a Receives section but not Yields.'
+ raise ValueError(msg)
for (section, content) in sections:
if not section.startswith('..'):
@@ -359,8 +393,8 @@ class NumpyDocString(Mapping):
self._error_location("The section %s appears twice"
% section)
- if section in ('Parameters', 'Returns', 'Yields', 'Raises',
- 'Warns', 'Other Parameters', 'Attributes',
+ if section in ('Parameters', 'Returns', 'Yields', 'Receives',
+ 'Raises', 'Warns', 'Other Parameters', 'Attributes',
'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
@@ -440,24 +474,30 @@ class NumpyDocString(Mapping):
return []
out = []
out += self._str_header("See Also")
+ out += ['']
last_had_desc = True
- for func, desc, role in self['See Also']:
- if role:
- link = ':%s:`%s`' % (role, func)
- elif func_role:
- link = ':%s:`%s`' % (func_role, func)
- else:
- link = "`%s`_" % func
- if desc or last_had_desc:
- out += ['']
- out += [link]
- else:
- out[-1] += ", %s" % link
+ for funcs, desc in self['See Also']:
+ assert isinstance(funcs, list)
+ links = []
+ for func, role in funcs:
+ if role:
+ link = ':%s:`%s`' % (role, func)
+ elif func_role:
+ link = ':%s:`%s`' % (func_role, func)
+ else:
+ link = "`%s`_" % func
+ links.append(link)
+ link = ', '.join(links)
+ out += [link]
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
+ out += self._str_indent([self.empty_description])
+
+ if last_had_desc:
+ out += ['']
out += ['']
return out
@@ -484,7 +524,7 @@ class NumpyDocString(Mapping):
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Yields',
+ for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',
'Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
@@ -593,18 +633,25 @@ class ClassDoc(NumpyDocString):
NumpyDocString.__init__(self, doc)
- if config.get('show_class_members', True):
+ _members = config.get('members', [])
+ if _members is ALL:
+ _members = None
+ _exclude = config.get('exclude-members', [])
+
+ if config.get('show_class_members', True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
-
for field, items in [('Methods', self.methods),
('Attributes', self.properties)]:
if not self[field]:
doc_list = []
for name in sorted(items):
+ if (name in _exclude or
+ (_members and name not in _members)):
+ continue
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append(
diff --git a/numpydoc/docscrape_sphinx.py b/numpydoc/docscrape_sphinx.py
index 4cc95d8..9b23235 100644
--- a/numpydoc/docscrape_sphinx.py
+++ b/numpydoc/docscrape_sphinx.py
@@ -374,6 +374,7 @@ class SphinxDocString(NumpyDocString):
'parameters': self._str_param_list('Parameters'),
'returns': self._str_returns('Returns'),
'yields': self._str_returns('Yields'),
+ 'receives': self._str_returns('Receives'),
'other_parameters': self._str_param_list('Other Parameters'),
'raises': self._str_param_list('Raises'),
'warns': self._str_param_list('Warns'),
diff --git a/numpydoc/numpydoc.py b/numpydoc/numpydoc.py
index c8e676f..e25241d 100644
--- a/numpydoc/numpydoc.py
+++ b/numpydoc/numpydoc.py
@@ -27,8 +27,9 @@ try:
except ImportError:
from collections import Callable
import hashlib
+import itertools
-from docutils.nodes import citation, Text, reference
+from docutils.nodes import citation, Text, section, comment, reference
import sphinx
from sphinx.addnodes import pending_xref, desc_content, only
@@ -73,18 +74,39 @@ def rename_references(app, what, name, obj, options, lines):
sixu('.. [%s]') % new_r)
-def _ascend(node, cls):
- while node and not isinstance(node, cls):
- node = node.parent
- return node
+def _is_cite_in_numpydoc_docstring(citation_node):
+ # Find DEDUPLICATION_TAG in comment as last node of sibling section
+
+ # XXX: I failed to use citation_node.traverse to do this:
+ section_node = citation_node.parent
+
+ def is_docstring_section(node):
+ return isinstance(node, (section, desc_content))
+
+ while not is_docstring_section(section_node):
+ section_node = section_node.parent
+ if section_node is None:
+ return False
+
+ sibling_sections = itertools.chain(section_node.traverse(is_docstring_section,
+ include_self=True,
+ descend=False,
+ siblings=True))
+ for sibling_section in sibling_sections:
+ if not sibling_section.children:
+ continue
+ last_child = sibling_section.children[-1]
+ if not isinstance(last_child, comment):
+ continue
+ if last_child.rawsource.strip() == DEDUPLICATION_TAG.strip():
+ return True
+ return False
def relabel_references(app, doc):
# Change 'hash-ref' to 'ref' in label text
for citation_node in doc.traverse(citation):
- if _ascend(citation_node, desc_content) is None:
- # no desc node in ancestry -> not in a docstring
- # XXX: should we also somehow check it's in a References section?
+ if not _is_cite_in_numpydoc_docstring(citation_node):
continue
label_node = citation_node[0]
prefix, _, new_label = label_node[0].astext().partition('-')
@@ -132,6 +154,7 @@ def mangle_docstrings(app, what, name, obj, options, lines):
app.config.numpydoc_show_inherited_class_members,
'class_members_toctree': app.config.numpydoc_class_members_toctree}
+ cfg.update(options or {})
u_NL = sixu('\n')
if what == 'module':
# Strip top title
@@ -177,7 +200,7 @@ def mangle_signature(app, what, name, obj, options, sig, retann):
if not hasattr(obj, '__doc__'):
return
- doc = get_doc_object(obj)
+ doc = get_doc_object(obj, config={'show_class_members': False})
sig = doc['Signature'] or getattr(obj, '__text_signature__', None)
if sig:
sig = re.sub(sixu("^[^(]*"), sixu(""), sig)
diff --git a/numpydoc/templates/numpydoc_docstring.rst b/numpydoc/templates/numpydoc_docstring.rst
index 1900db5..79ab1f8 100644
--- a/numpydoc/templates/numpydoc_docstring.rst
+++ b/numpydoc/templates/numpydoc_docstring.rst
@@ -4,6 +4,7 @@
{{parameters}}
{{returns}}
{{yields}}
+{{receives}}
{{other_parameters}}
{{raises}}
{{warns}}
| multiple entries in a See Also section
Observed in scipy.interpolate docs
```
See Also
------------
splev, splrep : FITPACK wrappers
```
The description text (the stuff after the colon) does not show up in the generated html docs.
Moving the description to a separate line,
```
See Also
------------
splev, splrep
FITPACK wrappers
```
does not help either
| numpy/numpydoc | diff --git a/numpydoc/tests/test_docscrape.py b/numpydoc/tests/test_docscrape.py
index 2085948..b4b7e03 100644
--- a/numpydoc/tests/test_docscrape.py
+++ b/numpydoc/tests/test_docscrape.py
@@ -150,6 +150,25 @@ int
doc_yields = NumpyDocString(doc_yields_txt)
+doc_sent_txt = """
+Test generator
+
+Yields
+------
+a : int
+ The number of apples.
+
+Receives
+--------
+b : int
+ The number of bananas.
+c : int
+ The number of oranges.
+
+"""
+doc_sent = NumpyDocString(doc_sent_txt)
+
+
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
@@ -216,6 +235,38 @@ def test_yields():
assert desc[0].endswith(end)
+def test_sent():
+ section = doc_sent['Receives']
+ assert len(section) == 2
+ truth = [('b', 'int', 'bananas.'),
+ ('c', 'int', 'oranges.')]
+ for (arg, arg_type, desc), (arg_, arg_type_, end) in zip(section, truth):
+ assert arg == arg_
+ assert arg_type == arg_type_
+ assert desc[0].startswith('The number of')
+ assert desc[0].endswith(end)
+
+
+def test_returnyield():
+ doc_text = """
+Test having returns and yields.
+
+Returns
+-------
+int
+ The number of apples.
+
+Yields
+------
+a : int
+ The number of apples.
+b : int
+ The number of bananas.
+
+"""
+ assert_raises(ValueError, NumpyDocString, doc_text)
+
+
def test_returnyield():
doc_text = """
Test having returns and yields.
@@ -335,7 +386,7 @@ def line_by_line_compare(a, b):
b = textwrap.dedent(b)
a = [l.rstrip() for l in _strip_blank_lines(a).split('\n')]
b = [l.rstrip() for l in _strip_blank_lines(b).split('\n')]
- assert all(x == y for x, y in zip(a, b))
+ assert all(x == y for x, y in zip(a, b)), str([[x, y] for x, y in zip(a, b) if x != y])
def test_str():
@@ -403,7 +454,7 @@ See Also
--------
`some`_, `other`_, `funcs`_
-
+ ..
`otherfunc`_
relationship
@@ -468,6 +519,25 @@ int
.. index:: """)
+def test_receives_str():
+ line_by_line_compare(str(doc_sent),
+"""Test generator
+
+Yields
+------
+a : int
+ The number of apples.
+
+Receives
+--------
+b : int
+ The number of bananas.
+c : int
+ The number of oranges.
+
+.. index:: """)
+
+
def test_no_index_in_str():
assert "index" not in str(NumpyDocString("""Test idx
@@ -553,7 +623,7 @@ of the one-dimensional normal distribution to higher dimensions.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
-
+ ..
:obj:`otherfunc`
relationship
@@ -709,36 +779,46 @@ def test_see_also():
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
+ func_f1, func_g1, :meth:`func_h1`, func_j1
+ func_f2, func_g2, :meth:`func_h2`, func_j2 : description of multiple
:obj:`baz.obj_q`
:obj:`~baz.obj_r`
:class:`class_j`: fubar
foobar
""")
- assert len(doc6['See Also']) == 13
- for func, desc, role in doc6['See Also']:
- if func in ('func_a', 'func_b', 'func_c', 'func_f',
- 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q',
- '~baz.obj_r'):
- assert(not desc)
- else:
- assert(desc)
-
- if func == 'func_h':
- assert role == 'meth'
- elif func == 'baz.obj_q' or func == '~baz.obj_r':
- assert role == 'obj'
- elif func == 'class_j':
- assert role == 'class'
- else:
- assert role is None
+ assert len(doc6['See Also']) == 10
+ for funcs, desc in doc6['See Also']:
+ for func, role in funcs:
+ if func in ('func_a', 'func_b', 'func_c', 'func_f',
+ 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q',
+ 'func_f1', 'func_g1', 'func_h1', 'func_j1',
+ '~baz.obj_r'):
+ assert not desc, str([func, desc])
+ elif func in ('func_f2', 'func_g2', 'func_h2', 'func_j2'):
+ assert desc, str([func, desc])
+ else:
+ assert desc, str([func, desc])
+
+ if func == 'func_h':
+ assert role == 'meth'
+ elif func == 'baz.obj_q' or func == '~baz.obj_r':
+ assert role == 'obj'
+ elif func == 'class_j':
+ assert role == 'class'
+ elif func in ['func_h1', 'func_h2']:
+ assert role == 'meth'
+ else:
+ assert role is None, str([func, role])
- if func == 'func_d':
- assert desc == ['some equivalent func']
- elif func == 'foo.func_e':
- assert desc == ['some other func over', 'multiple lines']
- elif func == 'class_j':
- assert desc == ['fubar', 'foobar']
+ if func == 'func_d':
+ assert desc == ['some equivalent func']
+ elif func == 'foo.func_e':
+ assert desc == ['some other func over', 'multiple lines']
+ elif func == 'class_j':
+ assert desc == ['fubar', 'foobar']
+ elif func in ['func_f2', 'func_g2', 'func_h2', 'func_j2']:
+ assert desc == ['description of multiple'], str([desc, ['description of multiple']])
def test_see_also_parse_error():
@@ -796,11 +876,13 @@ This should be ignored and warned about
pass
with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', UserWarning)
NumpyDocString(doc_text)
assert len(w) == 1
assert "Unknown section Mope" == str(w[0].message)
with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', UserWarning)
SphinxClassDoc(BadSection)
assert len(w) == 1
assert('test_docscrape.test_unknown_section.<locals>.BadSection'
@@ -1267,6 +1349,24 @@ def test_args_and_kwargs():
Keyword arguments
""")
+def test_autoclass():
+ cfg=dict(show_class_members=True,
+ show_inherited_class_members=True)
+ doc = SphinxClassDoc(str, '''
+A top section before
+
+.. autoclass:: str
+ ''', config=cfg)
+ line_by_line_compare(str(doc), r'''
+A top section before
+
+.. autoclass:: str
+
+.. rubric:: Methods
+
+
+ ''')
+
if __name__ == "__main__":
import pytest
diff --git a/numpydoc/tests/test_numpydoc.py b/numpydoc/tests/test_numpydoc.py
new file mode 100644
index 0000000..3a0bd12
--- /dev/null
+++ b/numpydoc/tests/test_numpydoc.py
@@ -0,0 +1,56 @@
+# -*- encoding:utf-8 -*-
+from __future__ import division, absolute_import, print_function
+
+from numpydoc.numpydoc import mangle_docstrings
+from sphinx.ext.autodoc import ALL
+
+class MockConfig():
+ numpydoc_use_plots = False
+ numpydoc_use_blockquotes = True
+ numpydoc_show_class_members = True
+ numpydoc_show_inherited_class_members = True
+ numpydoc_class_members_toctree = True
+ templates_path = []
+ numpydoc_edit_link = False
+ numpydoc_citation_re = '[a-z0-9_.-]+'
+
+class MockBuilder():
+ config = MockConfig()
+
+class MockApp():
+ config = MockConfig()
+ builder = MockBuilder()
+ translator = None
+
+
+app = MockApp()
+app.builder.app = app
+
+def test_mangle_docstrings():
+ s ='''
+A top section before
+
+.. autoclass:: str
+ '''
+ lines = s.split('\n')
+ doc = mangle_docstrings(MockApp(), 'class', 'str', str, {}, lines)
+ assert 'rpartition' in [x.strip() for x in lines]
+
+ lines = s.split('\n')
+ doc = mangle_docstrings(MockApp(), 'class', 'str', str, {'members': ['upper']}, lines)
+ assert 'rpartition' not in [x.strip() for x in lines]
+ assert 'upper' in [x.strip() for x in lines]
+
+ lines = s.split('\n')
+ doc = mangle_docstrings(MockApp(), 'class', 'str', str, {'exclude-members': ALL}, lines)
+ assert 'rpartition' not in [x.strip() for x in lines]
+ assert 'upper' not in [x.strip() for x in lines]
+
+ lines = s.split('\n')
+ doc = mangle_docstrings(MockApp(), 'class', 'str', str,
+ {'exclude-members': ['upper']}, lines)
+ assert 'rpartition' in [x.strip() for x in lines]
+ assert 'upper' not in [x.strip() for x in lines]
+
+if __name__ == "__main__":
+ import pytest
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 5
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y texlive texlive-latex-extra latexmk",
"pip install --upgrade pip setuptools"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/numpy/numpydoc.git@40b3733b4bf4604ff7622b5eab592edcef750591#egg=numpydoc
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytz==2025.2
requests==2.27.1
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: numpydoc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- babel==2.11.0
- charset-normalizer==2.0.12
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- jinja2==3.0.3
- markupsafe==2.0.1
- pip==21.3.1
- pygments==2.14.0
- pytz==2025.2
- requests==2.27.1
- setuptools==59.6.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- urllib3==1.26.20
prefix: /opt/conda/envs/numpydoc
| [
"numpydoc/tests/test_docscrape.py::test_sent",
"numpydoc/tests/test_docscrape.py::test_str",
"numpydoc/tests/test_docscrape.py::test_sphinx_str",
"numpydoc/tests/test_docscrape.py::test_see_also",
"numpydoc/tests/test_numpydoc.py::test_mangle_docstrings"
]
| []
| [
"numpydoc/tests/test_docscrape.py::test_signature",
"numpydoc/tests/test_docscrape.py::test_summary",
"numpydoc/tests/test_docscrape.py::test_extended_summary",
"numpydoc/tests/test_docscrape.py::test_parameters",
"numpydoc/tests/test_docscrape.py::test_other_parameters",
"numpydoc/tests/test_docscrape.py::test_returns",
"numpydoc/tests/test_docscrape.py::test_yields",
"numpydoc/tests/test_docscrape.py::test_returnyield",
"numpydoc/tests/test_docscrape.py::test_section_twice",
"numpydoc/tests/test_docscrape.py::test_notes",
"numpydoc/tests/test_docscrape.py::test_references",
"numpydoc/tests/test_docscrape.py::test_examples",
"numpydoc/tests/test_docscrape.py::test_index",
"numpydoc/tests/test_docscrape.py::test_yield_str",
"numpydoc/tests/test_docscrape.py::test_receives_str",
"numpydoc/tests/test_docscrape.py::test_no_index_in_str",
"numpydoc/tests/test_docscrape.py::test_sphinx_yields_str",
"numpydoc/tests/test_docscrape.py::test_parameters_without_extended_description",
"numpydoc/tests/test_docscrape.py::test_escape_stars",
"numpydoc/tests/test_docscrape.py::test_empty_extended_summary",
"numpydoc/tests/test_docscrape.py::test_raises",
"numpydoc/tests/test_docscrape.py::test_warns",
"numpydoc/tests/test_docscrape.py::test_see_also_parse_error",
"numpydoc/tests/test_docscrape.py::test_see_also_print",
"numpydoc/tests/test_docscrape.py::test_unknown_section",
"numpydoc/tests/test_docscrape.py::test_empty_first_line",
"numpydoc/tests/test_docscrape.py::test_no_summary",
"numpydoc/tests/test_docscrape.py::test_unicode",
"numpydoc/tests/test_docscrape.py::test_plot_examples",
"numpydoc/tests/test_docscrape.py::test_use_blockquotes",
"numpydoc/tests/test_docscrape.py::test_class_members",
"numpydoc/tests/test_docscrape.py::test_duplicate_signature",
"numpydoc/tests/test_docscrape.py::test_class_members_doc",
"numpydoc/tests/test_docscrape.py::test_class_members_doc_sphinx",
"numpydoc/tests/test_docscrape.py::test_templated_sections",
"numpydoc/tests/test_docscrape.py::test_nonstandard_property",
"numpydoc/tests/test_docscrape.py::test_args_and_kwargs",
"numpydoc/tests/test_docscrape.py::test_autoclass"
]
| []
| BSD License | 2,400 | [
"numpydoc/docscrape.py",
"numpydoc/docscrape_sphinx.py",
"doc/format.rst",
"numpydoc/numpydoc.py",
"numpydoc/templates/numpydoc_docstring.rst"
]
| [
"numpydoc/docscrape.py",
"numpydoc/docscrape_sphinx.py",
"doc/format.rst",
"numpydoc/numpydoc.py",
"numpydoc/templates/numpydoc_docstring.rst"
]
|
Stewori__pytypes-38 | 152a2218bfa1b96ae5d866542ee4ad148b1b7b5d | 2018-04-13 00:46:02 | 152a2218bfa1b96ae5d866542ee4ad148b1b7b5d | diff --git a/pytypes/type_util.py b/pytypes/type_util.py
index f6227c8..976f96f 100644
--- a/pytypes/type_util.py
+++ b/pytypes/type_util.py
@@ -22,10 +22,10 @@ import typing
import collections
from inspect import isfunction, ismethod, isclass, ismodule
try:
- from backports.typing import Tuple, Dict, List, Set, Union, Any, \
+ from backports.typing import Tuple, Dict, List, Set, FrozenSet, Union, Any, \
Sequence, Mapping, TypeVar, Container, Generic, Sized, Iterable
except ImportError:
- from typing import Tuple, Dict, List, Set, Union, Any, \
+ from typing import Tuple, Dict, List, Set, FrozenSet, Union, Any, \
Sequence, Mapping, TypeVar, Container, Generic, Sized, Iterable
try:
# Python 3.7
@@ -465,9 +465,13 @@ def _deep_type(obj, checked, checked_len, depth = None, max_sample = None):
tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1) for t in ksmpl)
tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1) for t in vsmpl)
res = Dict[Union[tpl1], Union[tpl2]]
- elif res == set:
+ elif res == set or res == frozenset:
+ if res == set:
+ typ = Set
+ else:
+ typ = FrozenSet
if len(obj) == 0:
- return Empty[Set]
+ return Empty[typ]
if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:
tpl = tuple(_deep_type(t, checked, depth-1) for t in obj)
else:
@@ -485,7 +489,7 @@ def _deep_type(obj, checked, checked_len, depth = None, max_sample = None):
j -= 1
smpl.append(next(itr))
tpl = tuple(_deep_type(t, checked, depth-1) for t in smpl)
- res = Set[Union[tpl]]
+ res = typ[Union[tpl]]
elif res == types.GeneratorType:
res = get_generator_type(obj)
elif sys.version_info.major == 2 and isinstance(obj, types.InstanceType):
| Frozenset seems to not be equal to set?
```python
>>> import typing
>>> from pytypes import type_util
>>> type_util._isinstance(frozenset({1, 2, 'a', None, 'b'}), typing.AbstractSet[typing.Union[str, int, None]])
False
>>> type_util._isinstance({1, 2, 'a', None, 'b'}, typing.AbstractSet[typing.Union[str, int, type(None)]])
True
>>> issubclass(frozenset, typing.AbstractSet)
True
``` | Stewori/pytypes | diff --git a/tests/test_typechecker.py b/tests/test_typechecker.py
index ddd2745..9106d6b 100644
--- a/tests/test_typechecker.py
+++ b/tests/test_typechecker.py
@@ -4664,6 +4664,10 @@ class Test_utils(unittest.TestCase):
# No exception.
resolve_fw_decl(Foo)
+ # See: https://github.com/Stewori/pytypes/issues/35
+ def test_frozenset(self):
+ self.assertTrue(pytypes.is_of_type(frozenset({1, 2, 'a', None, 'b'}), typing.AbstractSet[typing.Union[str, int, None]]))
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.03 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/Stewori/pytypes.git@152a2218bfa1b96ae5d866542ee4ad148b1b7b5d#egg=pytypes
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pytypes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pytypes
| [
"tests/test_typechecker.py::Test_utils::test_frozenset"
]
| [
"tests/test_typechecker.py::testfunc",
"tests/test_typechecker.py::testfunc_err",
"tests/test_typechecker.py::testfunc2",
"tests/test_typechecker.py::testfunc4",
"tests/test_typechecker.py::testfunc_None_ret",
"tests/test_typechecker.py::testfunc_None_ret_err",
"tests/test_typechecker.py::testfunc_None_arg",
"tests/test_typechecker.py::testfunc_Dict_arg",
"tests/test_typechecker.py::testfunc_Mapping_arg",
"tests/test_typechecker.py::testfunc_Dict_ret",
"tests/test_typechecker.py::testfunc_Dict_ret_err",
"tests/test_typechecker.py::testfunc_Seq_arg",
"tests/test_typechecker.py::testfunc_Seq_ret_List",
"tests/test_typechecker.py::testfunc_Seq_ret_Tuple",
"tests/test_typechecker.py::testfunc_Seq_ret_err",
"tests/test_typechecker.py::testfunc_Iter_arg",
"tests/test_typechecker.py::testfunc_Iter_str_arg",
"tests/test_typechecker.py::testfunc_Iter_ret_err",
"tests/test_typechecker.py::testfunc_Callable_arg",
"tests/test_typechecker.py::testfunc_Callable_call_err",
"tests/test_typechecker.py::testfunc_Callable_ret",
"tests/test_typechecker.py::testfunc_Callable_ret_err",
"tests/test_typechecker.py::testfunc_Generator_arg",
"tests/test_typechecker.py::testfunc_Generator_ret",
"tests/test_typechecker.py::testfunc_Generic_arg",
"tests/test_typechecker.py::testfunc_Generic_ret",
"tests/test_typechecker.py::testfunc_Generic_ret_err",
"tests/test_typechecker.py::testfunc_numeric_tower_float",
"tests/test_typechecker.py::testfunc_numeric_tower_complex",
"tests/test_typechecker.py::testfunc_numeric_tower_tuple",
"tests/test_typechecker.py::testfunc_numeric_tower_return",
"tests/test_typechecker.py::testfunc_numeric_tower_return_err",
"tests/test_typechecker.py::testfunc_custom_annotations_typechecked",
"tests/test_typechecker.py::testfunc_custom_annotations_typechecked_err",
"tests/test_typechecker.py::testfunc_varargs2",
"tests/test_typechecker.py::testfunc_varargs3",
"tests/test_typechecker.py::testfunc_varargs5",
"tests/test_typechecker.py::testfunc_varargs_err",
"tests/test_typechecker.py::testfunc_varargs_ca3"
]
| [
"tests/test_typechecker.py::testfunc_Iter_ret",
"tests/test_typechecker.py::testfunc_Generator",
"tests/test_typechecker.py::testfunc_varargs1",
"tests/test_typechecker.py::testfunc_varargs4",
"tests/test_typechecker.py::testClass2_defTimeCheck",
"tests/test_typechecker.py::testClass2_defTimeCheck2",
"tests/test_typechecker.py::testClass2_defTimeCheck3",
"tests/test_typechecker.py::testClass2_defTimeCheck4",
"tests/test_typechecker.py::testClass3_defTimeCheck",
"tests/test_typechecker.py::testClass2_defTimeCheck_init_ov",
"tests/test_typechecker.py::testfunc_check_argument_types_empty",
"tests/test_typechecker.py::testfunc_varargs_ca1",
"tests/test_typechecker.py::testfunc_varargs_ca4",
"tests/test_typechecker.py::TestTypecheck::test_abstract_override",
"tests/test_typechecker.py::TestTypecheck::test_annotations_from_typestring",
"tests/test_typechecker.py::TestTypecheck::test_callable",
"tests/test_typechecker.py::TestTypecheck::test_classmethod",
"tests/test_typechecker.py::TestTypecheck::test_custom_annotations",
"tests/test_typechecker.py::TestTypecheck::test_custom_generic",
"tests/test_typechecker.py::TestTypecheck::test_defaults_inferred_types",
"tests/test_typechecker.py::TestTypecheck::test_dict",
"tests/test_typechecker.py::TestTypecheck::test_empty",
"tests/test_typechecker.py::TestTypecheck::test_function",
"tests/test_typechecker.py::TestTypecheck::test_generator",
"tests/test_typechecker.py::TestTypecheck::test_get_generic_parameters",
"tests/test_typechecker.py::TestTypecheck::test_get_types",
"tests/test_typechecker.py::TestTypecheck::test_iterable",
"tests/test_typechecker.py::TestTypecheck::test_method",
"tests/test_typechecker.py::TestTypecheck::test_method_forward",
"tests/test_typechecker.py::TestTypecheck::test_numeric_tower",
"tests/test_typechecker.py::TestTypecheck::test_parent_typecheck_no_override",
"tests/test_typechecker.py::TestTypecheck::test_parent_typecheck_other_signature",
"tests/test_typechecker.py::TestTypecheck::test_property",
"tests/test_typechecker.py::TestTypecheck::test_sequence",
"tests/test_typechecker.py::TestTypecheck::test_staticmethod",
"tests/test_typechecker.py::TestTypecheck::test_subtype_class_extends_generic",
"tests/test_typechecker.py::TestTypecheck::test_typecheck_parent_type",
"tests/test_typechecker.py::TestTypecheck::test_typestring_varargs_syntax",
"tests/test_typechecker.py::TestTypecheck::test_typevar_class",
"tests/test_typechecker.py::TestTypecheck::test_typevar_func",
"tests/test_typechecker.py::TestTypecheck::test_unparameterized",
"tests/test_typechecker.py::TestTypecheck::test_varargs",
"tests/test_typechecker.py::TestTypecheck::test_varargs_check_argument_types",
"tests/test_typechecker.py::TestTypecheck::test_various",
"tests/test_typechecker.py::TestTypecheck_class::test_classmethod",
"tests/test_typechecker.py::TestTypecheck_class::test_method",
"tests/test_typechecker.py::TestTypecheck_class::test_staticmethod",
"tests/test_typechecker.py::TestTypecheck_module::test_function_py2",
"tests/test_typechecker.py::TestTypecheck_module::test_function_py3",
"tests/test_typechecker.py::Test_check_argument_types::test_function",
"tests/test_typechecker.py::Test_check_argument_types::test_inner_class",
"tests/test_typechecker.py::Test_check_argument_types::test_inner_method",
"tests/test_typechecker.py::Test_check_argument_types::test_methods",
"tests/test_typechecker.py::TestOverride::test_auto_override",
"tests/test_typechecker.py::TestOverride::test_override",
"tests/test_typechecker.py::TestOverride::test_override_at_definition_time",
"tests/test_typechecker.py::TestOverride::test_override_at_definition_time_with_forward_decl",
"tests/test_typechecker.py::TestOverride::test_override_diamond",
"tests/test_typechecker.py::TestOverride::test_override_typecheck",
"tests/test_typechecker.py::TestOverride::test_override_typecheck_class",
"tests/test_typechecker.py::TestOverride::test_override_vararg",
"tests/test_typechecker.py::TestStubfile::test_annotations_from_stubfile_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_annotations_from_stubfile_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_callable_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_custom_generic_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_defaults_inferred_types_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_defaults_inferred_types_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_dict_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_generator_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_iterable_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_override_diamond_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_override_diamond_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_property_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_property_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_sequence_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_typecheck_parent_type_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_typecheck_parent_type_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_check_argument_types_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_check_argument_types_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_varargs_plain_3_5_stub",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_abstract_override_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_callable_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_classmethod_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_custom_generic_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_inferred_types",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_dict_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_function_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_generator_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_get_types_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_iterable_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_method_forward_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_method_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_parent_typecheck_no_override_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_parent_typecheck_other_signature_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_property",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_sequence_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_staticmethod_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_typecheck_parent_type",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_typevar_func",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_varargs",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_varargs_check_argument_types",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_various_py3",
"tests/test_typechecker.py::TestOverride_Python3_5::test_auto_override",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_at_definition_time",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_at_definition_time_with_forward_decl",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_diamond",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_py3",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_typecheck",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_vararg",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_function",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_inner_class",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_inner_method",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_methods",
"tests/test_typechecker.py::Test_utils::test_resolve_fw_decl"
]
| []
| Apache License 2.0 | 2,401 | [
"pytypes/type_util.py"
]
| [
"pytypes/type_util.py"
]
|
|
python-pillow__Pillow-3095 | b01ba0f50cf48a2abef9cef485e6e3a9dd246b34 | 2018-04-13 07:25:27 | cba0004cdc2af5939dec0c9319272d6cdd440bce | diff --git a/docs/installation.rst b/docs/installation.rst
index 35b5edf19..0371e517b 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -175,7 +175,7 @@ Many of Pillow's features require external libraries:
* setting text direction or font features is not supported without
libraqm.
* libraqm is dynamically loaded in Pillow 5.0.0 and above, so support
- is available if all the libraries are installed.
+ is available if all the libraries are installed.
* Windows support: Raqm support is currently unsupported on Windows.
Once you have installed the prerequisites, run::
diff --git a/docs/reference/ImageDraw.rst b/docs/reference/ImageDraw.rst
index de26a4d85..6b686568d 100644
--- a/docs/reference/ImageDraw.rst
+++ b/docs/reference/ImageDraw.rst
@@ -25,7 +25,6 @@ Example: Draw a gray cross over an image
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=128)
draw.line((0, im.size[1], im.size[0], 0), fill=128)
- del draw
# write to stdout
im.save(sys.stdout, "PNG")
diff --git a/docs/reference/ImageFilter.rst b/docs/reference/ImageFilter.rst
index 3368f799f..5275329ab 100644
--- a/docs/reference/ImageFilter.rst
+++ b/docs/reference/ImageFilter.rst
@@ -37,9 +37,8 @@ image enhancement filters:
* **SMOOTH**
* **SMOOTH_MORE**
-.. autoclass:: PIL.ImageFilter.Color3DLUT
-.. autoclass:: PIL.ImageFilter.BoxBlur
.. autoclass:: PIL.ImageFilter.GaussianBlur
+.. autoclass:: PIL.ImageFilter.BoxBlur
.. autoclass:: PIL.ImageFilter.UnsharpMask
.. autoclass:: PIL.ImageFilter.Kernel
.. autoclass:: PIL.ImageFilter.RankFilter
diff --git a/src/PIL/ImageFilter.py b/src/PIL/ImageFilter.py
index ff9348b21..93cd7ad47 100644
--- a/src/PIL/ImageFilter.py
+++ b/src/PIL/ImageFilter.py
@@ -15,8 +15,6 @@
# See the README file for information on usage and redistribution.
#
-from __future__ import division
-
import functools
@@ -325,18 +323,12 @@ class Color3DLUT(MultibandFilter):
"""
name = "Color 3D LUT"
- def __init__(self, size, table, channels=3, target_mode=None, **kwargs):
- if channels not in (3, 4):
- raise ValueError("Only 3 or 4 output channels are supported")
+ def __init__(self, size, table, channels=3, target_mode=None):
self.size = size = self._check_size(size)
self.channels = channels
self.mode = target_mode
- # Hidden flag `_copy_table=False` could be used to avoid extra copying
- # of the table if the table is specially made for the constructor.
- if kwargs.get('_copy_table', True):
- table = list(table)
-
+ table = list(table)
# Convert to a flat list
if table and isinstance(table[0], (list, tuple)):
table, raw_table = [], table
@@ -379,82 +371,20 @@ class Color3DLUT(MultibandFilter):
three color channels. Will be called ``size**3``
times with values from 0.0 to 1.0 and should return
a tuple with ``channels`` elements.
- :param channels: The number of channels which should return callback.
- :param target_mode: Passed to the constructor of the resulting
- lookup table.
+ :param channels: Passed to the constructor.
+ :param target_mode: Passed to the constructor.
"""
size1D, size2D, size3D = cls._check_size(size)
- if channels not in (3, 4):
- raise ValueError("Only 3 or 4 output channels are supported")
-
- table = [0] * (size1D * size2D * size3D * channels)
- idx_out = 0
- for b in range(size3D):
- for g in range(size2D):
- for r in range(size1D):
- table[idx_out:idx_out + channels] = callback(
- r / (size1D-1), g / (size2D-1), b / (size3D-1))
- idx_out += channels
-
- return cls((size1D, size2D, size3D), table, channels=channels,
- target_mode=target_mode, _copy_table=False)
-
- def transform(self, callback, with_normals=False, channels=None,
- target_mode=None):
- """Transforms the table values using provided callback and returns
- a new LUT with altered values.
-
- :param callback: A function which takes old lookup table values
- and returns a new set of values. The number
- of arguments which function should take is
- ``self.channels`` or ``3 + self.channels``
- if ``with_normals`` flag is set.
- Should return a tuple of ``self.channels`` or
- ``channels`` elements if it is set.
- :param with_normals: If true, ``callback`` will be called with
- coordinates in the color cube as the first
- three arguments. Otherwise, ``callback``
- will be called only with actual color values.
- :param channels: The number of channels in the resulting lookup table.
- :param target_mode: Passed to the constructor of the resulting
- lookup table.
- """
- if channels not in (None, 3, 4):
- raise ValueError("Only 3 or 4 output channels are supported")
- ch_in = self.channels
- ch_out = channels or ch_in
- size1D, size2D, size3D = self.size
-
- table = [0] * (size1D * size2D * size3D * ch_out)
- idx_in = 0
- idx_out = 0
+ table = []
for b in range(size3D):
for g in range(size2D):
for r in range(size1D):
- values = self.table[idx_in:idx_in + ch_in]
- if with_normals:
- values = callback(r / (size1D-1), g / (size2D-1),
- b / (size3D-1), *values)
- else:
- values = callback(*values)
- table[idx_out:idx_out + ch_out] = values
- idx_in += ch_in
- idx_out += ch_out
-
- return type(self)(self.size, table, channels=ch_out,
- target_mode=target_mode or self.mode,
- _copy_table=False)
-
- def __repr__(self):
- r = [
- "{} from {}".format(self.__class__.__name__,
- self.table.__class__.__name__),
- "size={:d}x{:d}x{:d}".format(*self.size),
- "channels={:d}".format(self.channels),
- ]
- if self.mode:
- r.append("target_mode={}".format(self.mode))
- return "<{}>".format(" ".join(r))
+ table.append(callback(
+ r / float(size1D-1),
+ g / float(size2D-1),
+ b / float(size3D-1)))
+
+ return cls((size1D, size2D, size3D), table, channels, target_mode)
def filter(self, image):
from . import Image
diff --git a/src/_imaging.c b/src/_imaging.c
index 808917ad1..544e54d87 100644
--- a/src/_imaging.c
+++ b/src/_imaging.c
@@ -379,12 +379,12 @@ getlist(PyObject* arg, Py_ssize_t* length, const char* wrong_length, int type)
PyObject* seq;
PyObject* op;
- if ( ! PySequence_Check(arg)) {
+ if (!PySequence_Check(arg)) {
PyErr_SetString(PyExc_TypeError, must_be_sequence);
return NULL;
}
- n = PySequence_Size(arg);
+ n = PyObject_Length(arg);
if (length && wrong_length && n != *length) {
PyErr_SetString(PyExc_ValueError, wrong_length);
return NULL;
@@ -393,12 +393,13 @@ getlist(PyObject* arg, Py_ssize_t* length, const char* wrong_length, int type)
/* malloc check ok, type & ff is just a sizeof(something)
calloc checks for overflow */
list = calloc(n, type & 0xff);
- if ( ! list)
+ if (!list)
return PyErr_NoMemory();
seq = PySequence_Fast(arg, must_be_sequence);
- if ( ! seq) {
+ if (!seq) {
free(list);
+ PyErr_SetString(PyExc_TypeError, must_be_sequence);
return NULL;
}
@@ -426,16 +427,12 @@ getlist(PyObject* arg, Py_ssize_t* length, const char* wrong_length, int type)
}
}
- Py_DECREF(seq);
-
- if (PyErr_Occurred()) {
- free(list);
- return NULL;
- }
-
if (length)
*length = n;
+ PyErr_Clear();
+ Py_DECREF(seq);
+
return list;
}
| ImageDraw examples - is "del" really necessary?
Hi! In our project, we have a "canvas" system, based off `PIL.Image` and `PIL.ImageDraw`. [The code](https://github.com/ZeroPhone/ZPUI/blob/devel/ui/canvas.py) is simple - create an `Image`, create an `ImageDraw` object, allow the user to do stuff with it, then allow the user to get the resulting image.
So, a question about `ImageDraw` - [in the first example](http://pillow.readthedocs.io/en/5.0.0/reference/ImageDraw.html#example-draw-a-gray-cross-over-an-image), `del` is used - what's the reasoning behind it? Is the `draw` object not collected by GC eventually (IIRC Python can deal with cyclic references quite well)? | python-pillow/Pillow | diff --git a/Tests/test_color_lut.py b/Tests/test_color_lut.py
index 8bf33be92..f9d35c83c 100644
--- a/Tests/test_color_lut.py
+++ b/Tests/test_color_lut.py
@@ -1,13 +1,11 @@
from __future__ import division
-from array import array
-
from PIL import Image, ImageFilter
from helper import unittest, PillowTestCase
class TestColorLut3DCoreAPI(PillowTestCase):
- def generate_identity_table(self, channels, size):
+ def generate_unit_table(self, channels, size):
if isinstance(size, tuple):
size1D, size2D, size3D = size
else:
@@ -34,31 +32,31 @@ class TestColorLut3DCoreAPI(PillowTestCase):
with self.assertRaisesRegexp(ValueError, "filter"):
im.im.color_lut_3d('RGB', Image.CUBIC,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
with self.assertRaisesRegexp(ValueError, "image mode"):
im.im.color_lut_3d('wrong', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
with self.assertRaisesRegexp(ValueError, "table_channels"):
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(5, 3))
+ *self.generate_unit_table(5, 3))
with self.assertRaisesRegexp(ValueError, "table_channels"):
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(1, 3))
+ *self.generate_unit_table(1, 3))
with self.assertRaisesRegexp(ValueError, "table_channels"):
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(2, 3))
+ *self.generate_unit_table(2, 3))
with self.assertRaisesRegexp(ValueError, "Table size"):
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (1, 3, 3)))
+ *self.generate_unit_table(3, (1, 3, 3)))
with self.assertRaisesRegexp(ValueError, "Table size"):
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (66, 3, 3)))
+ *self.generate_unit_table(3, (66, 3, 3)))
with self.assertRaisesRegexp(ValueError, r"size1D \* size2D \* size3D"):
im.im.color_lut_3d('RGB', Image.LINEAR,
@@ -68,79 +66,71 @@ class TestColorLut3DCoreAPI(PillowTestCase):
im.im.color_lut_3d('RGB', Image.LINEAR,
3, 2, 2, 2, [0, 0, 0] * 9)
- with self.assertRaises(TypeError):
- im.im.color_lut_3d('RGB', Image.LINEAR,
- 3, 2, 2, 2, [0, 0, "0"] * 8)
-
- with self.assertRaises(TypeError):
- im.im.color_lut_3d('RGB', Image.LINEAR,
- 3, 2, 2, 2, 16)
-
def test_correct_args(self):
im = Image.new('RGB', (10, 10), 0)
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
im.im.color_lut_3d('CMYK', Image.LINEAR,
- *self.generate_identity_table(4, 3))
+ *self.generate_unit_table(4, 3))
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (2, 3, 3)))
+ *self.generate_unit_table(3, (2, 3, 3)))
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (65, 3, 3)))
+ *self.generate_unit_table(3, (65, 3, 3)))
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (3, 65, 3)))
+ *self.generate_unit_table(3, (3, 65, 3)))
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (3, 3, 65)))
+ *self.generate_unit_table(3, (3, 3, 65)))
def test_wrong_mode(self):
with self.assertRaisesRegexp(ValueError, "wrong mode"):
im = Image.new('L', (10, 10), 0)
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
with self.assertRaisesRegexp(ValueError, "wrong mode"):
im = Image.new('RGB', (10, 10), 0)
im.im.color_lut_3d('L', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
with self.assertRaisesRegexp(ValueError, "wrong mode"):
im = Image.new('L', (10, 10), 0)
im.im.color_lut_3d('L', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
with self.assertRaisesRegexp(ValueError, "wrong mode"):
im = Image.new('RGB', (10, 10), 0)
im.im.color_lut_3d('RGBA', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
with self.assertRaisesRegexp(ValueError, "wrong mode"):
im = Image.new('RGB', (10, 10), 0)
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(4, 3))
+ *self.generate_unit_table(4, 3))
def test_correct_mode(self):
im = Image.new('RGBA', (10, 10), 0)
im.im.color_lut_3d('RGBA', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
im = Image.new('RGBA', (10, 10), 0)
im.im.color_lut_3d('RGBA', Image.LINEAR,
- *self.generate_identity_table(4, 3))
+ *self.generate_unit_table(4, 3))
im = Image.new('RGB', (10, 10), 0)
im.im.color_lut_3d('HSV', Image.LINEAR,
- *self.generate_identity_table(3, 3))
+ *self.generate_unit_table(3, 3))
im = Image.new('RGB', (10, 10), 0)
im.im.color_lut_3d('RGBA', Image.LINEAR,
- *self.generate_identity_table(4, 3))
+ *self.generate_unit_table(4, 3))
- def test_identities(self):
+ def test_units(self):
g = Image.linear_gradient('L')
im = Image.merge('RGB', [g, g.transpose(Image.ROTATE_90),
g.transpose(Image.ROTATE_180)])
@@ -149,14 +139,14 @@ class TestColorLut3DCoreAPI(PillowTestCase):
for size in [2, 3, 5, 7, 11, 16, 17]:
self.assert_image_equal(im, im._new(
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, size))))
+ *self.generate_unit_table(3, size))))
# Not so fast
self.assert_image_equal(im, im._new(
im.im.color_lut_3d('RGB', Image.LINEAR,
- *self.generate_identity_table(3, (2, 2, 65)))))
+ *self.generate_unit_table(3, (2, 2, 65)))))
- def test_identities_4_channels(self):
+ def test_units_4channels(self):
g = Image.linear_gradient('L')
im = Image.merge('RGB', [g, g.transpose(Image.ROTATE_90),
g.transpose(Image.ROTATE_180)])
@@ -165,7 +155,7 @@ class TestColorLut3DCoreAPI(PillowTestCase):
self.assert_image_equal(
Image.merge('RGBA', (im.split()*2)[:4]),
im._new(im.im.color_lut_3d('RGBA', Image.LINEAR,
- *self.generate_identity_table(4, 17))))
+ *self.generate_unit_table(4, 17))))
def test_copy_alpha_channel(self):
g = Image.linear_gradient('L')
@@ -175,7 +165,7 @@ class TestColorLut3DCoreAPI(PillowTestCase):
self.assert_image_equal(im, im._new(
im.im.color_lut_3d('RGBA', Image.LINEAR,
- *self.generate_identity_table(3, 17))))
+ *self.generate_unit_table(3, 17))))
def test_channels_order(self):
g = Image.linear_gradient('L')
@@ -262,9 +252,6 @@ class TestColorLut3DFilter(PillowTestCase):
with self.assertRaisesRegexp(ValueError, "should have a length of 3"):
ImageFilter.Color3DLUT((2, 2, 2), [[1, 1]] * 8)
- with self.assertRaisesRegexp(ValueError, "Only 3 or 4 output"):
- ImageFilter.Color3DLUT((2, 2, 2), [[1, 1]] * 8, channels=2)
-
def test_convert_table(self):
lut = ImageFilter.Color3DLUT(2, [0, 1, 2] * 8)
self.assertEqual(tuple(lut.size), (2, 2, 2))
@@ -277,33 +264,9 @@ class TestColorLut3DFilter(PillowTestCase):
self.assertEqual(lut.table, list(range(24)))
lut = ImageFilter.Color3DLUT((2, 2, 2), [(0, 1, 2, 3)] * 8,
- channels=4)
-
- def test_repr(self):
- lut = ImageFilter.Color3DLUT(2, [0, 1, 2] * 8)
- self.assertEqual(repr(lut),
- "<Color3DLUT from list size=2x2x2 channels=3>")
-
- lut = ImageFilter.Color3DLUT(
- (3, 4, 5), array('f', [0, 0, 0, 0] * (3 * 4 * 5)),
- channels=4, target_mode='YCbCr', _copy_table=False)
- self.assertEqual(repr(lut),
- "<Color3DLUT from array size=3x4x5 channels=4 target_mode=YCbCr>")
-
-class TestGenerateColorLut3D(PillowTestCase):
- def test_wrong_channels_count(self):
- with self.assertRaisesRegexp(ValueError, "3 or 4 output channels"):
- ImageFilter.Color3DLUT.generate(5, channels=2,
- callback=lambda r, g, b: (r, g, b))
-
- with self.assertRaisesRegexp(ValueError, "should have either channels"):
- ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b, r))
-
- with self.assertRaisesRegexp(ValueError, "should have either channels"):
- ImageFilter.Color3DLUT.generate(5, channels=4,
- callback=lambda r, g, b: (r, g, b))
+ channels=4)
- def test_3_channels(self):
+ def test_generate(self):
lut = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
self.assertEqual(tuple(lut.size), (5, 5, 5))
self.assertEqual(lut.name, "Color 3D LUT")
@@ -311,7 +274,11 @@ class TestGenerateColorLut3D(PillowTestCase):
0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.5, 0.0, 0.0, 0.75, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.25, 0.25, 0.0, 0.5, 0.25, 0.0])
- def test_4_channels(self):
+ g = Image.linear_gradient('L')
+ im = Image.merge('RGB', [g, g.transpose(Image.ROTATE_90),
+ g.transpose(Image.ROTATE_180)])
+ self.assertEqual(im, im.filter(lut))
+
lut = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (b, r, g, (r+g+b) / 2))
self.assertEqual(tuple(lut.size), (5, 5, 5))
@@ -320,110 +287,12 @@ class TestGenerateColorLut3D(PillowTestCase):
0.0, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.125, 0.0, 0.5, 0.0, 0.25,
0.0, 0.75, 0.0, 0.375, 0.0, 1.0, 0.0, 0.5, 0.0, 0.0, 0.25, 0.125])
- def test_apply(self):
- lut = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
-
- g = Image.linear_gradient('L')
- im = Image.merge('RGB', [g, g.transpose(Image.ROTATE_90),
- g.transpose(Image.ROTATE_180)])
- self.assertEqual(im, im.filter(lut))
-
+ with self.assertRaisesRegexp(ValueError, "should have a length of 3"):
+ ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b, r))
-class TestTransformColorLut3D(PillowTestCase):
- def test_wrong_args(self):
- source = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
-
- with self.assertRaisesRegexp(ValueError, "Only 3 or 4 output"):
- source.transform(lambda r, g, b: (r, g, b), channels=8)
-
- with self.assertRaisesRegexp(ValueError, "should have either channels"):
- source.transform(lambda r, g, b: (r, g, b), channels=4)
-
- with self.assertRaisesRegexp(ValueError, "should have either channels"):
- source.transform(lambda r, g, b: (r, g, b, 1))
-
- with self.assertRaises(TypeError):
- source.transform(lambda r, g, b, a: (r, g, b))
-
- def test_target_mode(self):
- source = ImageFilter.Color3DLUT.generate(
- 2, lambda r, g, b: (r, g, b), target_mode='HSV')
-
- lut = source.transform(lambda r, g, b: (r, g, b))
- self.assertEqual(lut.mode, 'HSV')
-
- lut = source.transform(lambda r, g, b: (r, g, b), target_mode='RGB')
- self.assertEqual(lut.mode, 'RGB')
-
- def test_3_to_3_channels(self):
- source = ImageFilter.Color3DLUT.generate(
- (3, 4, 5), lambda r, g, b: (r, g, b))
- lut = source.transform(lambda r, g, b: (r*r, g*g, b*b))
- self.assertEqual(tuple(lut.size), tuple(source.size))
- self.assertEqual(len(lut.table), len(source.table))
- self.assertNotEqual(lut.table, source.table)
- self.assertEqual(lut.table[0:10], [
- 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
-
- def test_3_to_4_channels(self):
- source = ImageFilter.Color3DLUT.generate(
- (6, 5, 4), lambda r, g, b: (r, g, b))
- lut = source.transform(lambda r, g, b: (r*r, g*g, b*b, 1), channels=4)
- self.assertEqual(tuple(lut.size), tuple(source.size))
- self.assertNotEqual(len(lut.table), len(source.table))
- self.assertNotEqual(lut.table, source.table)
- self.assertEqual(lut.table[0:16], [
- 0.0, 0.0, 0.0, 1, 0.2**2, 0.0, 0.0, 1,
- 0.4**2, 0.0, 0.0, 1, 0.6**2, 0.0, 0.0, 1])
-
- def test_4_to_3_channels(self):
- source = ImageFilter.Color3DLUT.generate(
- (3, 6, 5), lambda r, g, b: (r, g, b, 1), channels=4)
- lut = source.transform(lambda r, g, b, a: (a - r*r, a - g*g, a - b*b),
- channels=3)
- self.assertEqual(tuple(lut.size), tuple(source.size))
- self.assertNotEqual(len(lut.table), len(source.table))
- self.assertNotEqual(lut.table, source.table)
- self.assertEqual(lut.table[0:18], [
- 1.0, 1.0, 1.0, 0.75, 1.0, 1.0, 0.0, 1.0, 1.0,
- 1.0, 0.96, 1.0, 0.75, 0.96, 1.0, 0.0, 0.96, 1.0])
-
- def test_4_to_4_channels(self):
- source = ImageFilter.Color3DLUT.generate(
- (6, 5, 4), lambda r, g, b: (r, g, b, 1), channels=4)
- lut = source.transform(lambda r, g, b, a: (r*r, g*g, b*b, a - 0.5))
- self.assertEqual(tuple(lut.size), tuple(source.size))
- self.assertEqual(len(lut.table), len(source.table))
- self.assertNotEqual(lut.table, source.table)
- self.assertEqual(lut.table[0:16], [
- 0.0, 0.0, 0.0, 0.5, 0.2**2, 0.0, 0.0, 0.5,
- 0.4**2, 0.0, 0.0, 0.5, 0.6**2, 0.0, 0.0, 0.5])
-
- def test_with_normals_3_channels(self):
- source = ImageFilter.Color3DLUT.generate(
- (6, 5, 4), lambda r, g, b: (r*r, g*g, b*b))
- lut = source.transform(
- lambda nr, ng, nb, r, g, b: (nr - r, ng - g, nb - b),
- with_normals=True)
- self.assertEqual(tuple(lut.size), tuple(source.size))
- self.assertEqual(len(lut.table), len(source.table))
- self.assertNotEqual(lut.table, source.table)
- self.assertEqual(lut.table[0:18], [
- 0.0, 0.0, 0.0, 0.16, 0.0, 0.0, 0.24, 0.0, 0.0,
- 0.24, 0.0, 0.0, 0.8 - (0.8**2), 0, 0, 0, 0, 0])
-
- def test_with_normals_4_channels(self):
- source = ImageFilter.Color3DLUT.generate(
- (3, 6, 5), lambda r, g, b: (r*r, g*g, b*b, 1), channels=4)
- lut = source.transform(
- lambda nr, ng, nb, r, g, b, a: (nr - r, ng - g, nb - b, a-0.5),
- with_normals=True)
- self.assertEqual(tuple(lut.size), tuple(source.size))
- self.assertEqual(len(lut.table), len(source.table))
- self.assertNotEqual(lut.table, source.table)
- self.assertEqual(lut.table[0:16], [
- 0.0, 0.0, 0.0, 0.5, 0.25, 0.0, 0.0, 0.5,
- 0.0, 0.0, 0.0, 0.5, 0.0, 0.16, 0.0, 0.5])
+ with self.assertRaisesRegexp(ValueError, "should have a length of 4"):
+ ImageFilter.Color3DLUT.generate(5, channels=4,
+ callback=lambda r, g, b: (r, g, b))
if __name__ == '__main__':
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | 5.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc libjpeg-dev zlib1g-dev libtiff5-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
blessed==1.20.0
build==1.2.2.post1
certifi==2025.1.31
charset-normalizer==3.4.1
check-manifest==0.50
cov-core==1.15.0
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
docutils==0.21.2
exceptiongroup==1.2.2
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
jarn.viewdoc==2.7
Jinja2==3.1.6
MarkupSafe==3.0.2
olefile==0.47
packaging==24.2
-e git+https://github.com/python-pillow/Pillow.git@b01ba0f50cf48a2abef9cef485e6e3a9dd246b34#egg=Pillow
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
Pygments==2.19.1
pyproject_hooks==1.2.0
pyroma==4.2
pytest==8.3.5
pytest-cov==6.0.0
pytz==2025.2
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
trove-classifiers==2025.3.19.19
urllib3==2.3.0
wcwidth==0.2.13
zipp==3.21.0
| name: Pillow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- blessed==1.20.0
- build==1.2.2.post1
- certifi==2025.1.31
- charset-normalizer==3.4.1
- check-manifest==0.50
- cov-core==1.15.0
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jarn-viewdoc==2.7
- jinja2==3.1.6
- markupsafe==3.0.2
- olefile==0.47
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pygments==2.19.1
- pyproject-hooks==1.2.0
- pyroma==4.2
- pytest==8.3.5
- pytest-cov==6.0.0
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- trove-classifiers==2025.3.19.19
- urllib3==2.3.0
- wcwidth==0.2.13
- zipp==3.21.0
prefix: /opt/conda/envs/Pillow
| [
"Tests/test_color_lut.py::TestColorLut3DFilter::test_generate",
"Tests/test_color_lut.py::TestColorLut3DFilter::test_wrong_args"
]
| []
| [
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_channels_order",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_copy_alpha_channel",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_correct_args",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_correct_mode",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_overflow",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_units",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_units_4channels",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_wrong_args",
"Tests/test_color_lut.py::TestColorLut3DCoreAPI::test_wrong_mode",
"Tests/test_color_lut.py::TestColorLut3DFilter::test_convert_table"
]
| []
| MIT-CMU License | 2,402 | [
"docs/installation.rst",
"docs/reference/ImageFilter.rst",
"src/_imaging.c",
"src/PIL/ImageFilter.py",
"docs/reference/ImageDraw.rst"
]
| [
"docs/installation.rst",
"docs/reference/ImageFilter.rst",
"src/_imaging.c",
"src/PIL/ImageFilter.py",
"docs/reference/ImageDraw.rst"
]
|
|
oasis-open__cti-python-stix2-165 | 2d689815d743611a8f3ccd48ce5e2d1ec70695e5 | 2018-04-13 15:46:09 | 2d689815d743611a8f3ccd48ce5e2d1ec70695e5 | diff --git a/stix2/properties.py b/stix2/properties.py
index ca7f04c..41841b6 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -129,6 +129,8 @@ class ListProperty(Property):
# constructor again
result.append(valid)
continue
+ elif type(self.contained) is DictionaryProperty:
+ obj_type = dict
else:
obj_type = self.contained
diff --git a/stix2/utils.py b/stix2/utils.py
index 9febd78..4ef3d23 100644
--- a/stix2/utils.py
+++ b/stix2/utils.py
@@ -166,7 +166,7 @@ def get_dict(data):
def find_property_index(obj, properties, tuple_to_find):
"""Recursively find the property in the object model, return the index
according to the _properties OrderedDict. If it's a list look for
- individual objects.
+ individual objects. Returns and integer indicating its location
"""
from .base import _STIXBase
try:
@@ -183,6 +183,11 @@ def find_property_index(obj, properties, tuple_to_find):
tuple_to_find)
if val is not None:
return val
+ elif isinstance(item, dict):
+ for idx, val in enumerate(sorted(item)):
+ if (tuple_to_find[0] == val and
+ item.get(val) == tuple_to_find[1]):
+ return idx
elif isinstance(pv, dict):
if pv.get(tuple_to_find[0]) is not None:
try:
| Create an Extension with Dict annidate inside List
Hi,
I'm trying to create a CyberObservable Extension for UserAccount which have to contain a DictionaryProperty() inside a ListProperty(). It is possible? Because when I try to create an extension like this one
```
@CustomExtension(UserAccount, 'ssh_keys', {
keys': ListProperty(DictionaryProperty(), required=True)
})
class SSHKeysExtension:
pass
```
and use it with example = SSHKeysExtension(keys=[{'test123':123, 'test345','aaaa'}])
I obtain a lot of strange errors (the library seems to interpreter the dict as parameters for __init__()
| oasis-open/cti-python-stix2 | diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py
index a14503f..b45670f 100644
--- a/stix2/test/test_custom.py
+++ b/stix2/test/test_custom.py
@@ -479,6 +479,27 @@ def test_custom_extension_wrong_observable_type():
assert 'Cannot determine extension type' in excinfo.value.reason
[email protected]("data", [
+ """{
+ "keys": [
+ {
+ "test123": 123,
+ "test345": "aaaa"
+ }
+ ]
+}""",
+])
+def test_custom_extension_with_list_and_dict_properties_observable_type(data):
+ @stix2.observables.CustomExtension(stix2.UserAccount, 'some-extension', [
+ ('keys', stix2.properties.ListProperty(stix2.properties.DictionaryProperty, required=True))
+ ])
+ class SomeCustomExtension:
+ pass
+
+ example = SomeCustomExtension(keys=[{'test123': 123, 'test345': 'aaaa'}])
+ assert data == str(example)
+
+
def test_custom_extension_invalid_observable():
# These extensions are being applied to improperly-created Observables.
# The Observable classes should have been created with the CustomObservable decorator.
diff --git a/stix2/test/test_properties.py b/stix2/test/test_properties.py
index 34edc96..16ff06a 100644
--- a/stix2/test/test_properties.py
+++ b/stix2/test/test_properties.py
@@ -1,6 +1,6 @@
import pytest
-from stix2 import EmailMIMEComponent, ExtensionsProperty, TCPExt
+from stix2 import CustomObject, EmailMIMEComponent, ExtensionsProperty, TCPExt
from stix2.exceptions import AtLeastOnePropertyError, DictionaryKeyError
from stix2.properties import (BinaryProperty, BooleanProperty,
DictionaryProperty, EmbeddedObjectProperty,
@@ -266,6 +266,17 @@ def test_dictionary_property_invalid(d):
assert str(excinfo.value) == d[1]
+def test_property_list_of_dictionary():
+ @CustomObject('x-new-obj', [
+ ('property1', ListProperty(DictionaryProperty(), required=True)),
+ ])
+ class NewObj():
+ pass
+
+ test_obj = NewObj(property1=[{'foo': 'bar'}])
+ assert test_obj.property1[0]['foo'] == 'bar'
+
+
@pytest.mark.parametrize("value", [
{"sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b"},
[('MD5', '2dfb1bcc980200c6706feee399d41b3f'), ('RIPEMD-160', 'b3a8cd8a27c90af79b3c81754f267780f443dfef')],
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[taxii]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
entrypoints==0.4
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.3.2
nest-asyncio==1.6.0
nodeenv==1.6.0
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==1.5.6
sphinx-prompt==1.5.0
-e git+https://github.com/oasis-open/cti-python-stix2.git@2d689815d743611a8f3ccd48ce5e2d1ec70695e5#egg=stix2
stix2-patterns==2.0.0
taxii2-client==2.3.0
testpath==0.6.0
toml==0.10.2
tomli==1.2.3
tornado==6.1
tox==3.28.0
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- entrypoints==0.4
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.3.2
- nest-asyncio==1.6.0
- nodeenv==1.6.0
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==1.5.6
- sphinx-prompt==1.5.0
- stix2-patterns==2.0.0
- taxii2-client==2.3.0
- testpath==0.6.0
- toml==0.10.2
- tomli==1.2.3
- tornado==6.1
- tox==3.28.0
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_custom.py::test_custom_extension_with_list_and_dict_properties_observable_type[{\\n",
"stix2/test/test_properties.py::test_property_list_of_dictionary"
]
| []
| [
"stix2/test/test_custom.py::test_identity_custom_property",
"stix2/test/test_custom.py::test_identity_custom_property_invalid",
"stix2/test/test_custom.py::test_identity_custom_property_allowed",
"stix2/test/test_custom.py::test_parse_identity_custom_property[{\\n",
"stix2/test/test_custom.py::test_custom_property_in_bundled_object",
"stix2/test/test_custom.py::test_identity_custom_property_revoke",
"stix2/test/test_custom.py::test_identity_custom_property_edit_markings",
"stix2/test/test_custom.py::test_custom_marking_no_init_1",
"stix2/test/test_custom.py::test_custom_marking_no_init_2",
"stix2/test/test_custom.py::test_custom_object_raises_exception",
"stix2/test/test_custom.py::test_custom_object_type",
"stix2/test/test_custom.py::test_custom_object_no_init_1",
"stix2/test/test_custom.py::test_custom_object_no_init_2",
"stix2/test/test_custom.py::test_parse_custom_object_type",
"stix2/test/test_custom.py::test_parse_unregistered_custom_object_type",
"stix2/test/test_custom.py::test_parse_unregistered_custom_object_type_w_allow_custom",
"stix2/test/test_custom.py::test_custom_observable_object_1",
"stix2/test/test_custom.py::test_custom_observable_object_2",
"stix2/test/test_custom.py::test_custom_observable_object_3",
"stix2/test/test_custom.py::test_custom_observable_raises_exception",
"stix2/test/test_custom.py::test_custom_observable_object_no_init_1",
"stix2/test/test_custom.py::test_custom_observable_object_no_init_2",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_ref_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_list_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_valid_refs",
"stix2/test/test_custom.py::test_custom_no_properties_raises_exception",
"stix2/test/test_custom.py::test_custom_wrong_properties_arg_raises_exception",
"stix2/test/test_custom.py::test_parse_custom_observable_object",
"stix2/test/test_custom.py::test_parse_unregistered_custom_observable_object",
"stix2/test/test_custom.py::test_parse_invalid_custom_observable_object",
"stix2/test/test_custom.py::test_observable_custom_property",
"stix2/test/test_custom.py::test_observable_custom_property_invalid",
"stix2/test/test_custom.py::test_observable_custom_property_allowed",
"stix2/test/test_custom.py::test_observed_data_with_custom_observable_object",
"stix2/test/test_custom.py::test_custom_extension_raises_exception",
"stix2/test/test_custom.py::test_custom_extension",
"stix2/test/test_custom.py::test_custom_extension_wrong_observable_type",
"stix2/test/test_custom.py::test_custom_extension_invalid_observable",
"stix2/test/test_custom.py::test_custom_extension_no_properties",
"stix2/test/test_custom.py::test_custom_extension_empty_properties",
"stix2/test/test_custom.py::test_custom_extension_dict_properties",
"stix2/test/test_custom.py::test_custom_extension_no_init_1",
"stix2/test/test_custom.py::test_custom_extension_no_init_2",
"stix2/test/test_custom.py::test_parse_observable_with_custom_extension",
"stix2/test/test_custom.py::test_parse_observable_with_unregistered_custom_extension",
"stix2/test/test_custom.py::test_register_custom_object",
"stix2/test/test_custom.py::test_extension_property_location",
"stix2/test/test_properties.py::test_property",
"stix2/test/test_properties.py::test_basic_clean",
"stix2/test/test_properties.py::test_property_default",
"stix2/test/test_properties.py::test_fixed_property",
"stix2/test/test_properties.py::test_list_property",
"stix2/test/test_properties.py::test_string_property",
"stix2/test/test_properties.py::test_type_property",
"stix2/test/test_properties.py::test_id_property",
"stix2/test/test_properties.py::test_integer_property_valid[2]",
"stix2/test/test_properties.py::test_integer_property_valid[-1]",
"stix2/test/test_properties.py::test_integer_property_valid[3.14]",
"stix2/test/test_properties.py::test_integer_property_valid[False]",
"stix2/test/test_properties.py::test_integer_property_invalid[something]",
"stix2/test/test_properties.py::test_integer_property_invalid[value1]",
"stix2/test/test_properties.py::test_float_property_valid[2]",
"stix2/test/test_properties.py::test_float_property_valid[-1]",
"stix2/test/test_properties.py::test_float_property_valid[3.14]",
"stix2/test/test_properties.py::test_float_property_valid[False]",
"stix2/test/test_properties.py::test_float_property_invalid[something]",
"stix2/test/test_properties.py::test_float_property_invalid[value1]",
"stix2/test/test_properties.py::test_boolean_property_valid[True0]",
"stix2/test/test_properties.py::test_boolean_property_valid[False0]",
"stix2/test/test_properties.py::test_boolean_property_valid[True1]",
"stix2/test/test_properties.py::test_boolean_property_valid[False1]",
"stix2/test/test_properties.py::test_boolean_property_valid[true]",
"stix2/test/test_properties.py::test_boolean_property_valid[false]",
"stix2/test/test_properties.py::test_boolean_property_valid[TRUE]",
"stix2/test/test_properties.py::test_boolean_property_valid[FALSE]",
"stix2/test/test_properties.py::test_boolean_property_valid[T]",
"stix2/test/test_properties.py::test_boolean_property_valid[F]",
"stix2/test/test_properties.py::test_boolean_property_valid[t]",
"stix2/test/test_properties.py::test_boolean_property_valid[f]",
"stix2/test/test_properties.py::test_boolean_property_valid[1]",
"stix2/test/test_properties.py::test_boolean_property_valid[0]",
"stix2/test/test_properties.py::test_boolean_property_invalid[abc]",
"stix2/test/test_properties.py::test_boolean_property_invalid[value1]",
"stix2/test/test_properties.py::test_boolean_property_invalid[value2]",
"stix2/test/test_properties.py::test_boolean_property_invalid[2]",
"stix2/test/test_properties.py::test_boolean_property_invalid[-1]",
"stix2/test/test_properties.py::test_reference_property",
"stix2/test/test_properties.py::test_timestamp_property_valid[2017-01-01T12:34:56Z]",
"stix2/test/test_properties.py::test_timestamp_property_valid[2017-01-01",
"stix2/test/test_properties.py::test_timestamp_property_valid[Jan",
"stix2/test/test_properties.py::test_timestamp_property_invalid",
"stix2/test/test_properties.py::test_binary_property",
"stix2/test/test_properties.py::test_hex_property",
"stix2/test/test_properties.py::test_dictionary_property_valid[d0]",
"stix2/test/test_properties.py::test_dictionary_property_valid[d1]",
"stix2/test/test_properties.py::test_dictionary_property_invalid_key[d0]",
"stix2/test/test_properties.py::test_dictionary_property_invalid_key[d1]",
"stix2/test/test_properties.py::test_dictionary_property_invalid_key[d2]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d0]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d1]",
"stix2/test/test_properties.py::test_hashes_property_valid[value0]",
"stix2/test/test_properties.py::test_hashes_property_valid[value1]",
"stix2/test/test_properties.py::test_hashes_property_invalid[value0]",
"stix2/test/test_properties.py::test_hashes_property_invalid[value1]",
"stix2/test/test_properties.py::test_embedded_property",
"stix2/test/test_properties.py::test_enum_property_valid[value0]",
"stix2/test/test_properties.py::test_enum_property_valid[value1]",
"stix2/test/test_properties.py::test_enum_property_valid[b]",
"stix2/test/test_properties.py::test_enum_property_invalid",
"stix2/test/test_properties.py::test_extension_property_valid",
"stix2/test/test_properties.py::test_extension_property_invalid[1]",
"stix2/test/test_properties.py::test_extension_property_invalid[data1]",
"stix2/test/test_properties.py::test_extension_property_invalid_type",
"stix2/test/test_properties.py::test_extension_at_least_one_property_constraint"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,403 | [
"stix2/properties.py",
"stix2/utils.py"
]
| [
"stix2/properties.py",
"stix2/utils.py"
]
|
|
hgrecco__pint-630 | 90174a33c00a1364fcf2af9c59adf7859b0706b2 | 2018-04-13 17:39:43 | bc754ae302b0c03d1802daddcd76c103a5fdfb67 | diff --git a/.travis.yml b/.travis.yml
index 800263e..fd6781b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,6 @@
language: python
env:
- - UNCERTAINTIES="N" PYTHON="2.7" NUMPY_VERSION=1.11.2
- UNCERTAINTIES="N" PYTHON="3.3" NUMPY_VERSION=1.9.2
- UNCERTAINTIES="N" PYTHON="3.4" NUMPY_VERSION=1.11.2
- UNCERTAINTIES="N" PYTHON="3.5" NUMPY_VERSION=1.11.2
@@ -9,6 +8,12 @@ env:
- UNCERTAINTIES="N" PYTHON="3.6" NUMPY_VERSION=1.11.2
- UNCERTAINTIES="N" PYTHON="2.7" NUMPY_VERSION=0
- UNCERTAINTIES="N" PYTHON="3.5" NUMPY_VERSION=0
+ # Test with the latest numpy version
+ - UNCERTAINTIES="N" PYTHON="2.7" NUMPY_VERSION=1.14
+ - UNCERTAINTIES="N" PYTHON="3.4" NUMPY_VERSION=1.14
+ - UNCERTAINTIES="N" PYTHON="3.5" NUMPY_VERSION=1.14
+ - UNCERTAINTIES="Y" PYTHON="3.5" NUMPY_VERSION=1.14
+ - UNCERTAINTIES="N" PYTHON="3.6" NUMPY_VERSION=1.14
before_install:
- sudo apt-get update
diff --git a/pint/quantity.py b/pint/quantity.py
index 8a6599b..88bfdac 100644
--- a/pint/quantity.py
+++ b/pint/quantity.py
@@ -1003,8 +1003,8 @@ class _Quantity(PrettyIPython, SharedRegistryObject):
raise OffsetUnitCalculusError(self._units)
if getattr(other, 'dimensionless', False):
- other = other.to_base_units()
- self._units **= other.magnitude
+ other = other.to_base_units().magnitude
+ self._units **= other
elif not getattr(other, 'dimensionless', True):
raise DimensionalityError(self._units, 'dimensionless')
else:
@@ -1090,6 +1090,20 @@ class _Quantity(PrettyIPython, SharedRegistryObject):
# We compare to the base class of Quantity because
# each Quantity class is unique.
if not isinstance(other, _Quantity):
+ if _eq(other, 0, True):
+ # Handle the special case in which we compare to zero
+ # (or an array of zeros)
+ if self._is_multiplicative:
+ # compare magnitude
+ return _eq(self._magnitude, other, False)
+ else:
+ # compare the magnitude after converting the
+ # non-multiplicative quantity to base units
+ if self._REGISTRY.autoconvert_offset_to_baseunit:
+ return _eq(self.to_base_units()._magnitude, other, False)
+ else:
+ raise OffsetUnitCalculusError(self._units)
+
return (self.dimensionless and
_eq(self._convert_magnitude(UnitsContainer()), other, False))
@@ -1115,6 +1129,19 @@ class _Quantity(PrettyIPython, SharedRegistryObject):
if not isinstance(other, self.__class__):
if self.dimensionless:
return op(self._convert_magnitude_not_inplace(UnitsContainer()), other)
+ elif _eq(other, 0, True):
+ # Handle the special case in which we compare to zero
+ # (or an array of zeros)
+ if self._is_multiplicative:
+ # compare magnitude
+ return op(self._magnitude, other)
+ else:
+ # compare the magnitude after converting the
+ # non-multiplicative quantity to base units
+ if self._REGISTRY.autoconvert_offset_to_baseunit:
+ return op(self.to_base_units()._magnitude, other)
+ else:
+ raise OffsetUnitCalculusError(self._units)
else:
raise ValueError('Cannot compare Quantity and {}'.format(type(other)))
| Unit tests fails with numpy 1.13.1
Refs: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=876921
Thanks for fixing.
```
S.S................................................./usr/lib/python2.7/unittest/case.py:340: RuntimeWarning: TestResult has no addExpectedFailure method, reporting as passes
RuntimeWarning)
.......S...S............................SSSSSSSSSSS.................S...................................................................................................................................................E.E...E.................................................................................................................................................................................................................................................................................................../build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py:287: RuntimeWarning: to_compact applied to non numerical types has an undefined behavior.
self.assertQuantityAlmostIdentical(q.to_compact(unit=unit),
.......................................F..F..........................................................................................................................................................................
======================================================================
ERROR: test_inplace_exponentiation (pint.testsuite.test_quantity.TestOffsetUnitMath) [with input = ((10, u'degC'), (2, u'')); expected_output = [u'error', (80173.92249999999, u'kelvin**2')]]
----------------------------------------------------------------------
Traceback (most recent call last):
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 116, in new_method
return method(self, *param_values)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 137, in newfunc
return func(*arg, **kwargs)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py", line 1165, in test_inplace_exponentiation
self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/quantity.py", line 961, in __ipow__
self._magnitude **= _to_magnitude(other, self.force_ndarray)
TypeError: unsupported operand type(s) for ** or pow(): 'numpy.ndarray' and 'Quantity'
======================================================================
ERROR: test_inplace_exponentiation (pint.testsuite.test_quantity.TestOffsetUnitMath) [with input = ((10, u'kelvin'), (2, u'')); expected_output = [(100.0, u'kelvin**2'), (100.0, u'kelvin**2')]]
----------------------------------------------------------------------
Traceback (most recent call last):
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 116, in new_method
return method(self, *param_values)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 137, in newfunc
return func(*arg, **kwargs)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py", line 1165, in test_inplace_exponentiation
self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/quantity.py", line 961, in __ipow__
self._magnitude **= _to_magnitude(other, self.force_ndarray)
TypeError: unsupported operand type(s) for ** or pow(): 'numpy.ndarray' and 'Quantity'
======================================================================
ERROR: test_inplace_exponentiation (pint.testsuite.test_quantity.TestOffsetUnitMath) [with input = ((10, u'degC'), (500.0, u'millikelvin/kelvin')); expected_output = [u'error', (16.827061537891872, u'kelvin**0.5')]]
----------------------------------------------------------------------
Traceback (most recent call last):
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 116, in new_method
return method(self, *param_values)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/parameterized.py", line 137, in newfunc
return func(*arg, **kwargs)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_quantity.py", line 1165, in test_inplace_exponentiation
self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/quantity.py", line 961, in __ipow__
self._magnitude **= _to_magnitude(other, self.force_ndarray)
TypeError: unsupported operand type(s) for ** or pow(): 'numpy.ndarray' and 'Quantity'
======================================================================
FAIL: test_isfinite (pint.testsuite.test_umath.TestFloatingUfuncs)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 617, in test_isfinite
(self.q1, self.qm, self.qless))
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 101, in _testn
self._test1(func, ok_with, raise_with, output_units=None, results=results)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 85, in _test1
self.assertQuantityAlmostEqual(qm, res, rtol=rtol, msg=err_msg)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/__init__.py", line 117, in assertQuantityAlmostEqual
np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 1395, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 778, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=1e-06, atol=0
At isreal with [ 1. 2. 3. 4.] joule
(mismatch 100.0%)
x: array(False, dtype=bool)
y: array([ True, True, True, True], dtype=bool)
======================================================================
FAIL: test_isreal (pint.testsuite.test_umath.TestFloatingUfuncs)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 609, in test_isreal
(self.q1, self.qm, self.qless))
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 101, in _testn
self._test1(func, ok_with, raise_with, output_units=None, results=results)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/test_umath.py", line 85, in _test1
self.assertQuantityAlmostEqual(qm, res, rtol=rtol, msg=err_msg)
File "/build/1st/python-pint-0.8.1/.pybuild/pythonX.Y_2.7/build/pint/testsuite/__init__.py", line 117, in assertQuantityAlmostEqual
np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 1395, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/usr/lib/python2.7/dist-packages/numpy/testing/utils.py", line 778, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=1e-06, atol=0
At isreal with [ 1. 2. 3. 4.] joule
(mismatch 100.0%)
x: array(False, dtype=bool)
y: array([ True, True, True, True], dtype=bool)
----------------------------------------------------------------------
Ran 779 tests in 40.510s
``` | hgrecco/pint | diff --git a/pint/testsuite/test_quantity.py b/pint/testsuite/test_quantity.py
index 54d460a..5c0e22b 100644
--- a/pint/testsuite/test_quantity.py
+++ b/pint/testsuite/test_quantity.py
@@ -1296,3 +1296,107 @@ class TestTimedelta(QuantityTestCase):
after = 3 * self.ureg.second
with self.assertRaises(DimensionalityError):
after -= d
+
+
+class TestCompareZero(QuantityTestCase):
+ """This test case checks the special treatment that the zero value
+ receives in the comparisons: pint>=0.9 supports comparisons against zero
+ even for non-dimensionless quantities
+ """
+
+ def test_equal_zero(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = False
+ self.assertTrue(ureg.Quantity(0, ureg.J) == 0)
+ self.assertFalse(ureg.Quantity(0, ureg.J) == ureg.Quantity(0, ''))
+ self.assertFalse(ureg.Quantity(5, ureg.J) == 0)
+
+ @helpers.requires_numpy()
+ def test_equal_zero_NP(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = False
+ aeq = np.testing.assert_array_equal
+ aeq(ureg.Quantity(0, ureg.J) == np.zeros(3),
+ np.asarray([True, True, True]))
+ aeq(ureg.Quantity(5, ureg.J) == np.zeros(3),
+ np.asarray([False, False, False]))
+ aeq(ureg.Quantity(np.arange(3), ureg.J) == np.zeros(3),
+ np.asarray([True, False, False]))
+ self.assertFalse(ureg.Quantity(np.arange(4), ureg.J) == np.zeros(3))
+
+ def test_offset_equal_zero(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = False
+ q0 = ureg.Quantity(-273.15, 'degC')
+ q1 = ureg.Quantity(0, 'degC')
+ q2 = ureg.Quantity(5, 'degC')
+ self.assertRaises(OffsetUnitCalculusError, q0.__eq__, 0)
+ self.assertRaises(OffsetUnitCalculusError, q1.__eq__, 0)
+ self.assertRaises(OffsetUnitCalculusError, q2.__eq__, 0)
+ self.assertFalse(q0 == ureg.Quantity(0, ''))
+
+ def test_offset_autoconvert_equal_zero(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = True
+ q0 = ureg.Quantity(-273.15, 'degC')
+ q1 = ureg.Quantity(0, 'degC')
+ q2 = ureg.Quantity(5, 'degC')
+ self.assertTrue(q0 == 0)
+ self.assertFalse(q1 == 0)
+ self.assertFalse(q2 == 0)
+ self.assertFalse(q0 == ureg.Quantity(0, ''))
+
+ def test_gt_zero(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = False
+ q0 = ureg.Quantity(0, 'J')
+ q0m = ureg.Quantity(0, 'm')
+ q0less = ureg.Quantity(0, '')
+ qpos = ureg.Quantity(5, 'J')
+ qneg = ureg.Quantity(-5, 'J')
+ self.assertTrue(qpos > q0)
+ self.assertTrue(qpos > 0)
+ self.assertFalse(qneg > 0)
+ self.assertRaises(DimensionalityError, qpos.__gt__, q0less)
+ self.assertRaises(DimensionalityError, qpos.__gt__, q0m)
+
+ @helpers.requires_numpy()
+ def test_gt_zero_NP(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = False
+ qpos = ureg.Quantity(5, 'J')
+ qneg = ureg.Quantity(-5, 'J')
+ aeq = np.testing.assert_array_equal
+ aeq(qpos > np.zeros(3), np.asarray([True, True, True]))
+ aeq(qneg > np.zeros(3), np.asarray([False, False, False]))
+ aeq(ureg.Quantity(np.arange(-1, 2), ureg.J) > np.zeros(3),
+ np.asarray([False, False, True]))
+ aeq(ureg.Quantity(np.arange(-1, 2), ureg.J) > np.zeros(3),
+ np.asarray([False, False, True]))
+ self.assertRaises(ValueError,
+ ureg.Quantity(np.arange(-1, 2), ureg.J).__gt__,
+ np.zeros(4))
+
+ def test_offset_gt_zero(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = False
+ q0 = ureg.Quantity(-273.15, 'degC')
+ q1 = ureg.Quantity(0, 'degC')
+ q2 = ureg.Quantity(5, 'degC')
+ self.assertRaises(OffsetUnitCalculusError, q0.__gt__, 0)
+ self.assertRaises(OffsetUnitCalculusError, q1.__gt__, 0)
+ self.assertRaises(OffsetUnitCalculusError, q2.__gt__, 0)
+ self.assertRaises(DimensionalityError, q1.__gt__,
+ ureg.Quantity(0, ''))
+
+ def test_offset_autoconvert_gt_zero(self):
+ ureg = self.ureg
+ ureg.autoconvert_offset_to_baseunit = True
+ q0 = ureg.Quantity(-273.15, 'degC')
+ q1 = ureg.Quantity(0, 'degC')
+ q2 = ureg.Quantity(5, 'degC')
+ self.assertFalse(q0 > 0)
+ self.assertTrue(q1 > 0)
+ self.assertTrue(q2 > 0)
+ self.assertRaises(DimensionalityError, q1.__gt__,
+ ureg.Quantity(0, ''))
\ No newline at end of file
diff --git a/pint/testsuite/test_umath.py b/pint/testsuite/test_umath.py
index 0d0c544..589aaa4 100644
--- a/pint/testsuite/test_umath.py
+++ b/pint/testsuite/test_umath.py
@@ -613,7 +613,7 @@ class TestFloatingUfuncs(TestUFuncs):
(self.q1, self.qm, self.qless))
def test_isfinite(self):
- self._testn(np.isreal,
+ self._testn(np.isfinite,
(self.q1, self.qm, self.qless))
def test_isinf(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 2
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"matplotlib>=2.0.0",
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
cycler==0.11.0
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
kiwisolver==1.3.1
matplotlib==3.3.4
numpy==1.19.5
packaging==21.3
Pillow==8.4.0
-e git+https://github.com/hgrecco/pint.git@90174a33c00a1364fcf2af9c59adf7859b0706b2#egg=Pint
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pint
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- cycler==0.11.0
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- kiwisolver==1.3.1
- matplotlib==3.3.4
- numpy==1.19.5
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pint
| [
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00013",
"pint/testsuite/test_quantity.py::TestCompareZero::test_equal_zero",
"pint/testsuite/test_quantity.py::TestCompareZero::test_equal_zero_NP",
"pint/testsuite/test_quantity.py::TestCompareZero::test_gt_zero",
"pint/testsuite/test_quantity.py::TestCompareZero::test_gt_zero_NP",
"pint/testsuite/test_quantity.py::TestCompareZero::test_offset_autoconvert_equal_zero",
"pint/testsuite/test_quantity.py::TestCompareZero::test_offset_autoconvert_gt_zero",
"pint/testsuite/test_quantity.py::TestCompareZero::test_offset_equal_zero",
"pint/testsuite/test_quantity.py::TestCompareZero::test_offset_gt_zero",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_isreal"
]
| []
| [
"pint/testsuite/test_quantity.py::TestQuantity::test_both_symbol",
"pint/testsuite/test_quantity.py::TestQuantity::test_context_attr",
"pint/testsuite/test_quantity.py::TestQuantity::test_convert",
"pint/testsuite/test_quantity.py::TestQuantity::test_default_formatting",
"pint/testsuite/test_quantity.py::TestQuantity::test_dimensionless_units",
"pint/testsuite/test_quantity.py::TestQuantity::test_exponent_formatting",
"pint/testsuite/test_quantity.py::TestQuantity::test_format_compact",
"pint/testsuite/test_quantity.py::TestQuantity::test_ipython",
"pint/testsuite/test_quantity.py::TestQuantity::test_offset",
"pint/testsuite/test_quantity.py::TestQuantity::test_offset_delta",
"pint/testsuite/test_quantity.py::TestQuantity::test_pickle",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_bool",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_comparison_convert",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_creation",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_format",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_hash",
"pint/testsuite/test_quantity.py::TestQuantity::test_quantity_repr",
"pint/testsuite/test_quantity.py::TestQuantity::test_retain_unit",
"pint/testsuite/test_quantity.py::TestQuantity::test_to_base_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_derived_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_dimensionally_simple_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_fractional_exponent_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_square_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_inverse_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_limits_magnitudes",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_nonnumeric_magnitudes",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_power_units",
"pint/testsuite/test_quantity.py::TestQuantityToCompact::test_unit_parameter",
"pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_float",
"pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_fraction",
"pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_nparray",
"pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_abs_round",
"pint/testsuite/test_quantity.py::TestQuantityBasicMath::test_quantity_float_complex",
"pint/testsuite/test_quantity.py::TestDimensions::test_dimensionality",
"pint/testsuite/test_quantity.py::TestDimensions::test_get_dimensionality",
"pint/testsuite/test_quantity.py::TestDimensions::test_inclusion",
"pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_dimensionality",
"pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_get_dimensionality",
"pint/testsuite/test_quantity.py::TestQuantityWithDefaultRegistry::test_inclusion",
"pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_dimensionality",
"pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_get_dimensionality",
"pint/testsuite/test_quantity.py::TestDimensionsWithDefaultRegistry::test_inclusion",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_addition_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_division_with_scalar_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_exponentiation_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_addition_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_exponentiation_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_multiplication_with_autoconvert_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_subtraction_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_inplace_truedivision_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_autoconvert_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_multiplication_with_scalar_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_subtraction_00036",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00001",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00002",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00003",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00004",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00005",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00006",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00007",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00008",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00009",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00010",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00011",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00012",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00013",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00014",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00015",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00016",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00017",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00018",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00019",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00020",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00021",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00022",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00023",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00024",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00025",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00026",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00027",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00028",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00029",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00030",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00031",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00032",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00033",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00034",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00035",
"pint/testsuite/test_quantity.py::TestOffsetUnitMath::test_truedivision_00036",
"pint/testsuite/test_quantity.py::TestDimensionReduction::test_imul_and_div_reduction",
"pint/testsuite/test_quantity.py::TestDimensionReduction::test_mul_and_div_reduction",
"pint/testsuite/test_quantity.py::TestDimensionReduction::test_nocoerce_creation",
"pint/testsuite/test_quantity.py::TestDimensionReduction::test_reduction_to_dimensionless",
"pint/testsuite/test_quantity.py::TestTimedelta::test_add_sub",
"pint/testsuite/test_quantity.py::TestTimedelta::test_iadd_isub",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_absolute",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_add",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_conj",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_divide",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_exp",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_exp2",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_expm1",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_floor_divide",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_fmod",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_log",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_log10",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_log2",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_logaddexp",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_logaddexp2",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_mod",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_multiply",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_negative",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_reciprocal",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_remainder",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_rint",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_sqrt",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_square",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_subtract",
"pint/testsuite/test_umath.py::TestMathUfuncs::test_true_divide",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arccos",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arccosh",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arcsin",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arcsinh",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arctan",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arctan2",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_arctanh",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_cos",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_cosh",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_deg2rad",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_hypot",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_rad2deg",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_sin",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_sinh",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_tan",
"pint/testsuite/test_umath.py::TestTrigUfuncs::test_tanh",
"pint/testsuite/test_umath.py::TestComparisonUfuncs::test_equal",
"pint/testsuite/test_umath.py::TestComparisonUfuncs::test_greater",
"pint/testsuite/test_umath.py::TestComparisonUfuncs::test_greater_equal",
"pint/testsuite/test_umath.py::TestComparisonUfuncs::test_less",
"pint/testsuite/test_umath.py::TestComparisonUfuncs::test_less_equal",
"pint/testsuite/test_umath.py::TestComparisonUfuncs::test_not_equal",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_ceil",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_copysign",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_floor",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_fmod",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_frexp",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_iscomplex",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_isfinite",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_isinf",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_isnan",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_ldexp",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_modf",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_nextafter",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_signbit",
"pint/testsuite/test_umath.py::TestFloatingUfuncs::test_trunc"
]
| []
| BSD | 2,404 | [
".travis.yml",
"pint/quantity.py"
]
| [
".travis.yml",
"pint/quantity.py"
]
|
|
TheFriendlyCoder__friendlypins-17 | 51374c346ee260e0825f4d83f1bd95469c17d0ae | 2018-04-14 19:13:38 | 51374c346ee260e0825f4d83f1bd95469c17d0ae | diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index 7cfd3c2..f541582 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -82,7 +82,7 @@ class Board(object):
assert 'data' in raw
for cur_item in raw['data']:
- retval.append(Pin(cur_item))
+ retval.append(Pin(cur_item, self._root_url, self._token))
self._log.debug("Raw keys are %s", raw.keys())
self._log.debug("Paged info is %s", raw['page'])
diff --git a/src/friendlypins/pin.py b/src/friendlypins/pin.py
index 9c89f66..9394aaa 100644
--- a/src/friendlypins/pin.py
+++ b/src/friendlypins/pin.py
@@ -1,15 +1,22 @@
"""Primitives for operating on Pinterest pins"""
import logging
import json
+import requests
+from friendlypins.headers import Headers
class Pin(object):
"""Abstraction around a Pinterest pin
- :param dict data: Raw Pinterest API data describing a pin"""
+ :param dict data: Raw Pinterest API data describing a pin
+ :param str root_url: URL of the Pinterest REST API
+ :param str token: Authentication token for interacting with the API
+ """
- def __init__(self, data):
+ def __init__(self, data, root_url, token):
self._log = logging.getLogger(__name__)
+ self._root_url = root_url
+ self._token = token
self._data = data
def __str__(self):
@@ -69,5 +76,19 @@ class Pin(object):
return self._data['media']['type']
+ def delete(self):
+ """Removes this pin from it's respective board"""
+ temp_url = '{0}/pins/{1}/'.format(
+ self._root_url,
+ self.unique_id)
+ temp_url += "?access_token={0}".format(self._token)
+
+ response = requests.delete(temp_url)
+
+ header = Headers(response.headers)
+ self._log.debug("Boards query response header %s", header)
+
+ response.raise_for_status()
+
if __name__ == "__main__":
pass
| Add method for deleting pins
To facilitate deletion of pins, we need to add a new `delete()` method to the Pins class to serve this purpose. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_api.py b/unit_tests/test_api.py
index 8a0cc08..0a6e5c1 100644
--- a/unit_tests/test_api.py
+++ b/unit_tests/test_api.py
@@ -27,6 +27,8 @@ def test_get_user():
assert expected_lastname == result.last_name
assert expected_id == result.unique_id
+ mock_response.raise_for_status.assert_called_once()
+
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
diff --git a/unit_tests/test_board.py b/unit_tests/test_board.py
index eac8bbc..41eef02 100644
--- a/unit_tests/test_board.py
+++ b/unit_tests/test_board.py
@@ -56,5 +56,7 @@ def test_get_all_pins():
assert expected_note == result[0].note
assert expected_id == result[0].unique_id
assert expected_mediatype == result[0].media_type
+
+ mock_response.raise_for_status.assert_called_once()
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
diff --git a/unit_tests/test_pin.py b/unit_tests/test_pin.py
index f18318b..1bbc348 100644
--- a/unit_tests/test_pin.py
+++ b/unit_tests/test_pin.py
@@ -18,7 +18,7 @@ def test_pin_properties():
}
}
- obj = Pin(sample_data)
+ obj = Pin(sample_data, "http://www.pinterest.com", "1234abcd")
assert obj.unique_id == expected_id
assert obj.note == expected_note
@@ -31,7 +31,6 @@ def test_pin_missing_media_type():
expected_note = "Here's my note"
expected_url = "https://www.pinterest.ca/MyName/MyPin/"
expected_link = "http://www.google.ca"
- expected_media_type = "video"
sample_data = {
"id": str(expected_id),
"note": expected_note,
@@ -39,7 +38,7 @@ def test_pin_missing_media_type():
"url": expected_url,
}
- obj = Pin(sample_data)
+ obj = Pin(sample_data, "http://www.pinterest.com", "1234abcd")
assert obj.unique_id == expected_id
assert obj.note == expected_note
@@ -47,5 +46,22 @@ def test_pin_missing_media_type():
assert obj.link == expected_link
assert obj.media_type is None
+def test_delete():
+ api_url = "https://pinterest_url/v1"
+ token = "1234abcd"
+
+ data = {
+ "id": "12345678"
+ }
+
+ obj = Pin(data, api_url, token)
+
+ with mock.patch("friendlypins.pin.requests") as mock_requests:
+ mock_response = mock.MagicMock()
+ mock_requests.delete.return_value = mock_response
+
+ obj.delete()
+ mock_requests.delete.assert_called_once()
+ mock_response.raise_for_status.assert_called_once()
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
\ No newline at end of file
diff --git a/unit_tests/test_user.py b/unit_tests/test_user.py
index 04c0809..596f239 100644
--- a/unit_tests/test_user.py
+++ b/unit_tests/test_user.py
@@ -30,5 +30,7 @@ def test_get_boards():
assert expected_name == result[0].name
assert expected_id == result[0].unique_id
+ mock_response.raise_for_status.assert_called_once()
+
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@51374c346ee260e0825f4d83f1bd95469c17d0ae#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==3.0.0a4
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==3.0.0a4
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_pin.py::test_pin_properties",
"unit_tests/test_pin.py::test_pin_missing_media_type",
"unit_tests/test_pin.py::test_delete"
]
| []
| [
"unit_tests/test_api.py::test_get_user",
"unit_tests/test_board.py::test_board_properties",
"unit_tests/test_board.py::test_get_all_pins",
"unit_tests/test_user.py::test_get_boards"
]
| []
| Apache License 2.0 | 2,405 | [
"src/friendlypins/board.py",
"src/friendlypins/pin.py"
]
| [
"src/friendlypins/board.py",
"src/friendlypins/pin.py"
]
|
|
TheFriendlyCoder__friendlypins-25 | 0517a65d8d98dd15c1de3eca4d804d9b21744aeb | 2018-04-15 01:19:59 | 0517a65d8d98dd15c1de3eca4d804d9b21744aeb | diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index 01b3d4a..ee3b00a 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -37,6 +37,7 @@ class API(object): # pylint: disable=too-few-public-methods
else:
temp_url = "{0}/me".format(self._root_url)
temp_url += "?access_token={0}".format(self._token)
+ temp_url += "&fields=id,username,first_name,last_name,bio,created_at,counts,image"
response = requests.get(temp_url)
response.raise_for_status()
diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index 3a19248..1fea44c 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -58,6 +58,14 @@ class Board(object):
"""
return self._data['url']
+ @property
+ def num_pins(self):
+ """Gets the total number of pins linked to this board
+
+ :rtype: :class:`int`
+ """
+ return int(self._data['counts']['pins'])
+
@property
def all_pins(self):
"""Gets a list of all pins from this board
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index 78268e5..3121d1d 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -79,6 +79,22 @@ class User(object):
"""
return self._data['url']
+ @property
+ def num_pins(self):
+ """Gets the total number of pins owned by this user
+
+ :rtype: :class:`int`
+ """
+ return int(self._data['counts']['pins'])
+
+ @property
+ def num_boards(self):
+ """Gets the total number of boards owned by this user
+
+ :rtype: :class:`int`
+ """
+ return int(self._data['counts']['boards'])
+
@property
def boards(self):
"""Gets a list of boards owned by this user
@@ -89,6 +105,7 @@ class User(object):
temp_url = '{0}/me/boards/'.format(self._root_url)
temp_url += "?access_token={0}".format(self._token)
+ temp_url += "&fields=id,name,url,description,creator,created_at,counts,image"
response = requests.get(temp_url)
header = Headers(response.headers)
| Add accessor to get number of pins on a board
It appears as though there is some optional metadata that can be read from a board API endpoint, one of which contains the total number of pins on that board. We should enhance the Board class to pull down that extra metadata upon instantiation, and provide an accessor that reads the number of pins on the board.
This has the added benefit of not having to read in all the pins contained in the board en-mass to calculate this simple statistic. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_board.py b/unit_tests/test_board.py
index fe44dc9..e2eafe1 100644
--- a/unit_tests/test_board.py
+++ b/unit_tests/test_board.py
@@ -6,16 +6,21 @@ def test_board_properties():
expected_id = 1234
expected_name = "MyBoard"
expected_url = "https://www.pinterest.ca/MyName/MyBoard/"
+ expected_pin_count = 42
sample_data = {
"id": str(expected_id),
"name": expected_name,
- "url": expected_url
+ "url": expected_url,
+ "counts": {
+ "pins": str(expected_pin_count)
+ }
}
obj = Board(sample_data, 'http://pinterest_url', '1234abcd')
assert obj.unique_id == expected_id
assert obj.name == expected_name
assert obj.url == expected_url
+ assert obj.num_pins == expected_pin_count
def test_get_all_pins():
data = {
diff --git a/unit_tests/test_user.py b/unit_tests/test_user.py
index 19e7a8a..5850b5b 100644
--- a/unit_tests/test_user.py
+++ b/unit_tests/test_user.py
@@ -2,6 +2,33 @@ import pytest
import mock
from friendlypins.user import User
+def test_user_properties():
+ expected_url = 'https://www.pinterest.com/MyUserName/'
+ expected_firstname = "John"
+ expected_lastname = "Doe"
+ expected_id = 12345678
+ expected_board_count = 32
+ expected_pin_count = 512
+ data = {
+ 'url': expected_url,
+ 'first_name': expected_firstname,
+ 'last_name': expected_lastname,
+ 'id': str(expected_id),
+ 'counts': {
+ 'boards': str(expected_board_count),
+ 'pins': str(expected_pin_count)
+ }
+ }
+
+ obj = User(data, "https://pinterest_url/v1", "1234abcd")
+ assert expected_url == obj.url
+ assert expected_firstname == obj.first_name
+ assert expected_lastname == obj.last_name
+ assert expected_id == obj.unique_id
+ assert expected_board_count == obj.num_boards
+ assert expected_pin_count == obj.num_pins
+
+
def test_get_boards():
data = {
"first_name": "John",
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@0517a65d8d98dd15c1de3eca4d804d9b21744aeb#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==3.0.0a4
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==3.0.0a4
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_board.py::test_board_properties",
"unit_tests/test_user.py::test_user_properties"
]
| []
| [
"unit_tests/test_board.py::test_get_all_pins",
"unit_tests/test_user.py::test_get_boards"
]
| []
| Apache License 2.0 | 2,406 | [
"src/friendlypins/board.py",
"src/friendlypins/user.py",
"src/friendlypins/api.py"
]
| [
"src/friendlypins/board.py",
"src/friendlypins/user.py",
"src/friendlypins/api.py"
]
|
|
TheFriendlyCoder__friendlypins-27 | bb21be18fb37d054ecc015d31912e3bfc249fd0c | 2018-04-15 01:59:25 | bb21be18fb37d054ecc015d31912e3bfc249fd0c | diff --git a/src/friendlypins/scripts/fpins.py b/src/friendlypins/scripts/fpins.py
index 0835d2d..2fd8974 100644
--- a/src/friendlypins/scripts/fpins.py
+++ b/src/friendlypins/scripts/fpins.py
@@ -13,7 +13,7 @@ def _download_thumbnails(args):
:returns: zero on success, non-zero on failure
:rtype: :class:`int`
"""
- return download_thumbnails(args.token, args.board, args.path)
+ return download_thumbnails(args.token, args.board, args.path, args.delete)
def get_args(args):
@@ -59,6 +59,11 @@ def get_args(args):
required=True,
help="Path to the folder where thumbnails are to be downloaded",
)
+ thumbnails_cmd.add_argument(
+ '--delete', '-d',
+ action="store_true",
+ help="Deletes each pin as it's thumbnail is downloaded"
+ )
# If we've been given debugging arguments, convert them to the
# format argparse expects
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index e6208fd..84c63b7 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -38,12 +38,13 @@ def _download_pin(pin, folder):
return 2
return 0
-def download_thumbnails(api_token, board_name, output_folder):
+def download_thumbnails(api_token, board_name, output_folder, delete):
"""Downloads thumbnails of all pins on a board
:param str api_token: Authentication token for accessing the Pinterest API
:param str board_name: name of the board containing the pins to process
:param str output_folder: path where the thumbnails are to be downloaded
+ :param bool delete: flag to delete pins as their thumbnails are downloaded
:returns:
status code describing the result of the action
zero on success, non-zero on failure
@@ -71,6 +72,9 @@ def download_thumbnails(api_token, board_name, output_folder):
retval = _download_pin(cur_pin, output_folder)
if retval:
return retval
+ if delete:
+ cur_pin.delete()
+
return 0
if __name__ == "__main__":
| Add option to delete pins post download
We should add an option to the `fpins dt` command to delete pins once their thumbnails have been successfully downloaded. I'm thinking adding support for an optional '--delete' parameter should do the trick. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index 0d5e174..d92a3e9 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -74,7 +74,7 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
mock_os.path.exists.return_value = False
# Flex our code
- result = download_thumbnails("1234abcd", expected_board_name, "/tmp")
+ result = download_thumbnails("1234abcd", expected_board_name, "/tmp", False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
@@ -85,6 +85,91 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
mock_open.assert_called()
[email protected]("friendlypins.utils.console_actions.os")
[email protected]("friendlypins.utils.console_actions.open")
[email protected]("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
[email protected]("friendlypins.pin.requests")
+def test_download_thumbnails_with_delete(pin_requests, api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
+
+ # Fake user data for the user authenticating to Pinterest
+ expected_user_data = {
+ 'data': {
+ 'url': 'https://www.pinterest.com/MyUserName/',
+ 'first_name': "John",
+ 'last_name': "Doe",
+ 'id': "12345678"
+ }
+ }
+
+ # Fake board data for the boards owned by the fake authenticated user
+ expected_board_name = "MyBoard"
+ expected_board_data = {
+ "data": [{
+ "id": "6789",
+ "name": expected_board_name,
+ "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ }]
+ }
+
+ # Fake pin data for the fake board, with fake thumbnail metadata
+ expected_thumbnail_url = "https://i.pinimg.com/originals/1/2/3/abcd.jpg"
+ expected_pin_data = {
+ "data": [{
+ "id": "1234",
+ "url": "https://www.pinterest.ca/MyName/MyPin/",
+ "note": "My Pin descriptive text",
+ "link": "http://www.mysite.com/target",
+ "media": {
+ "type": "image"
+ },
+ "image": {
+ "original": {
+ "url": expected_thumbnail_url,
+ "width": "800",
+ "height": "600"
+ }
+ }
+ }],
+ "page": {
+ "cursor": None
+ }
+ }
+
+ # fake our Pinterest API data to flex our implementation logic
+ mock_user_response = mock.MagicMock()
+ mock_user_response.json.return_value = expected_user_data
+ api_requests.get.return_value = mock_user_response
+
+ mock_board_response = mock.MagicMock()
+ mock_board_response.json.return_value = expected_board_data
+ user_requests.get.return_value = mock_board_response
+
+ mock_pin_response = mock.MagicMock()
+ mock_pin_response.json.return_value = expected_pin_data
+ board_requests.get.return_value = mock_pin_response
+
+ mock_delete_response = mock.MagicMock()
+ pin_requests.delete.return_value = mock_delete_response
+
+ # Make sure the code think's the output file where the
+ # thumbnail is to be downloaded doesn't already exist
+ mock_os.path.exists.return_value = False
+
+ # Flex our code
+ result = download_thumbnails("1234abcd", expected_board_name, "/tmp", True)
+
+ # Make sure the call was successful, and that our mock APIs
+ # that must have executed as part of the process were called
+ assert result == 0
+ action_requests.get.assert_called_once_with(expected_thumbnail_url, stream=True)
+ mock_os.makedirs.assert_called()
+ mock_os.path.exists.assert_called()
+ mock_open.assert_called()
+ pin_requests.delete.assert_called_once()
+
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
@@ -160,7 +245,7 @@ def test_download_thumbnails_error(api_requests, user_requests, board_requests,
action_requests.get.return_value = mock_action_response
# Flex our code
- result = download_thumbnails("1234abcd", expected_board_name, "/tmp")
+ result = download_thumbnails("1234abcd", expected_board_name, "/tmp", False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
@@ -239,7 +324,7 @@ def test_download_thumbnails_missing_board(api_requests, user_requests, board_re
mock_os.path.exists.return_value = False
# Flex our code
- result = download_thumbnails("1234abcd", "FuBar", "/tmp")
+ result = download_thumbnails("1234abcd", "FuBar", "/tmp", False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
@@ -323,7 +408,7 @@ def test_download_thumbnails_exists(api_requests, user_requests, board_requests,
# Flex our code
output_folder = "/tmp"
- result = download_thumbnails("1234abcd", expected_board_name, output_folder)
+ result = download_thumbnails("1234abcd", expected_board_name, output_folder, False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requests six dateutils wheel twine pytest pytest-cov mock radon pylint sphinx tox",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work
appdirs==1.4.4
astroid @ file:///tmp/build/80754af9/astroid_1628055155277/work
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel @ file:///tmp/build/80754af9/babel_1620871417480/work
bleach @ file:///opt/conda/conda-bld/bleach_1641577558959/work
brotlipy==0.7.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
cmarkgfm @ file:///tmp/build/80754af9/cmarkgfm_1613500901851/work
colorama @ file:///tmp/build/80754af9/colorama_1607707115595/work
coverage @ file:///tmp/build/80754af9/coverage_1614614208500/work
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
dateutils @ file:///home/conda/feedstock_root/build_artifacts/dateutils_1603450192521/work
distlib @ file:///tmp/build/80754af9/distlib_1622544193819/work
docutils @ file:///tmp/build/80754af9/docutils_1620827982266/work
filelock @ file:///tmp/build/80754af9/filelock_1638521398314/work
flake8 @ file:///opt/conda/conda-bld/flake8_1648129545443/work
flake8-polyfill==1.0.2
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@bb21be18fb37d054ecc015d31912e3bfc249fd0c#egg=friendlypins
future==0.18.2
idna @ file:///tmp/build/80754af9/idna_1637925883363/work
imagesize @ file:///tmp/build/80754af9/imagesize_1637939814114/work
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
importlib-resources @ file:///tmp/build/80754af9/importlib_resources_1625135880749/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort @ file:///tmp/build/80754af9/isort_1628603791788/work
jeepney @ file:///tmp/build/80754af9/jeepney_1627537048313/work
Jinja2 @ file:///opt/conda/conda-bld/jinja2_1647436528585/work
keyring @ file:///tmp/build/80754af9/keyring_1629312932578/work
lazy-object-proxy @ file:///tmp/build/80754af9/lazy-object-proxy_1616526919073/work
mando==0.6.4
MarkupSafe @ file:///tmp/build/80754af9/markupsafe_1621528150516/work
mccabe==0.6.1
mock @ file:///tmp/build/80754af9/mock_1607622725907/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo @ file:///tmp/build/80754af9/pkginfo_1643162084911/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle @ file:///tmp/build/80754af9/pycodestyle_1636635402688/work
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
pyflakes @ file:///tmp/build/80754af9/pyflakes_1636644436481/work
Pygments @ file:///opt/conda/conda-bld/pygments_1644249106324/work
pylint @ file:///tmp/build/80754af9/pylint_1627536508692/work
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305763431/work
pytest==6.2.4
pytest-cov @ file:///tmp/build/80754af9/pytest-cov_1637159997573/work
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2021.3
radon @ file:///tmp/build/80754af9/radon_1620736734533/work
readme-renderer @ file:///tmp/build/80754af9/readme_renderer_1613501421530/work
requests @ file:///opt/conda/conda-bld/requests_1641824580448/work
requests-toolbelt @ file:///Users/ktietz/demo/mc3/conda-bld/requests-toolbelt_1629456163440/work
rfc3986 @ file:///Users/ktietz/demo/mc3/conda-bld/rfc3986_1629478296451/work
SecretStorage @ file:///tmp/build/80754af9/secretstorage_1614022884787/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
snowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1637937080595/work
Sphinx @ file:///opt/conda/conda-bld/sphinx_1643644169832/work
sphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work
sphinxcontrib-devhelp @ file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work
sphinxcontrib-htmlhelp @ file:///tmp/build/80754af9/sphinxcontrib-htmlhelp_1623945626792/work
sphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work
sphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work
sphinxcontrib-serializinghtml @ file:///tmp/build/80754af9/sphinxcontrib-serializinghtml_1624451540180/work
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox @ file:///home/conda/feedstock_root/build_artifacts/tox_1656596559115/work
tqdm @ file:///opt/conda/conda-bld/tqdm_1647339053476/work
twine==1.15.0
typed-ast @ file:///tmp/build/80754af9/typed-ast_1624953671446/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3 @ file:///opt/conda/conda-bld/urllib3_1643638302206/work
virtualenv @ file:///tmp/build/80754af9/virtualenv_1620973272978/work
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- alabaster=0.7.12=pyhd3eb1b0_0
- appdirs=1.4.4=pyhd3eb1b0_0
- astroid=2.6.6=py36h06a4308_0
- attrs=21.4.0=pyhd3eb1b0_0
- babel=2.9.1=pyhd3eb1b0_0
- bleach=4.1.0=pyhd3eb1b0_0
- brotlipy=0.7.0=py36h27cfd23_1003
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- cmarkgfm=0.4.2=py36h27cfd23_0
- colorama=0.4.4=pyhd3eb1b0_0
- coverage=5.5=py36h27cfd23_2
- cryptography=35.0.0=py36hd23ed53_0
- dateutils=0.6.12=py_0
- dbus=1.13.18=hb2f20db_0
- distlib=0.3.2=pyhd3eb1b0_0
- docutils=0.17.1=py36h06a4308_1
- expat=2.6.4=h6a678d5_0
- filelock=3.4.0=pyhd3eb1b0_0
- flake8=4.0.1=pyhd3eb1b0_1
- flake8-polyfill=1.0.2=pyhd3eb1b0_1
- future=0.18.2=py36_1
- glib=2.69.1=h4ff587b_1
- idna=3.3=pyhd3eb1b0_0
- imagesize=1.3.0=pyhd3eb1b0_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- importlib_resources=5.2.0=pyhd3eb1b0_1
- iniconfig=1.1.1=pyhd3eb1b0_0
- isort=5.9.3=pyhd3eb1b0_0
- jeepney=0.7.1=pyhd3eb1b0_0
- jinja2=3.0.3=pyhd3eb1b0_0
- keyring=23.1.0=py36h06a4308_0
- lazy-object-proxy=1.6.0=py36h27cfd23_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- markupsafe=2.0.1=py36h27cfd23_0
- mccabe=0.6.1=py36_1
- mock=4.0.3=pyhd3eb1b0_0
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pcre=8.45=h295c915_0
- pip=21.2.2=py36h06a4308_0
- pkginfo=1.8.2=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pycodestyle=2.8.0=pyhd3eb1b0_0
- pycparser=2.21=pyhd3eb1b0_0
- pyflakes=2.4.0=pyhd3eb1b0_0
- pygments=2.11.2=pyhd3eb1b0_0
- pylint=2.9.6=py36h06a4308_1
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pysocks=1.7.1=py36h06a4308_0
- pytest=6.2.4=py36h06a4308_2
- pytest-cov=3.0.0=pyhd3eb1b0_0
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pytz=2021.3=pyhd3eb1b0_0
- radon=4.5.1=pyhd3eb1b0_0
- readline=8.2=h5eee18b_0
- readme_renderer=24.0=py36h06a4308_0
- requests=2.27.1=pyhd3eb1b0_0
- requests-toolbelt=0.9.1=pyhd3eb1b0_0
- rfc3986=1.4.0=pyhd3eb1b0_0
- secretstorage=3.3.1=py36h06a4308_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- snowballstemmer=2.2.0=pyhd3eb1b0_0
- sphinx=4.4.0=pyhd3eb1b0_0
- sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0
- sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0
- sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- tox=3.25.1=pyhd8ed1ab_0
- tqdm=4.63.0=pyhd3eb1b0_0
- typed-ast=1.4.3=py36h7f8727e_1
- typing-extensions=4.1.1=hd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- urllib3=1.26.8=pyhd3eb1b0_0
- virtualenv=20.4.6=py36h06a4308_1
- webencodings=0.5.1=py36_1
- wheel=0.37.1=pyhd3eb1b0_0
- wrapt=1.12.1=py36h7b6447c_1
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- mando==0.6.4
- twine==1.15.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists"
]
| []
| []
| []
| Apache License 2.0 | 2,407 | [
"src/friendlypins/scripts/fpins.py",
"src/friendlypins/utils/console_actions.py"
]
| [
"src/friendlypins/scripts/fpins.py",
"src/friendlypins/utils/console_actions.py"
]
|
|
TheFriendlyCoder__friendlypins-28 | bb21be18fb37d054ecc015d31912e3bfc249fd0c | 2018-04-15 02:00:08 | bb21be18fb37d054ecc015d31912e3bfc249fd0c | diff --git a/src/friendlypins/scripts/fpins.py b/src/friendlypins/scripts/fpins.py
index 0835d2d..26a279b 100644
--- a/src/friendlypins/scripts/fpins.py
+++ b/src/friendlypins/scripts/fpins.py
@@ -13,7 +13,7 @@ def _download_thumbnails(args):
:returns: zero on success, non-zero on failure
:rtype: :class:`int`
"""
- return download_thumbnails(args.token, args.board, args.path)
+ return download_thumbnails(args.token, args.board, args.path, args.delete)
def get_args(args):
@@ -59,6 +59,11 @@ def get_args(args):
required=True,
help="Path to the folder where thumbnails are to be downloaded",
)
+ thumbnails_cmd.add_argument(
+ '--delete', '-d',
+ action="store_true",
+ help="Deletes each pin as it's thumbnail is downloaded"
+ )
# If we've been given debugging arguments, convert them to the
# format argparse expects
@@ -83,6 +88,10 @@ def configure_logging(verbosity):
# Configure a console logger for everything that should show up
# on the shell to the user
console_handler = logging.StreamHandler(sys.stdout)
+ if verbosity == 0:
+ console_handler.setLevel(logging.INFO)
+ else:
+ console_handler.setLevel(logging.DEBUG)
# Configure a file logger for all verbose output to be streamed
# to regardless of the source
@@ -98,17 +107,10 @@ def configure_logging(verbosity):
global_log.addHandler(file_handler)
- # Next, create our application logger here, and configure
- # it's verbosity based on user input
- log = logging.getLogger('friendlypins')
- if verbosity == 0:
- log.setLevel(logging.INFO)
- else:
- log.setLevel(logging.DEBUG)
-
# Make sure we hook our console loggers to the appropriate logger
# based on the level of verbosity the user has requested
if verbosity < 2:
+ log = logging.getLogger('friendlypins')
log.addHandler(console_handler)
else:
global_log.addHandler(console_handler)
@@ -134,7 +136,7 @@ def main(args=None):
if retval == 0:
log.info("Operation completed successfully!")
return retval
- except: # pylint: disable=bare-except
+ except Exception: # pylint: disable=broad-except
log.error("Critical error processing command")
log.error("See verbose output for details")
log.debug("Details: ", exc_info=True)
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index e6208fd..84c63b7 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -38,12 +38,13 @@ def _download_pin(pin, folder):
return 2
return 0
-def download_thumbnails(api_token, board_name, output_folder):
+def download_thumbnails(api_token, board_name, output_folder, delete):
"""Downloads thumbnails of all pins on a board
:param str api_token: Authentication token for accessing the Pinterest API
:param str board_name: name of the board containing the pins to process
:param str output_folder: path where the thumbnails are to be downloaded
+ :param bool delete: flag to delete pins as their thumbnails are downloaded
:returns:
status code describing the result of the action
zero on success, non-zero on failure
@@ -71,6 +72,9 @@ def download_thumbnails(api_token, board_name, output_folder):
retval = _download_pin(cur_pin, output_folder)
if retval:
return retval
+ if delete:
+ cur_pin.delete()
+
return 0
if __name__ == "__main__":
| Fix bug with verbose logging
I just noticed that debug logs are not being redirected to the verbose log file. This needs to be fixed. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index 0d5e174..d92a3e9 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -74,7 +74,7 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
mock_os.path.exists.return_value = False
# Flex our code
- result = download_thumbnails("1234abcd", expected_board_name, "/tmp")
+ result = download_thumbnails("1234abcd", expected_board_name, "/tmp", False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
@@ -85,6 +85,91 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
mock_open.assert_called()
[email protected]("friendlypins.utils.console_actions.os")
[email protected]("friendlypins.utils.console_actions.open")
[email protected]("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
[email protected]("friendlypins.pin.requests")
+def test_download_thumbnails_with_delete(pin_requests, api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
+
+ # Fake user data for the user authenticating to Pinterest
+ expected_user_data = {
+ 'data': {
+ 'url': 'https://www.pinterest.com/MyUserName/',
+ 'first_name': "John",
+ 'last_name': "Doe",
+ 'id': "12345678"
+ }
+ }
+
+ # Fake board data for the boards owned by the fake authenticated user
+ expected_board_name = "MyBoard"
+ expected_board_data = {
+ "data": [{
+ "id": "6789",
+ "name": expected_board_name,
+ "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ }]
+ }
+
+ # Fake pin data for the fake board, with fake thumbnail metadata
+ expected_thumbnail_url = "https://i.pinimg.com/originals/1/2/3/abcd.jpg"
+ expected_pin_data = {
+ "data": [{
+ "id": "1234",
+ "url": "https://www.pinterest.ca/MyName/MyPin/",
+ "note": "My Pin descriptive text",
+ "link": "http://www.mysite.com/target",
+ "media": {
+ "type": "image"
+ },
+ "image": {
+ "original": {
+ "url": expected_thumbnail_url,
+ "width": "800",
+ "height": "600"
+ }
+ }
+ }],
+ "page": {
+ "cursor": None
+ }
+ }
+
+ # fake our Pinterest API data to flex our implementation logic
+ mock_user_response = mock.MagicMock()
+ mock_user_response.json.return_value = expected_user_data
+ api_requests.get.return_value = mock_user_response
+
+ mock_board_response = mock.MagicMock()
+ mock_board_response.json.return_value = expected_board_data
+ user_requests.get.return_value = mock_board_response
+
+ mock_pin_response = mock.MagicMock()
+ mock_pin_response.json.return_value = expected_pin_data
+ board_requests.get.return_value = mock_pin_response
+
+ mock_delete_response = mock.MagicMock()
+ pin_requests.delete.return_value = mock_delete_response
+
+ # Make sure the code think's the output file where the
+ # thumbnail is to be downloaded doesn't already exist
+ mock_os.path.exists.return_value = False
+
+ # Flex our code
+ result = download_thumbnails("1234abcd", expected_board_name, "/tmp", True)
+
+ # Make sure the call was successful, and that our mock APIs
+ # that must have executed as part of the process were called
+ assert result == 0
+ action_requests.get.assert_called_once_with(expected_thumbnail_url, stream=True)
+ mock_os.makedirs.assert_called()
+ mock_os.path.exists.assert_called()
+ mock_open.assert_called()
+ pin_requests.delete.assert_called_once()
+
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
@@ -160,7 +245,7 @@ def test_download_thumbnails_error(api_requests, user_requests, board_requests,
action_requests.get.return_value = mock_action_response
# Flex our code
- result = download_thumbnails("1234abcd", expected_board_name, "/tmp")
+ result = download_thumbnails("1234abcd", expected_board_name, "/tmp", False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
@@ -239,7 +324,7 @@ def test_download_thumbnails_missing_board(api_requests, user_requests, board_re
mock_os.path.exists.return_value = False
# Flex our code
- result = download_thumbnails("1234abcd", "FuBar", "/tmp")
+ result = download_thumbnails("1234abcd", "FuBar", "/tmp", False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
@@ -323,7 +408,7 @@ def test_download_thumbnails_exists(api_requests, user_requests, board_requests,
# Flex our code
output_folder = "/tmp"
- result = download_thumbnails("1234abcd", expected_board_name, output_folder)
+ result = download_thumbnails("1234abcd", expected_board_name, output_folder, False)
# Make sure the call was successful, and that our mock APIs
# that must have executed as part of the process were called
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@bb21be18fb37d054ecc015d31912e3bfc249fd0c#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==3.0.0a4
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==3.0.0a4
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists"
]
| []
| []
| []
| Apache License 2.0 | 2,408 | [
"src/friendlypins/scripts/fpins.py",
"src/friendlypins/utils/console_actions.py"
]
| [
"src/friendlypins/scripts/fpins.py",
"src/friendlypins/utils/console_actions.py"
]
|
|
PlasmaPy__PlasmaPy-361 | b550058279e51ad7da88282e19283760adb9c9a2 | 2018-04-15 18:27:51 | b550058279e51ad7da88282e19283760adb9c9a2 | diff --git a/plasmapy/physics/parameters.py b/plasmapy/physics/parameters.py
index d2354c91..40745ed2 100644
--- a/plasmapy/physics/parameters.py
+++ b/plasmapy/physics/parameters.py
@@ -334,9 +334,11 @@ def ion_sound_speed(T_e,
@utils.check_relativistic
@utils.check_quantity({
- 'T': {'units': u.K, 'can_be_negative': False}
+ 'T': {'units': u.K, 'can_be_negative': False},
+ 'mass': {'units': u.kg, 'can_be_negative': False, 'can_be_nan': True}
})
-def thermal_speed(T, particle="e-", method="most_probable"):
[email protected]_input
+def thermal_speed(T, particle: atomic.Particle="e-", method="most_probable", mass=np.nan*u.kg):
r"""
Return the most probable speed for a particle within a Maxwellian
distribution.
@@ -356,6 +358,11 @@ def thermal_speed(T, particle="e-", method="most_probable"):
Method to be used for calculating the thermal speed. Options are
`'most_probable'` (default), `'rms'`, and `'mean_magnitude'`.
+ mass : ~astropy.units.Quantity
+ The particle's mass override. Defaults to NaN and if so, doesn't do
+ anything, but if set, overrides mass acquired from `particle`. Useful
+ with relative velocities of particles.
+
Returns
-------
V : ~astropy.units.Quantity
@@ -417,10 +424,7 @@ def thermal_speed(T, particle="e-", method="most_probable"):
T = T.to(u.K, equivalencies=u.temperature_energy())
- try:
- m = atomic.particle_mass(particle)
- except AtomicError:
- raise ValueError("Unable to find {particle} mass in thermal_speed")
+ m = mass if np.isfinite(mass) else atomic.particle_mass(particle)
# different methods, as per https://en.wikipedia.org/wiki/Thermal_velocity
if method == "most_probable":
diff --git a/plasmapy/physics/transport/collisions.py b/plasmapy/physics/transport/collisions.py
index b061e316..82868951 100644
--- a/plasmapy/physics/transport/collisions.py
+++ b/plasmapy/physics/transport/collisions.py
@@ -7,14 +7,13 @@
import warnings
# plasmapy modules
-import plasmapy.atomic as atomic
from plasmapy import utils
from plasmapy.utils.checks import (check_quantity,
_check_relativistic)
from plasmapy.constants import (c, m_e, k_B, e, eps0, pi, hbar)
-from plasmapy.atomic import (particle_mass, integer_charge)
-from plasmapy.physics.parameters import (Debye_length)
+from plasmapy import atomic
+from plasmapy.physics import parameters
from plasmapy.physics.quantum import (Wigner_Seitz_radius,
thermal_deBroglie_wavelength,
chemical_potential)
@@ -246,29 +245,16 @@ def _boilerPlate(T, particles, V):
"list or tuple containing representations of two "
f"charged particles. Got {particles} instead.")
- masses = np.zeros(2) * u.kg
- charges = np.zeros(2) * u.C
-
- for particle, i in zip(particles, range(2)):
-
- try:
- masses[i] = particle_mass(particles[i])
- except Exception:
- raise ValueError("Unable to find mass of particle: "
- f"{particles[i]}.")
- try:
- charges[i] = np.abs(e * integer_charge(particles[i]))
- if charges[i] is None:
- raise ValueError("Unable to find charge of particle: "
- f"{particles[i]}.")
- except Exception:
- raise ValueError("Unable to find charge of particle: "
- f"{particles[i]}.")
+ particles = [atomic.Particle(p) for p in particles]
+ masses = [p.mass for p in particles]
+ charges = [np.abs(p.charge) for p in particles]
+
# obtaining reduced mass of 2 particle collision system
- reduced_mass = masses[0] * masses[1] / (masses[0] + masses[1])
+ reduced_mass = atomic.reduced_mass(*particles)
+
# getting thermal velocity of system if no velocity is given
if np.isnan(V):
- V = np.sqrt(2 * k_B * T / reduced_mass).to(u.m / u.s)
+ V = parameters.thermal_speed(T, mass=reduced_mass)
_check_relativistic(V, 'V')
return T, masses, charges, reduced_mass, V
@@ -485,7 +471,7 @@ def impact_parameter(T,
raise ValueError("Must provide a z_mean for GMS-2, GMS-5, and "
"GMS-6 methods.")
# Debye length
- lambdaDe = Debye_length(T, n_e)
+ lambdaDe = parameters.Debye_length(T, n_e)
# deBroglie wavelength
lambdaBroglie = hbar / (2 * reduced_mass * V)
# distance of closest approach in 90 degree Coulomb collision
| Split up `_boilerPlate()` in `transport.py`
`_boilerPlate()` currently does a few different things like fetching particle data, calculating thermal velocity, tests/checks. Each piece of functionality should be split into its own function.
See #191 | PlasmaPy/PlasmaPy | diff --git a/plasmapy/physics/tests/test_distribution.py b/plasmapy/physics/tests/test_distribution.py
index f4c3f3ba..35e7ffb0 100644
--- a/plasmapy/physics/tests/test_distribution.py
+++ b/plasmapy/physics/tests/test_distribution.py
@@ -104,16 +104,6 @@ def test_std(self):
T_distri = (std**2 / k_B * m_e).to(u.K)
assert np.isclose(T_distri.value, self.T_e.value)
- def test_valErr(self):
- """
- Tests whether ValueError is raised when invalid particle name
- string is passed.
- """
- with pytest.raises(ValueError):
- Maxwellian_1D(1 * u.m / u.s,
- T=1 * u.K,
- particle='XXX')
-
def test_units_no_vTh(self):
"""
Tests distribution function with units, but not passing vTh.
@@ -813,17 +803,6 @@ def test_std(self):
T_distri = (std**2 / k_B * m_e).to(u.K)
assert np.isclose(T_distri.value, self.T_e.value)
- def test_valErr(self):
- """
- Tests whether ValueError is raised when invalid particle name
- string is passed.
- """
- with pytest.raises(ValueError):
- kappa_velocity_1D(1 * u.m / u.s,
- T=1 * u.K,
- kappa=self.kappa,
- particle='XXX')
-
def test_units_no_vTh(self):
"""
Tests distribution function with units, but not passing vTh.
diff --git a/plasmapy/physics/tests/test_parameters.py b/plasmapy/physics/tests/test_parameters.py
index c9a0e5f5..30ebb26a 100644
--- a/plasmapy/physics/tests/test_parameters.py
+++ b/plasmapy/physics/tests/test_parameters.py
@@ -305,7 +305,7 @@ def test_thermal_speed():
with pytest.raises(RelativityError):
thermal_speed(1e14 * u.K, particle='p')
- with pytest.raises(ValueError):
+ with pytest.raises(InvalidParticleError):
thermal_speed(T_i, particle='asdfasd')
with pytest.warns(u.UnitsWarning):
@@ -561,9 +561,6 @@ def test_gyroradius():
with pytest.raises(TypeError):
gyroradius(u.T, particle="p", Vperp=8 * u.m / u.s)
- with pytest.raises(ValueError):
- gyroradius(B, particle='asfdas', T_i=T_i)
-
with pytest.raises(ValueError):
gyroradius(B, particle='p', T_i=-1 * u.K)
diff --git a/plasmapy/physics/transport/tests/test_collisions.py b/plasmapy/physics/transport/tests/test_collisions.py
index bc60f0f4..5a2ceaa6 100644
--- a/plasmapy/physics/transport/tests/test_collisions.py
+++ b/plasmapy/physics/transport/tests/test_collisions.py
@@ -11,7 +11,7 @@
Knudsen_number,
coupling_parameter)
from plasmapy.physics.transport.collisions import Spitzer_resistivity
-from plasmapy.utils import RelativityWarning, RelativityError, PhysicsWarning
+from plasmapy.utils import exceptions
from plasmapy.constants import m_p, m_e, c
@@ -117,7 +117,7 @@ def test_Chen_fusion(self):
# velocity. Chen uses v**2 = k * T / m whereas we use
# v ** 2 = 2 * k * T / m
lnLambdaChen = 16 + np.log(2)
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
lnLambda = Coulomb_logarithm(T, n, ('e', 'p'))
testTrue = np.isclose(lnLambda,
lnLambdaChen,
@@ -140,7 +140,7 @@ def test_Chen_laser(self):
# velocity. Chen uses v**2 = k * T / m whereas we use
# v ** 2 = 2 * k * T / m
lnLambdaChen = 6.8 + np.log(2)
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
lnLambda = Coulomb_logarithm(T, n, ('e', 'p'))
testTrue = np.isclose(lnLambda,
lnLambdaChen,
@@ -155,7 +155,7 @@ def test_GMS1(self):
Test for first version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -176,7 +176,7 @@ def test_GMS1_negative(self):
Murillo, and Schlanges PRE (2002). This checks for when
a negative (invalid) Coulomb logarithm is returned.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -196,7 +196,7 @@ def test_GMS2(self):
Test for second version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -217,7 +217,7 @@ def test_GMS2_negative(self):
Murillo, and Schlanges PRE (2002). This checks for when
a negative (invalid) Coulomb logarithm is returned.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -237,7 +237,7 @@ def test_GMS3(self):
Test for third version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -259,7 +259,7 @@ def test_GMS3_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -279,7 +279,7 @@ def test_GMS4(self):
Test for fourth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -301,7 +301,7 @@ def test_GMS4_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -321,7 +321,7 @@ def test_GMS5(self):
Test for fifth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -343,7 +343,7 @@ def test_GMS5_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -363,7 +363,7 @@ def test_GMS6(self):
Test for sixth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -385,7 +385,7 @@ def test_GMS6_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -435,12 +435,12 @@ def test_GMS6_zmean_error(self):
def test_relativity_warn(self):
"""Tests whether relativity warning is raised at high velocity."""
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ('e', 'p'), V=0.9 * c)
def test_relativity_error(self):
"""Tests whether relativity error is raised at light speed."""
- with pytest.raises(RelativityError):
+ with pytest.raises(exceptions.RelativityError):
Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ('e', 'p'), V=1.1 * c)
def test_unit_conversion_error(self):
@@ -464,7 +464,7 @@ def test_invalid_particle_error(self):
Tests whether an error is raised when an invalid particle name
is given.
"""
- with pytest.raises(ValueError):
+ with pytest.raises(exceptions.InvalidParticleError):
Coulomb_logarithm(1 * u.K, 5 * u.m ** -3, ('e', 'g'))
n_e = np.array([1e9, 1e9, 1e24]) * u.cm ** -3
@@ -605,7 +605,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -626,7 +626,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -645,7 +645,7 @@ def test_electrons(self):
"""
Testing collision frequency between electrons.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.electrons,
@@ -664,7 +664,7 @@ def test_protons(self):
"""
Testing collision frequency between protons (ions).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.protons,
@@ -683,7 +683,7 @@ def test_zmean(self):
"""
Test collisional frequency function when given arbitrary z_mean.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -714,7 +714,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mean_free_path(self.T,
self.n_e,
self.particles,
@@ -735,7 +735,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mean_free_path(self.T,
self.n_e,
self.particles,
@@ -834,7 +834,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -855,7 +855,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -872,7 +872,7 @@ def test_fail1(self):
def test_zmean(self):
"""Testing mobility when z_mean is passed."""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -904,7 +904,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Knudsen_number(self.length,
self.T,
self.n_e,
@@ -926,7 +926,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Knudsen_number(self.length,
self.T,
self.n_e,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/automated-code-tests.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==1.0.6
astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
coveralls==4.0.1
cycler==0.12.1
Cython==3.0.12
dill==0.3.9
docopt==0.6.2
exceptiongroup==1.2.2
flake8==7.2.0
fonttools==4.56.0
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
lmfit==1.3.3
matplotlib==3.9.4
mccabe==0.7.0
mpmath==1.3.0
numpy==1.26.4
packaging==24.2
pillow==11.1.0
-e git+https://github.com/PlasmaPy/PlasmaPy.git@b550058279e51ad7da88282e19283760adb9c9a2#egg=plasmapy
pluggy==1.5.0
pycodestyle==2.13.0
pyerfa==2.0.1.5
pyflakes==3.3.2
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
roman==5.0
scipy==1.13.1
six==1.17.0
tomli==2.2.1
uncertainties==3.2.2
urllib3==2.3.0
zipp==3.21.0
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==1.0.6
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- coveralls==4.0.1
- cycler==0.12.1
- cython==3.0.12
- dill==0.3.9
- docopt==0.6.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- fonttools==4.56.0
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- lmfit==1.3.3
- matplotlib==3.9.4
- mccabe==0.7.0
- mpmath==1.3.0
- numpy==1.26.4
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyerfa==2.0.1.5
- pyflakes==3.3.2
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- roman==5.0
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- uncertainties==3.2.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/physics/tests/test_parameters.py::test_thermal_speed",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_invalid_particle_error"
]
| [
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_value_drift_units",
"plasmapy/physics/tests/test_parameters.py::test_Alfven_speed",
"plasmapy/physics/tests/test_parameters.py::test_ion_sound_speed",
"plasmapy/physics/tests/test_parameters.py::test_gyrofrequency",
"plasmapy/physics/tests/test_parameters.py::test_plasma_frequency",
"plasmapy/physics/tests/test_parameters.py::test_magnetic_energy_density",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS1_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS2",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS2_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS3",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS4",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS4_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS5",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS5_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS6",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS6_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_quantum"
]
| [
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_max_noDrift",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_max_drift",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_std",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_invalid_kappa",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_max_noDrift",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_max_drift",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_maxwellian_limit",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_std",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_invalid_kappa",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_norm",
"plasmapy/physics/tests/test_parameters.py::Test_mass_density::test_particleless",
"plasmapy/physics/tests/test_parameters.py::Test_mass_density::test_wrong_units",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_invalid_kappa",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_invalid_method",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_probable1",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_rms1",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_mean1",
"plasmapy/physics/tests/test_parameters.py::test_gyroradius",
"plasmapy/physics/tests/test_parameters.py::test_Debye_length",
"plasmapy/physics/tests/test_parameters.py::test_Debye_number",
"plasmapy/physics/tests/test_parameters.py::test_inertial_length",
"plasmapy/physics/tests/test_parameters.py::test_magnetic_pressure",
"plasmapy/physics/tests/test_parameters.py::test_upper_hybrid_frequency",
"plasmapy/physics/tests/test_parameters.py::test_lower_hybrid_frequency",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_Q_machine",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_lab",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_torus",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_fusion",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_laser",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS3_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS2_zmean_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS5_zmean_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS6_zmean_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_relativity_warn",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_relativity_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_unit_conversion_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_single_particle_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_b_perp::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_b_perp::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_impact_parameter::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_impact_parameter::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_impact_parameter::test_bad_method",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_electrons",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_protons",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_zmean",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mean_free_path::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mean_free_path::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Spitzer_resistivity::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Spitzer_resistivity::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Spitzer_resistivity::test_zmean",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mobility::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mobility::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mobility::test_zmean",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Knudsen_number::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Knudsen_number::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_zmean"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,409 | [
"plasmapy/physics/parameters.py",
"plasmapy/physics/transport/collisions.py"
]
| [
"plasmapy/physics/parameters.py",
"plasmapy/physics/transport/collisions.py"
]
|
|
TheFriendlyCoder__friendlypins-38 | b52793c458ee2bc4057c22a233d43cc2b1439f8c | 2018-04-15 23:57:05 | b52793c458ee2bc4057c22a233d43cc2b1439f8c | diff --git a/setup.py b/setup.py
index 6604fe6..7c44b31 100755
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,9 @@ PROJECT_NAME = 'friendlypins'
PROJECT_DEPENDENCIES = [
'requests',
'six',
- 'dateutils']
+ 'dateutils',
+ 'tqdm',
+ 'pillow']
PROJECT_DEV_DEPENDENCIES = [
'wheel<1.0.0',
'twine<2.0.0',
diff --git a/src/friendlypins/headers.py b/src/friendlypins/headers.py
index 5df1ea4..b05b9f8 100644
--- a/src/friendlypins/headers.py
+++ b/src/friendlypins/headers.py
@@ -79,5 +79,13 @@ class Headers(object):
# return time data in current locale for convenience
return date_with_tz.astimezone(tz.tzlocal())
+ @property
+ def bytes(self):
+ """Gets the number of bytes contained in the response data
+
+ :rtype: :class:`int`
+ """
+ return int(self._data['Content-Length'])
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 84c63b7..864bfbc 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -3,7 +3,9 @@ import logging
import os
from six.moves import urllib
import requests
+from tqdm import tqdm
from friendlypins.api import API
+from friendlypins.headers import Headers
def _download_pin(pin, folder):
"""Helper method for downloading a thumbnail from a single pin
@@ -15,6 +17,7 @@ def _download_pin(pin, folder):
:rtype: :class:`int`
"""
log = logging.getLogger(__name__)
+
temp_url = urllib.parse.urlparse(pin.thumbnail.url)
temp_filename = os.path.basename(temp_url.path)
output_file = os.path.join(folder, temp_filename)
@@ -28,6 +31,9 @@ def _download_pin(pin, folder):
try:
response = requests.get(pin.thumbnail.url, stream=True)
response.raise_for_status()
+ headers = Headers(response.headers)
+ log.debug(headers)
+
with open(output_file, "wb") as handle:
for data in response.iter_content():
handle.write(data)
@@ -64,16 +70,18 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
return 1
all_pins = selected_board.all_pins
- log.info('Downloading %s thumbnails...', len(all_pins))
+ log.info('Downloading thumbnails...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
- for cur_pin in all_pins:
- retval = _download_pin(cur_pin, output_folder)
- if retval:
- return retval
- if delete:
- cur_pin.delete()
+ with tqdm(total=selected_board.num_pins, unit='b', ncols=80) as pbar:
+ for cur_pin in all_pins:
+ retval = _download_pin(cur_pin, output_folder)
+ if retval:
+ return retval
+ if delete:
+ cur_pin.delete()
+ pbar.update()
return 0
| Add progress bar support to fpins console app
To make it easier to track the overall progress of a lengthy download operation, we should add support for showing a progress bar to the fpins console app. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index d92a3e9..b91cd0d 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -28,7 +28,10 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -110,7 +113,10 @@ def test_download_thumbnails_with_delete(pin_requests, api_requests, user_reques
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -194,7 +200,10 @@ def test_download_thumbnails_error(api_requests, user_requests, board_requests,
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -278,7 +287,10 @@ def test_download_thumbnails_missing_board(api_requests, user_requests, board_re
"data": [{
"id": "6789",
"name": "MyBoard",
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -358,7 +370,10 @@ def test_download_thumbnails_exists(api_requests, user_requests, board_requests,
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
diff --git a/unit_tests/test_headers.py b/unit_tests/test_headers.py
index e7bf38f..0a5e77e 100644
--- a/unit_tests/test_headers.py
+++ b/unit_tests/test_headers.py
@@ -3,8 +3,9 @@ import mock
from friendlypins.headers import Headers
from dateutil import tz
-sample_rate_limit = "200"
-sample_rate_max = "150"
+sample_rate_limit = 200
+sample_rate_max = 150
+sample_content_length = 1024
sample_header = {
'Access-Control-Allow-Origin': '*',
'Age': '0',
@@ -14,12 +15,13 @@ sample_header = {
'Pinterest-Version': 'e3f92ef',
'X-Content-Type-Options': 'nosniff',
'X-Pinterest-RID': '12345678',
- 'X-Ratelimit-Limit': sample_rate_limit,
- 'X-Ratelimit-Remaining': sample_rate_max,
+ 'X-Ratelimit-Limit': str(sample_rate_limit),
+ 'X-Ratelimit-Remaining': str(sample_rate_max),
'Transfer-Encoding': 'chunked',
'Date': 'Sat, 31 Mar 2018 10:58:09 GMT',
'Connection': 'keep-alive',
- 'Pinterest-Generated-By': ''
+ 'Pinterest-Generated-By': '',
+ 'Content-Length': str(sample_content_length)
}
@@ -31,18 +33,22 @@ def test_get_date_locale():
def test_get_rate_limit():
obj = Headers(sample_header)
- assert obj.rate_limit == 200
+ assert obj.rate_limit == sample_rate_limit
def test_get_rate_max():
obj = Headers(sample_header)
- assert obj.rate_remaining == 150
+ assert obj.rate_remaining == sample_rate_max
def test_get_rate_percent():
obj = Headers(sample_header)
assert obj.percent_rate_remaining == 75
+def test_get_num_bytes():
+ obj = Headers(sample_header)
+
+ assert obj.bytes == sample_content_length
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
\ No newline at end of file
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pylint",
"pytest",
"pytest-cov",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
dill==0.3.4
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@b52793c458ee2bc4057c22a233d43cc2b1439f8c#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- dill==0.3.4
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==2.13.9
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.5.5
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.16.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_headers.py::test_get_num_bytes"
]
| []
| [
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists",
"unit_tests/test_headers.py::test_get_date_locale",
"unit_tests/test_headers.py::test_get_rate_limit",
"unit_tests/test_headers.py::test_get_rate_max",
"unit_tests/test_headers.py::test_get_rate_percent"
]
| []
| Apache License 2.0 | 2,411 | [
"setup.py",
"src/friendlypins/utils/console_actions.py",
"src/friendlypins/headers.py"
]
| [
"setup.py",
"src/friendlypins/utils/console_actions.py",
"src/friendlypins/headers.py"
]
|
|
TheFriendlyCoder__friendlypins-39 | ad685c13d7a42e93fca3adf3b1b8894549fa296e | 2018-04-16 03:08:15 | ad685c13d7a42e93fca3adf3b1b8894549fa296e | diff --git a/.gitignore b/.gitignore
index edbaf19..d76012a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,4 +22,5 @@ venv*/
**/*.orig
# project files
-.vscode/
\ No newline at end of file
+.vscode/
+.idea/
\ No newline at end of file
diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml
new file mode 100644
index 0000000..a55e7a1
--- /dev/null
+++ b/.idea/codeStyles/codeStyleConfig.xml
@@ -0,0 +1,5 @@
+<component name="ProjectCodeStyleConfiguration">
+ <state>
+ <option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
+ </state>
+</component>
\ No newline at end of file
diff --git a/docs/friendlypins.utils.rest_io.rst b/docs/friendlypins.utils.rest_io.rst
new file mode 100644
index 0000000..c29f06c
--- /dev/null
+++ b/docs/friendlypins.utils.rest_io.rst
@@ -0,0 +1,7 @@
+friendlypins.utils.rest\_io module
+==================================
+
+.. automodule:: friendlypins.utils.rest_io
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/friendlypins.utils.rst b/docs/friendlypins.utils.rst
index f3a2e9a..300853a 100644
--- a/docs/friendlypins.utils.rst
+++ b/docs/friendlypins.utils.rst
@@ -7,6 +7,7 @@ Submodules
.. toctree::
friendlypins.utils.console_actions
+ friendlypins.utils.rest_io
Module contents
---------------
diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index ee3b00a..cff46b6 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -1,9 +1,9 @@
"""Primary entry point for the Friendly Pinterest library"""
from __future__ import print_function
import logging
-import requests
from friendlypins.user import User
-from friendlypins.headers import Headers
+from friendlypins.utils.rest_io import RestIO
+
class API(object): # pylint: disable=too-few-public-methods
"""High level abstraction for the core Pinterest API
@@ -13,15 +13,12 @@ class API(object): # pylint: disable=too-few-public-methods
Pinterest data
"""
- # URL of the root namespace for the Pinterest API
- _root_url = 'https://api.pinterest.com/v1'
-
def __init__(self, personal_access_token):
self._log = logging.getLogger(__name__)
- self._token = personal_access_token
+ self._io = RestIO(personal_access_token)
def get_user(self, username=None):
- """Gets all primitives associated with a particular Pinterst user
+ """Gets all primitives associated with a particular Pinterest user
:param str username:
Optional name of a user to look up
@@ -34,23 +31,13 @@ class API(object): # pylint: disable=too-few-public-methods
if username:
raise NotImplementedError(
"Querying arbitrary Pinerest users is not yet supported.")
- else:
- temp_url = "{0}/me".format(self._root_url)
- temp_url += "?access_token={0}".format(self._token)
- temp_url += "&fields=id,username,first_name,last_name,bio,created_at,counts,image"
- response = requests.get(temp_url)
- response.raise_for_status()
-
- header = Headers(response.headers)
- self._log.debug("Getting user query response: %s", header)
-
- raw = response.json()
- assert 'data' in raw
+ fields = "id,username,first_name,last_name,bio,created_at,counts,image"
+ result = self._io.get("me", {"fields": fields})
+ assert 'data' in result
- return User(raw['data'], self._root_url, self._token)
+ return User(result['data'], self._io)
-# pylint: disable-all
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index 194d50a..045655f 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -1,8 +1,6 @@
"""Primitives for interacting with Pinterest boards"""
import logging
import json
-import requests
-from friendlypins.headers import Headers
from friendlypins.pin import Pin
@@ -10,15 +8,14 @@ class Board(object):
"""Abstraction around a Pinterest board
:param dict data: Raw Pinterest API data describing the board
- :param str root_url: URL of the Pinterest REST API
- :param str token: Authentication token for the REST API
+ :param rest_io: reference to the Pinterest REST API
+ :type rest_io: :class:`friendlypins.utils.rest_io.RestIO`
"""
- def __init__(self, data, root_url, token):
+ def __init__(self, data, rest_io):
self._log = logging.getLogger(__name__)
self._data = data
- self._root_url = root_url
- self._token = token
+ self._io = rest_io
def __str__(self):
"""String representation of this board, for debugging purposes
@@ -76,37 +73,40 @@ class Board(object):
:rtype: :class:`list` of :class:`friendlypins.pin.Pin`
"""
self._log.debug('Gettings all pins for board %s...', self.name)
-
- temp_url = '{0}/boards/{1}/pins/'.format(self._root_url, self.unique_id)
- temp_url += "?access_token={0}".format(self._token)
- temp_url += "&limit=100"
- temp_url += "&fields=id,link,url,creator,board,created_at,note,color"
- temp_url += ",counts,media,attribution,image,metadata,original_link"
- response = requests.get(temp_url)
- response.raise_for_status()
- retval = []
- header = Headers(response.headers)
- self._log.debug("Pins query response header %s", header)
+ retval = list()
+
+ properties = {
+ "fields": ','.join([
+ "id",
+ "link",
+ "url",
+ "creator",
+ "board",
+ "created_at",
+ "note,color",
+ "counts",
+ "media",
+ "attribution",
+ "image",
+ "metadata",
+ "original_link"
+ ])
+ }
while True:
- raw = response.json()
- assert 'data' in raw
-
- for cur_item in raw['data']:
- retval.append(Pin(cur_item, self._root_url, self._token))
-
- self._log.debug("Raw keys are %s", raw.keys())
- self._log.debug("Paged info is %s", raw['page'])
- if not raw['page']['cursor']:
+ result = self._io.get(
+ "boards/{0}/pins".format(self.unique_id),
+ properties)
+ assert 'data' in result
+
+ for cur_item in result['data']:
+ retval.append(Pin(cur_item, self._io))
+ if not result["page"]["cursor"]:
break
-
- paged_url = temp_url + "&cursor={0}".format(raw['page']['cursor'])
- response = requests.get(paged_url)
- response.raise_for_status()
- header = Headers(response.headers)
- self._log.debug("Pins query response header %s", header)
+ properties["cursor"] = result["page"]["cursor"]
return retval
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/headers.py b/src/friendlypins/headers.py
index b05b9f8..7fc1552 100644
--- a/src/friendlypins/headers.py
+++ b/src/friendlypins/headers.py
@@ -3,6 +3,7 @@ from datetime import datetime
import json
from dateutil import tz
+
class Headers(object):
"""Abstraction around the Pinterest API HTTP response header
@@ -87,5 +88,6 @@ class Headers(object):
"""
return int(self._data['Content-Length'])
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/pin.py b/src/friendlypins/pin.py
index 9faa11b..f727468 100644
--- a/src/friendlypins/pin.py
+++ b/src/friendlypins/pin.py
@@ -1,8 +1,6 @@
"""Primitives for operating on Pinterest pins"""
import logging
import json
-import requests
-from friendlypins.headers import Headers
from friendlypins.thumbnail import Thumbnail
@@ -10,14 +8,13 @@ class Pin(object):
"""Abstraction around a Pinterest pin
:param dict data: Raw Pinterest API data describing a pin
- :param str root_url: URL of the Pinterest REST API
- :param str token: Authentication token for interacting with the API
+ :param rest_io: reference to the Pinterest REST API
+ :type rest_io: :class:`friendlypins.utils.rest_io.RestIO`
"""
- def __init__(self, data, root_url, token):
+ def __init__(self, data, rest_io):
self._log = logging.getLogger(__name__)
- self._root_url = root_url
- self._token = token
+ self._io = rest_io
self._data = data
def __str__(self):
@@ -92,17 +89,8 @@ class Pin(object):
def delete(self):
"""Removes this pin from it's respective board"""
self._log.debug('Deleting pin %s', repr(self))
- temp_url = '{0}/pins/{1}/'.format(
- self._root_url,
- self.unique_id)
- temp_url += "?access_token={0}".format(self._token)
+ self._io.delete('pins/{0}'.format(self.unique_id))
- response = requests.delete(temp_url)
-
- header = Headers(response.headers)
- self._log.debug("Boards query response header %s", header)
-
- response.raise_for_status()
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/scripts/fpins.py b/src/friendlypins/scripts/fpins.py
index 0115b46..1fe03d5 100644
--- a/src/friendlypins/scripts/fpins.py
+++ b/src/friendlypins/scripts/fpins.py
@@ -5,6 +5,7 @@ import shlex
import sys
from friendlypins.utils.console_actions import download_thumbnails
+
def _download_thumbnails(args):
"""Callback for performing the thumbnail download operation
@@ -141,5 +142,6 @@ def main(args=None):
log.debug("Details: ", exc_info=True)
return -1
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/thumbnail.py b/src/friendlypins/thumbnail.py
index 58bc810..bf5cc5f 100644
--- a/src/friendlypins/thumbnail.py
+++ b/src/friendlypins/thumbnail.py
@@ -56,5 +56,6 @@ class Thumbnail(object):
"""
return self._data['original']['url']
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index 3121d1d..de3306d 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -1,24 +1,21 @@
"""Interfaces for interacting with Pinterest users"""
import logging
import json
-import requests
from friendlypins.board import Board
-from friendlypins.headers import Headers
class User(object):
"""Abstraction around a Pinterest user and their associated data
:param dict data: JSON data parsed from the API
- :param str root_url: URL of the Pinterest REST API
- :param str token: Authentication token for interacting with the API
+ :param rest_io: reference to the Pinterest REST API
+ :type rest_io: :class:`friendlypins.utils.rest_io.RestIO`
"""
- def __init__(self, data, root_url, token):
+ def __init__(self, data, rest_io):
self._log = logging.getLogger(__name__)
self._data = data
- self._root_url = root_url
- self._token = token
+ self._io = rest_io
def __str__(self):
"""String representation of this user, for debugging purposes
@@ -103,22 +100,16 @@ class User(object):
"""
self._log.debug("Loading boards for user %s...", self.name)
- temp_url = '{0}/me/boards/'.format(self._root_url)
- temp_url += "?access_token={0}".format(self._token)
- temp_url += "&fields=id,name,url,description,creator,created_at,counts,image"
- response = requests.get(temp_url)
+ fields = "id,name,url,description,creator,created_at,counts,image"
+ result = self._io.get('me/boards', {"fields": fields})
- header = Headers(response.headers)
- self._log.debug("Boards query response header %s", header)
-
- response.raise_for_status()
- raw = response.json()
- assert 'data' in raw
+ assert 'data' in result
retval = []
- for cur_item in raw['data']:
- retval.append(Board(cur_item, self._root_url, self._token))
+ for cur_item in result['data']:
+ retval.append(Board(cur_item, self._io))
return retval
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 864bfbc..1ca66ef 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -7,6 +7,7 @@ from tqdm import tqdm
from friendlypins.api import API
from friendlypins.headers import Headers
+
def _download_pin(pin, folder):
"""Helper method for downloading a thumbnail from a single pin
@@ -44,6 +45,7 @@ def _download_pin(pin, folder):
return 2
return 0
+
def download_thumbnails(api_token, board_name, output_folder, delete):
"""Downloads thumbnails of all pins on a board
@@ -85,5 +87,6 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
return 0
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/rest_io.py b/src/friendlypins/utils/rest_io.py
new file mode 100644
index 0000000..61d1aae
--- /dev/null
+++ b/src/friendlypins/utils/rest_io.py
@@ -0,0 +1,81 @@
+"""Abstraction around the raw Pinterest REST API calls"""
+import logging
+import requests
+from friendlypins.headers import Headers
+
+
+class RestIO(object):
+ """Interface for low level REST API interactions
+
+ :param str authentication_token:
+ Personal API token for authenticating to REST API
+ """
+
+ # URL of the root namespace for the Pinterest API
+ _root_url = 'https://api.pinterest.com/v1'
+
+ def __init__(self, authentication_token):
+ self._log = logging.getLogger(__name__)
+ self._token = authentication_token
+ self._latest_header = None
+
+ @property
+ def root_url(self):
+ """Gets root url"""
+ return self._root_url
+
+ @property
+ def token(self):
+ """Gets API token"""
+ return self._token
+
+ def get(self, path, properties=None):
+ """Gets API data from a given sub-path
+
+ :param str path: sub-path with in the REST API to query
+ :param dict properties:
+ optional set of request properties to append to the API call
+ :returns: json data returned from the API endpoint
+ :rtype: :class:`dict`
+ """
+ self._log.debug(
+ "Getting data from %s with options %s",
+ path,
+ properties
+ )
+ temp_url = "{0}/{1}".format(self._root_url, path)
+
+ if properties is None:
+ properties = dict()
+ properties["limit"] = "100"
+ properties["access_token"] = self._token
+
+ response = requests.get(temp_url, params=properties)
+ response.raise_for_status()
+
+ self._latest_header = Headers(response.headers)
+ self._log.debug("%s query header: %s", path, self._latest_header)
+
+ return response.json()
+
+ def delete(self, path):
+ """Sends a delete request to a remote endpoint
+
+ :param str path: API endpoint to send delete request to"""
+ temp_url = '{0}/{1}'.format(
+ self._root_url,
+ path)
+
+ properties = {
+ "access_token": self._token
+ }
+
+ response = requests.delete(temp_url, params=properties)
+ response.raise_for_status()
+ header = Headers(response.headers)
+ self._log.debug("Headers for delete on %s are: %s", path, header)
+ self._log.debug("Response from delete was %s", response.text)
+
+
+if __name__ == "__main__":
+ pass
| extract common API logic into a helper class
We have several objects in the API that encapsulate primitives from Pinterest. Most of these objects make calls to the Pinterest API to perform actions. Further, there is now some common logic - like accessing paged result sets - that is duplicated across the classes. This duplication should be eliminated by introducing a new helper class which interacts with the Pinterest API, abstracting out the low level details of those requests/responses. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_api.py b/unit_tests/test_api.py
index 0a6e5c1..b013c9e 100644
--- a/unit_tests/test_api.py
+++ b/unit_tests/test_api.py
@@ -3,7 +3,6 @@ import mock
from friendlypins.api import API
def test_get_user():
- obj = API('abcd1234')
expected_url = 'https://www.pinterest.com/MyUserName/'
expected_firstname = "John"
expected_lastname = "Doe"
@@ -16,10 +15,12 @@ def test_get_user():
'id': str(expected_id)
}
}
- with mock.patch("friendlypins.api.requests") as mock_requests:
- mock_response = mock.MagicMock()
- mock_response.json.return_value = expected_data
- mock_requests.get.return_value = mock_response
+ with mock.patch("friendlypins.api.RestIO") as mock_io:
+ mock_obj = mock.MagicMock()
+ mock_obj.get.return_value = expected_data
+ mock_io.return_value = mock_obj
+
+ obj = API('abcd1234')
result = obj.get_user()
assert expected_url == result.url
@@ -27,8 +28,6 @@ def test_get_user():
assert expected_lastname == result.last_name
assert expected_id == result.unique_id
- mock_response.raise_for_status.assert_called_once()
-
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
diff --git a/unit_tests/test_board.py b/unit_tests/test_board.py
index e2eafe1..5daac85 100644
--- a/unit_tests/test_board.py
+++ b/unit_tests/test_board.py
@@ -16,20 +16,19 @@ def test_board_properties():
}
}
- obj = Board(sample_data, 'http://pinterest_url', '1234abcd')
+ mock_io = mock.MagicMock()
+ obj = Board(sample_data, mock_io)
assert obj.unique_id == expected_id
assert obj.name == expected_name
assert obj.url == expected_url
assert obj.num_pins == expected_pin_count
+
def test_get_all_pins():
data = {
'id': '987654321',
'name': 'MyBoard'
}
- api_url = "https://pinterest_url/v1"
- token = "1234abcd"
- obj = Board(data, api_url, token)
expected_id = 1234
expected_url = "https://www.pinterest.ca/MyName/MyPin/"
@@ -51,19 +50,18 @@ def test_get_all_pins():
}
}
- with mock.patch("friendlypins.board.requests") as mock_requests:
- mock_response = mock.MagicMock()
- mock_response.json.return_value = expected_data
- mock_requests.get.return_value = mock_response
- result = obj.all_pins
+ mock_io = mock.MagicMock()
+ mock_io.get.return_value = expected_data
+ obj = Board(data, mock_io)
+
+ result = obj.all_pins
- assert len(result) == 1
- assert expected_url == result[0].url
- assert expected_note == result[0].note
- assert expected_id == result[0].unique_id
- assert expected_mediatype == result[0].media_type
+ assert len(result) == 1
+ assert expected_url == result[0].url
+ assert expected_note == result[0].note
+ assert expected_id == result[0].unique_id
+ assert expected_mediatype == result[0].media_type
- mock_response.raise_for_status.assert_called_once()
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index b91cd0d..170f224 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -7,10 +7,8 @@ from friendlypins.utils.console_actions import download_thumbnails
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
-def test_download_thumbnails(api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
[email protected]("friendlypins.api.RestIO")
+def test_download_thumbnails(rest_io, action_requests, mock_open, mock_os):
# Fake user data for the user authenticating to Pinterest
expected_user_data = {
@@ -60,17 +58,13 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
}
# fake our Pinterest API data to flex our implementation logic
- mock_user_response = mock.MagicMock()
- mock_user_response.json.return_value = expected_user_data
- api_requests.get.return_value = mock_user_response
-
- mock_board_response = mock.MagicMock()
- mock_board_response.json.return_value = expected_board_data
- user_requests.get.return_value = mock_board_response
-
- mock_pin_response = mock.MagicMock()
- mock_pin_response.json.return_value = expected_pin_data
- board_requests.get.return_value = mock_pin_response
+ mock_response = mock.MagicMock()
+ mock_response.get.side_effect = [
+ expected_user_data,
+ expected_board_data,
+ expected_pin_data
+ ]
+ rest_io.return_value = mock_response
# Make sure the code think's the output file where the
# thumbnail is to be downloaded doesn't already exist
@@ -91,11 +85,8 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
[email protected]("friendlypins.pin.requests")
-def test_download_thumbnails_with_delete(pin_requests, api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
[email protected]("friendlypins.api.RestIO")
+def test_download_thumbnails_with_delete(rest_io, action_requests, mock_open, mock_os):
# Fake user data for the user authenticating to Pinterest
expected_user_data = {
@@ -145,20 +136,13 @@ def test_download_thumbnails_with_delete(pin_requests, api_requests, user_reques
}
# fake our Pinterest API data to flex our implementation logic
- mock_user_response = mock.MagicMock()
- mock_user_response.json.return_value = expected_user_data
- api_requests.get.return_value = mock_user_response
-
- mock_board_response = mock.MagicMock()
- mock_board_response.json.return_value = expected_board_data
- user_requests.get.return_value = mock_board_response
-
- mock_pin_response = mock.MagicMock()
- mock_pin_response.json.return_value = expected_pin_data
- board_requests.get.return_value = mock_pin_response
-
- mock_delete_response = mock.MagicMock()
- pin_requests.delete.return_value = mock_delete_response
+ mock_response = mock.MagicMock()
+ mock_response.get.side_effect = [
+ expected_user_data,
+ expected_board_data,
+ expected_pin_data
+ ]
+ rest_io.return_value = mock_response
# Make sure the code think's the output file where the
# thumbnail is to be downloaded doesn't already exist
@@ -174,15 +158,13 @@ def test_download_thumbnails_with_delete(pin_requests, api_requests, user_reques
mock_os.makedirs.assert_called()
mock_os.path.exists.assert_called()
mock_open.assert_called()
- pin_requests.delete.assert_called_once()
+ mock_response.delete.assert_called_once()
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
-def test_download_thumbnails_error(api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
[email protected]("friendlypins.api.RestIO")
+def test_download_thumbnails_error(rest_io, action_requests, mock_open, mock_os):
# Fake user data for the user authenticating to Pinterest
expected_user_data = {
@@ -232,17 +214,13 @@ def test_download_thumbnails_error(api_requests, user_requests, board_requests,
}
# fake our Pinterest API data to flex our implementation logic
- mock_user_response = mock.MagicMock()
- mock_user_response.json.return_value = expected_user_data
- api_requests.get.return_value = mock_user_response
-
- mock_board_response = mock.MagicMock()
- mock_board_response.json.return_value = expected_board_data
- user_requests.get.return_value = mock_board_response
-
- mock_pin_response = mock.MagicMock()
- mock_pin_response.json.return_value = expected_pin_data
- board_requests.get.return_value = mock_pin_response
+ mock_response = mock.MagicMock()
+ mock_response.get.side_effect = [
+ expected_user_data,
+ expected_board_data,
+ expected_pin_data
+ ]
+ rest_io.return_value = mock_response
# Make sure the code think's the output file where the
# thumbnail is to be downloaded doesn't already exist
@@ -267,10 +245,8 @@ def test_download_thumbnails_error(api_requests, user_requests, board_requests,
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
-def test_download_thumbnails_missing_board(api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
[email protected]("friendlypins.api.RestIO")
+def test_download_thumbnails_missing_board(rest_io, action_requests, mock_open, mock_os):
# Fake user data for the user authenticating to Pinterest
expected_user_data = {
@@ -319,17 +295,13 @@ def test_download_thumbnails_missing_board(api_requests, user_requests, board_re
}
# fake our Pinterest API data to flex our implementation logic
- mock_user_response = mock.MagicMock()
- mock_user_response.json.return_value = expected_user_data
- api_requests.get.return_value = mock_user_response
-
- mock_board_response = mock.MagicMock()
- mock_board_response.json.return_value = expected_board_data
- user_requests.get.return_value = mock_board_response
-
- mock_pin_response = mock.MagicMock()
- mock_pin_response.json.return_value = expected_pin_data
- board_requests.get.return_value = mock_pin_response
+ mock_response = mock.MagicMock()
+ mock_response.get.side_effect = [
+ expected_user_data,
+ expected_board_data,
+ expected_pin_data
+ ]
+ rest_io.return_value = mock_response
# Make sure the code think's the output file where the
# thumbnail is to be downloaded doesn't already exist
@@ -349,10 +321,8 @@ def test_download_thumbnails_missing_board(api_requests, user_requests, board_re
@mock.patch("friendlypins.utils.console_actions.os")
@mock.patch("friendlypins.utils.console_actions.open")
@mock.patch("friendlypins.utils.console_actions.requests")
[email protected]("friendlypins.board.requests")
[email protected]("friendlypins.user.requests")
[email protected]("friendlypins.api.requests")
-def test_download_thumbnails_exists(api_requests, user_requests, board_requests, action_requests, mock_open, mock_os):
[email protected]("friendlypins.api.RestIO")
+def test_download_thumbnails_exists(rest_io, action_requests, mock_open, mock_os):
# Fake user data for the user authenticating to Pinterest
expected_user_data = {
@@ -403,17 +373,13 @@ def test_download_thumbnails_exists(api_requests, user_requests, board_requests,
}
# fake our Pinterest API data to flex our implementation logic
- mock_user_response = mock.MagicMock()
- mock_user_response.json.return_value = expected_user_data
- api_requests.get.return_value = mock_user_response
-
- mock_board_response = mock.MagicMock()
- mock_board_response.json.return_value = expected_board_data
- user_requests.get.return_value = mock_board_response
-
- mock_pin_response = mock.MagicMock()
- mock_pin_response.json.return_value = expected_pin_data
- board_requests.get.return_value = mock_pin_response
+ mock_response = mock.MagicMock()
+ mock_response.get.side_effect = [
+ expected_user_data,
+ expected_board_data,
+ expected_pin_data
+ ]
+ rest_io.return_value = mock_response
# Make sure the code think's the output file where the
# thumbnail is to be downloaded exists already
diff --git a/unit_tests/test_pin.py b/unit_tests/test_pin.py
index 7d5586c..7fd8924 100644
--- a/unit_tests/test_pin.py
+++ b/unit_tests/test_pin.py
@@ -18,7 +18,8 @@ def test_pin_properties():
}
}
- obj = Pin(sample_data, "http://www.pinterest.com", "1234abcd")
+ mock_io = mock.MagicMock()
+ obj = Pin(sample_data, mock_io)
assert obj.unique_id == expected_id
assert obj.note == expected_note
@@ -26,6 +27,7 @@ def test_pin_properties():
assert obj.link == expected_link
assert obj.media_type == expected_media_type
+
def test_pin_missing_media_type():
expected_id = 1234
expected_note = "Here's my note"
@@ -38,7 +40,8 @@ def test_pin_missing_media_type():
"url": expected_url,
}
- obj = Pin(sample_data, "http://www.pinterest.com", "1234abcd")
+ mock_io = mock.MagicMock()
+ obj = Pin(sample_data, mock_io)
assert obj.unique_id == expected_id
assert obj.note == expected_note
@@ -46,24 +49,19 @@ def test_pin_missing_media_type():
assert obj.link == expected_link
assert obj.media_type is None
-def test_delete():
- api_url = "https://pinterest_url/v1"
- token = "1234abcd"
+def test_delete():
data = {
"id": "12345678",
"note": "My Pin Description"
}
- obj = Pin(data, api_url, token)
+ mock_io = mock.MagicMock()
+ obj = Pin(data, mock_io)
+ obj.delete()
- with mock.patch("friendlypins.pin.requests") as mock_requests:
- mock_response = mock.MagicMock()
- mock_requests.delete.return_value = mock_response
+ mock_io.delete.assert_called_once()
- obj.delete()
- mock_requests.delete.assert_called_once()
- mock_response.raise_for_status.assert_called_once()
def test_get_thumbnail():
expected_url = "https://i.pinimg.com/r/pin/12345"
@@ -75,9 +73,12 @@ def test_get_thumbnail():
}
}
- obj = Pin(data, "http://www.pinterest.com", "1234abcd")
+ mock_io = mock.MagicMock()
+ obj = Pin(data, mock_io)
result = obj.thumbnail
assert result.url == expected_url
+
+
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
\ No newline at end of file
diff --git a/unit_tests/test_user.py b/unit_tests/test_user.py
index 5850b5b..9184835 100644
--- a/unit_tests/test_user.py
+++ b/unit_tests/test_user.py
@@ -20,7 +20,8 @@ def test_user_properties():
}
}
- obj = User(data, "https://pinterest_url/v1", "1234abcd")
+ mock_io = mock.MagicMock()
+ obj = User(data, mock_io)
assert expected_url == obj.url
assert expected_firstname == obj.first_name
assert expected_lastname == obj.last_name
@@ -34,9 +35,6 @@ def test_get_boards():
"first_name": "John",
"last_name": "Doe"
}
- api_url = "https://pinterest_url/v1"
- token = "1234abcd"
- obj = User(data, api_url, token)
expected_id = 1234
expected_name = "MyBoard"
@@ -49,18 +47,16 @@ def test_get_boards():
}]
}
- with mock.patch("friendlypins.user.requests") as mock_requests:
- mock_response = mock.MagicMock()
- mock_response.json.return_value = expected_data
- mock_requests.get.return_value = mock_response
- result = obj.boards
+ mock_io = mock.MagicMock()
+ mock_io.get.return_value = expected_data
+ obj = User(data, mock_io)
+ result = obj.boards
- assert len(result) == 1
- assert expected_url == result[0].url
- assert expected_name == result[0].name
- assert expected_id == result[0].unique_id
+ assert len(result) == 1
+ assert expected_url == result[0].url
+ assert expected_name == result[0].name
+ assert expected_id == result[0].unique_id
- mock_response.raise_for_status.assert_called_once()
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 10
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pylint",
"pytest",
"pytest-cov",
"mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
dill==0.3.4
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@ad685c13d7a42e93fca3adf3b1b8894549fa296e#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- dill==0.3.4
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- pillow==8.4.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==2.13.9
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.5.5
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.16.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_api.py::test_get_user",
"unit_tests/test_board.py::test_board_properties",
"unit_tests/test_board.py::test_get_all_pins",
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists",
"unit_tests/test_pin.py::test_pin_properties",
"unit_tests/test_pin.py::test_pin_missing_media_type",
"unit_tests/test_pin.py::test_delete",
"unit_tests/test_pin.py::test_get_thumbnail",
"unit_tests/test_user.py::test_user_properties",
"unit_tests/test_user.py::test_get_boards"
]
| []
| []
| []
| Apache License 2.0 | 2,412 | [
"src/friendlypins/board.py",
"src/friendlypins/utils/rest_io.py",
"src/friendlypins/scripts/fpins.py",
".idea/codeStyles/codeStyleConfig.xml",
"docs/friendlypins.utils.rst",
"src/friendlypins/thumbnail.py",
".gitignore",
"src/friendlypins/user.py",
"docs/friendlypins.utils.rest_io.rst",
"src/friendlypins/api.py",
"src/friendlypins/utils/console_actions.py",
"src/friendlypins/headers.py",
"src/friendlypins/pin.py"
]
| [
"src/friendlypins/board.py",
"src/friendlypins/utils/rest_io.py",
"src/friendlypins/scripts/fpins.py",
".idea/codeStyles/codeStyleConfig.xml",
"docs/friendlypins.utils.rst",
"src/friendlypins/thumbnail.py",
".gitignore",
"src/friendlypins/user.py",
"docs/friendlypins.utils.rest_io.rst",
"src/friendlypins/api.py",
"src/friendlypins/utils/console_actions.py",
"src/friendlypins/headers.py",
"src/friendlypins/pin.py"
]
|
|
TheFriendlyCoder__friendlypins-45 | 9c0aa4ebcde5ad444e342aaa2b3315339dbc36f6 | 2018-04-16 03:40:54 | 9c0aa4ebcde5ad444e342aaa2b3315339dbc36f6 | diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index 045655f..7f51a3b 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -64,16 +64,12 @@ class Board(object):
return int(self._data['counts']['pins'])
@property
- def all_pins(self):
- """Gets a list of all pins from this board
+ def pins(self):
+ """Generator for iterating over the pins linked to this board
- NOTE: This process may take a long time to complete and require
- a lot of memory for boards that contain large numbers of pins
-
- :rtype: :class:`list` of :class:`friendlypins.pin.Pin`
+ :rtype: Generator of :class:`friendlypins.pin.Pin`
"""
- self._log.debug('Gettings all pins for board %s...', self.name)
- retval = list()
+ self._log.debug('Loading pins for board %s...', self.name)
properties = {
"fields": ','.join([
@@ -93,19 +89,20 @@ class Board(object):
])
}
+ page = 0
while True:
+ self._log.debug("Loading pins page %s", page)
result = self._io.get(
"boards/{0}/pins".format(self.unique_id),
properties)
assert 'data' in result
for cur_item in result['data']:
- retval.append(Pin(cur_item, self._io))
+ yield Pin(cur_item, self._io)
if not result["page"]["cursor"]:
break
properties["cursor"] = result["page"]["cursor"]
-
- return retval
+ page += 1
if __name__ == "__main__":
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index de3306d..3d69138 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -94,21 +94,39 @@ class User(object):
@property
def boards(self):
- """Gets a list of boards owned by this user
+ """Generator for iterating over the boards owned by this user
- :rtype: :class:`list` of :class:`friendlypins.board.Board`
+ :rtype: Generator of :class:`friendlypins.board.Board`
"""
- self._log.debug("Loading boards for user %s...", self.name)
-
- fields = "id,name,url,description,creator,created_at,counts,image"
- result = self._io.get('me/boards', {"fields": fields})
-
- assert 'data' in result
-
- retval = []
- for cur_item in result['data']:
- retval.append(Board(cur_item, self._io))
- return retval
+ self._log.debug('Loading boards for user %s...', self.name)
+
+ properties = {
+ "fields": ','.join([
+ "id",
+ "name",
+ "url",
+ "description",
+ "creator",
+ "created_at",
+ "counts",
+ "image"
+ ])
+ }
+
+ page = 0
+ while True:
+ self._log.debug("Loading boards page %s", page)
+ result = self._io.get("me/boards", properties)
+ assert 'data' in result
+
+ for cur_item in result['data']:
+ yield Board(cur_item, self._io)
+
+ if not result["page"]["cursor"]:
+ break
+
+ properties["cursor"] = result["page"]["cursor"]
+ page += 1
if __name__ == "__main__":
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 1ca66ef..da40dfc 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -71,13 +71,12 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
log.error("Could not find selected board: %s", board_name)
return 1
- all_pins = selected_board.all_pins
log.info('Downloading thumbnails...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
with tqdm(total=selected_board.num_pins, unit='b', ncols=80) as pbar:
- for cur_pin in all_pins:
+ for cur_pin in selected_board.pins:
retval = _download_pin(cur_pin, output_folder)
if retval:
return retval
| lazy load boards and pins
To simplify and optimize the interactions with boards and pins we should lazy-load the data from these two API calls using iterators:
User.boards
Board.all_pins | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_board.py b/unit_tests/test_board.py
index 5daac85..2314b19 100644
--- a/unit_tests/test_board.py
+++ b/unit_tests/test_board.py
@@ -24,7 +24,7 @@ def test_board_properties():
assert obj.num_pins == expected_pin_count
-def test_get_all_pins():
+def test_get_pins():
data = {
'id': '987654321',
'name': 'MyBoard'
@@ -54,7 +54,9 @@ def test_get_all_pins():
mock_io.get.return_value = expected_data
obj = Board(data, mock_io)
- result = obj.all_pins
+ result = list()
+ for item in obj.pins:
+ result.append(item)
assert len(result) == 1
assert expected_url == result[0].url
diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index 170f224..92138cc 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -267,7 +267,10 @@ def test_download_thumbnails_missing_board(rest_io, action_requests, mock_open,
"counts": {
"pins": 1
}
- }]
+ }],
+ "page": {
+ "cursor": None
+ }
}
# Fake pin data for the fake board, with fake thumbnail metadata
diff --git a/unit_tests/test_user.py b/unit_tests/test_user.py
index 9184835..0512b58 100644
--- a/unit_tests/test_user.py
+++ b/unit_tests/test_user.py
@@ -44,13 +44,19 @@ def test_get_boards():
"id": str(expected_id),
"name": expected_name,
"url": expected_url
- }]
+ }],
+ "page": {
+ "cursor": None
+ }
}
mock_io = mock.MagicMock()
mock_io.get.return_value = expected_data
obj = User(data, mock_io)
- result = obj.boards
+
+ result = list()
+ for item in obj.boards:
+ result.append(item)
assert len(result) == 1
assert expected_url == result[0].url
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@9c0aa4ebcde5ad444e342aaa2b3315339dbc36f6#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==3.0.0a4
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- pillow==8.4.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==3.0.0a4
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_board.py::test_get_pins"
]
| []
| [
"unit_tests/test_board.py::test_board_properties",
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists",
"unit_tests/test_user.py::test_user_properties",
"unit_tests/test_user.py::test_get_boards"
]
| []
| Apache License 2.0 | 2,413 | [
"src/friendlypins/board.py",
"src/friendlypins/user.py",
"src/friendlypins/utils/console_actions.py"
]
| [
"src/friendlypins/board.py",
"src/friendlypins/user.py",
"src/friendlypins/utils/console_actions.py"
]
|
|
PyFilesystem__pyfilesystem2-161 | 41040141870d4842f4d3b970c07436c3673ce740 | 2018-04-16 12:41:21 | 21cbbfbf1cdf053762184413842f9b03992af115 | coveralls:
[](https://coveralls.io/builds/16536175)
Coverage decreased (-0.2%) to 99.759% when pulling **52d22ae2954464790a5719ec5f011a9766e0a247 on althonos:fix-160** into **41040141870d4842f4d3b970c07436c3673ce740 on PyFilesystem:master**.
coveralls:
[](https://coveralls.io/builds/16536175)
Coverage decreased (-0.2%) to 99.759% when pulling **52d22ae2954464790a5719ec5f011a9766e0a247 on althonos:fix-160** into **41040141870d4842f4d3b970c07436c3673ce740 on PyFilesystem:master**.
willmcgugan: Great, thanks. Will do a release once that's in.
multifs filters out duplicate paths, and I did some research. It seems that this is the fastest way to filter duplicates while preserving order:
```
directory = list(OrderedDict.fromkeys(directory))
```
althonos: > multifs filters out duplicate paths, and I did some research. It seems that this is the fastest way to filter duplicates while preserving order: `directory = list(OrderedDict.fromkeys(directory))`
I use the `unique_everseen` recipe most of the time, but this is a nice trick ! I'll add it right now.
althonos: Yes, no more work intended !
I'll rebase the typing branch as well.
willmcgugan: Great, thanks! | diff --git a/fs/tarfs.py b/fs/tarfs.py
index 543b203..917eb03 100644
--- a/fs/tarfs.py
+++ b/fs/tarfs.py
@@ -16,7 +16,7 @@ from .enums import ResourceType
from .info import Info
from .iotools import RawWrapper
from .opener import open_fs
-from .path import dirname, normpath, relpath, basename
+from .path import dirname, relpath, basename, isbase, parts, frombase
from .wrapfs import WrapFS
from .permissions import Permissions
@@ -256,9 +256,14 @@ class ReadTarFS(FS):
else:
try:
+ implicit = False
member = self._tar.getmember(self._encode(_path))
except KeyError:
- raise errors.ResourceNotFound(path)
+ if not self.isdir(_path):
+ raise errors.ResourceNotFound(path)
+ implicit = True
+ member = tarfile.TarInfo(_path)
+ member.type = tarfile.DIRTYPE
raw_info["basic"] = {
"name": basename(self._decode(member.name)),
@@ -268,10 +273,11 @@ class ReadTarFS(FS):
if "details" in namespaces:
raw_info["details"] = {
"size": member.size,
- "type": int(self.type_map[member.type]),
- "modified": member.mtime,
+ "type": int(self.type_map[member.type])
}
- if "access" in namespaces:
+ if not implicit:
+ raw_info["details"]["modified"] = member.mtime
+ if "access" in namespaces and not implicit:
raw_info["access"] = {
"gid": member.gid,
"group": member.gname,
@@ -279,7 +285,7 @@ class ReadTarFS(FS):
"uid": member.uid,
"user": member.uname,
}
- if "tar" in namespaces:
+ if "tar" in namespaces and not implicit:
raw_info["tar"] = _get_member_info(member, self.encoding)
raw_info["tar"].update({
k.replace('is', 'is_'):getattr(member, k)()
@@ -289,39 +295,46 @@ class ReadTarFS(FS):
return Info(raw_info)
+ def isdir(self, path):
+ _path = relpath(self.validatepath(path))
+ try:
+ return self._directory[_path].isdir()
+ except KeyError:
+ return any(isbase(_path, name) for name in self._directory)
+
+ def isfile(self, path):
+ _path = relpath(self.validatepath(path))
+ try:
+ return self._directory[_path].isfile()
+ except KeyError:
+ return False
+
def setinfo(self, path, info):
self.check()
raise errors.ResourceReadOnly(path)
def listdir(self, path):
- self.check()
- _path = relpath(path)
- info = self._directory.get(_path)
- if _path:
- if info is None:
- raise errors.ResourceNotFound(path)
- if not info.isdir():
- raise errors.DirectoryExpected(path)
- dir_list = [
- basename(name)
- for name in self._directory.keys()
- if dirname(name) == _path
- ]
- return dir_list
+ _path = relpath(self.validatepath(path))
+
+ if not self.gettype(path) is ResourceType.directory:
+ raise errors.DirectoryExpected(path)
+
+ children = (frombase(_path, n) for n in self._directory if isbase(_path, n))
+ content = (parts(child)[1] for child in children if relpath(child))
+ return list(OrderedDict.fromkeys(content))
def makedir(self, path, permissions=None, recreate=False):
self.check()
raise errors.ResourceReadOnly(path)
def openbin(self, path, mode="r", buffering=-1, **options):
- self.check()
- path = relpath(normpath(path))
+ _path = relpath(self.validatepath(path))
if 'w' in mode or '+' in mode or 'a' in mode:
raise errors.ResourceReadOnly(path)
try:
- member = self._tar.getmember(self._encode(path))
+ member = self._tar.getmember(self._encode(_path))
except KeyError:
six.raise_from(errors.ResourceNotFound(path), None)
| PyFilesystem can't find existing file in tar-fs
I wrote a stackoverflow post about this, I'm not entirely sure, but I suspect this to be a bug:
https://stackoverflow.com/questions/49811622/pyfilesystem-cant-find-existing-file-in-tar-fs | PyFilesystem/pyfilesystem2 | diff --git a/tests/test_tarfs.py b/tests/test_tarfs.py
index e894f6b..bfe0bc4 100644
--- a/tests/test_tarfs.py
+++ b/tests/test_tarfs.py
@@ -1,16 +1,20 @@
# -*- encoding: UTF-8
from __future__ import unicode_literals
+import io
import os
import six
import gzip
import tarfile
import getpass
+import tarfile
import tempfile
import unittest
+import uuid
from fs import tarfs
from fs import errors
+from fs.enums import ResourceType
from fs.compress import write_tar
from fs.opener import open_fs
from fs.opener.errors import NotWriteable
@@ -184,6 +188,73 @@ class TestReadTarFS(ArchiveTestCases, unittest.TestCase):
self.assertTrue(top.get('tar', 'is_file'))
+class TestImplicitDirectories(unittest.TestCase):
+ """Regression tests for #160.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.tmpfs = open_fs("temp://")
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.tmpfs.close()
+
+ def setUp(self):
+ self.tempfile = self.tmpfs.open('test.tar', 'wb+')
+ with tarfile.open(mode="w", fileobj=self.tempfile) as tf:
+ tf.addfile(tarfile.TarInfo("foo/bar/baz/spam.txt"), io.StringIO())
+ tf.addfile(tarfile.TarInfo("foo/eggs.bin"), io.StringIO())
+ tf.addfile(tarfile.TarInfo("foo/yolk/beans.txt"), io.StringIO())
+ info = tarfile.TarInfo("foo/yolk")
+ info.type = tarfile.DIRTYPE
+ tf.addfile(info, io.BytesIO())
+ self.tempfile.seek(0)
+ self.fs = tarfs.TarFS(self.tempfile)
+
+ def tearDown(self):
+ self.fs.close()
+ self.tempfile.close()
+
+ def test_isfile(self):
+ self.assertFalse(self.fs.isfile("foo"))
+ self.assertFalse(self.fs.isfile("foo/bar"))
+ self.assertFalse(self.fs.isfile("foo/bar/baz"))
+ self.assertTrue(self.fs.isfile("foo/bar/baz/spam.txt"))
+ self.assertTrue(self.fs.isfile("foo/yolk/beans.txt"))
+ self.assertTrue(self.fs.isfile("foo/eggs.bin"))
+ self.assertFalse(self.fs.isfile("foo/eggs.bin/baz"))
+
+ def test_isdir(self):
+ self.assertTrue(self.fs.isdir("foo"))
+ self.assertTrue(self.fs.isdir("foo/yolk"))
+ self.assertTrue(self.fs.isdir("foo/bar"))
+ self.assertTrue(self.fs.isdir("foo/bar/baz"))
+ self.assertFalse(self.fs.isdir("foo/bar/baz/spam.txt"))
+ self.assertFalse(self.fs.isdir("foo/eggs.bin"))
+ self.assertFalse(self.fs.isdir("foo/eggs.bin/baz"))
+ self.assertFalse(self.fs.isdir("foo/yolk/beans.txt"))
+
+ def test_listdir(self):
+ self.assertEqual(sorted(self.fs.listdir("foo")), ["bar", "eggs.bin", "yolk"])
+ self.assertEqual(self.fs.listdir("foo/bar"), ["baz"])
+ self.assertEqual(self.fs.listdir("foo/bar/baz"), ["spam.txt"])
+ self.assertEqual(self.fs.listdir("foo/yolk"), ["beans.txt"])
+
+ def test_getinfo(self):
+ info = self.fs.getdetails("foo/bar/baz")
+ self.assertEqual(info.name, "baz")
+ self.assertEqual(info.size, 0)
+ self.assertIs(info.type, ResourceType.directory)
+
+ info = self.fs.getdetails("foo")
+ self.assertEqual(info.name, "foo")
+ self.assertEqual(info.size, 0)
+ self.assertIs(info.type, ResourceType.directory)
+
+
+
+
class TestReadTarFSMem(TestReadTarFS):
def make_source_fs(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"pyftpdlib",
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/PyFilesystem/pyfilesystem2.git@41040141870d4842f4d3b970c07436c3673ce740#egg=fs
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyftpdlib==2.0.1
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
six==1.10.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pyfilesystem2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyftpdlib==2.0.1
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- six==1.10.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pyfilesystem2
| [
"tests/test_tarfs.py::TestImplicitDirectories::test_getinfo",
"tests/test_tarfs.py::TestImplicitDirectories::test_isdir",
"tests/test_tarfs.py::TestImplicitDirectories::test_listdir"
]
| []
| [
"tests/test_tarfs.py::TestWriteReadTarFS::test_unicode_paths",
"tests/test_tarfs.py::TestWriteTarFS::test_appendbytes",
"tests/test_tarfs.py::TestWriteTarFS::test_appendtext",
"tests/test_tarfs.py::TestWriteTarFS::test_basic",
"tests/test_tarfs.py::TestWriteTarFS::test_bin_files",
"tests/test_tarfs.py::TestWriteTarFS::test_close",
"tests/test_tarfs.py::TestWriteTarFS::test_copy",
"tests/test_tarfs.py::TestWriteTarFS::test_copy_dir_mem",
"tests/test_tarfs.py::TestWriteTarFS::test_copy_dir_temp",
"tests/test_tarfs.py::TestWriteTarFS::test_copy_file",
"tests/test_tarfs.py::TestWriteTarFS::test_copy_structure",
"tests/test_tarfs.py::TestWriteTarFS::test_copydir",
"tests/test_tarfs.py::TestWriteTarFS::test_create",
"tests/test_tarfs.py::TestWriteTarFS::test_desc",
"tests/test_tarfs.py::TestWriteTarFS::test_exists",
"tests/test_tarfs.py::TestWriteTarFS::test_files",
"tests/test_tarfs.py::TestWriteTarFS::test_filterdir",
"tests/test_tarfs.py::TestWriteTarFS::test_getbytes",
"tests/test_tarfs.py::TestWriteTarFS::test_getfile",
"tests/test_tarfs.py::TestWriteTarFS::test_getinfo",
"tests/test_tarfs.py::TestWriteTarFS::test_getmeta",
"tests/test_tarfs.py::TestWriteTarFS::test_getsize",
"tests/test_tarfs.py::TestWriteTarFS::test_getsyspath",
"tests/test_tarfs.py::TestWriteTarFS::test_gettext",
"tests/test_tarfs.py::TestWriteTarFS::test_geturl",
"tests/test_tarfs.py::TestWriteTarFS::test_geturl_purpose",
"tests/test_tarfs.py::TestWriteTarFS::test_invalid_chars",
"tests/test_tarfs.py::TestWriteTarFS::test_isdir",
"tests/test_tarfs.py::TestWriteTarFS::test_isempty",
"tests/test_tarfs.py::TestWriteTarFS::test_isfile",
"tests/test_tarfs.py::TestWriteTarFS::test_islink",
"tests/test_tarfs.py::TestWriteTarFS::test_listdir",
"tests/test_tarfs.py::TestWriteTarFS::test_makedir",
"tests/test_tarfs.py::TestWriteTarFS::test_makedirs",
"tests/test_tarfs.py::TestWriteTarFS::test_match",
"tests/test_tarfs.py::TestWriteTarFS::test_move",
"tests/test_tarfs.py::TestWriteTarFS::test_move_dir_mem",
"tests/test_tarfs.py::TestWriteTarFS::test_move_dir_temp",
"tests/test_tarfs.py::TestWriteTarFS::test_move_file_mem",
"tests/test_tarfs.py::TestWriteTarFS::test_move_file_same_fs",
"tests/test_tarfs.py::TestWriteTarFS::test_move_file_temp",
"tests/test_tarfs.py::TestWriteTarFS::test_move_same_fs",
"tests/test_tarfs.py::TestWriteTarFS::test_movedir",
"tests/test_tarfs.py::TestWriteTarFS::test_open",
"tests/test_tarfs.py::TestWriteTarFS::test_open_files",
"tests/test_tarfs.py::TestWriteTarFS::test_openbin",
"tests/test_tarfs.py::TestWriteTarFS::test_openbin_rw",
"tests/test_tarfs.py::TestWriteTarFS::test_opendir",
"tests/test_tarfs.py::TestWriteTarFS::test_remove",
"tests/test_tarfs.py::TestWriteTarFS::test_removedir",
"tests/test_tarfs.py::TestWriteTarFS::test_removetree",
"tests/test_tarfs.py::TestWriteTarFS::test_repeat_dir",
"tests/test_tarfs.py::TestWriteTarFS::test_scandir",
"tests/test_tarfs.py::TestWriteTarFS::test_setbinfile",
"tests/test_tarfs.py::TestWriteTarFS::test_setbytes",
"tests/test_tarfs.py::TestWriteTarFS::test_setfile",
"tests/test_tarfs.py::TestWriteTarFS::test_setinfo",
"tests/test_tarfs.py::TestWriteTarFS::test_settext",
"tests/test_tarfs.py::TestWriteTarFS::test_settimes",
"tests/test_tarfs.py::TestWriteTarFS::test_touch",
"tests/test_tarfs.py::TestWriteTarFS::test_tree",
"tests/test_tarfs.py::TestWriteTarFS::test_validatepath",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_appendbytes",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_appendtext",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_basic",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_bin_files",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_close",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_copy",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_copy_dir_mem",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_copy_dir_temp",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_copy_file",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_copy_structure",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_copydir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_create",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_desc",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_exists",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_files",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_filterdir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_getbytes",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_getfile",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_getinfo",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_getmeta",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_getsize",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_getsyspath",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_gettext",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_geturl",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_geturl_purpose",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_invalid_chars",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_isdir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_isempty",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_isfile",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_islink",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_listdir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_makedir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_makedirs",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_match",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move_dir_mem",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move_dir_temp",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move_file_mem",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move_file_same_fs",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move_file_temp",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_move_same_fs",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_movedir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_open",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_open_files",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_openbin",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_openbin_rw",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_opendir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_remove",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_removedir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_removetree",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_repeat_dir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_scandir",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_setbinfile",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_setbytes",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_setfile",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_setinfo",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_settext",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_settimes",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_touch",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_tree",
"tests/test_tarfs.py::TestWriteTarFSToFileobj::test_validatepath",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_appendbytes",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_appendtext",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_basic",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_bin_files",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_close",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_copy",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_copy_dir_mem",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_copy_dir_temp",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_copy_file",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_copy_structure",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_copydir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_create",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_desc",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_exists",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_files",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_filterdir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_getbytes",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_getfile",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_getinfo",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_getmeta",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_getsize",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_getsyspath",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_gettext",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_geturl",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_geturl_purpose",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_invalid_chars",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_isdir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_isempty",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_isfile",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_islink",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_listdir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_makedir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_makedirs",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_match",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move_dir_mem",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move_dir_temp",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move_file_mem",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move_file_same_fs",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move_file_temp",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_move_same_fs",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_movedir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_open",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_open_files",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_openbin",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_openbin_rw",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_opendir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_remove",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_removedir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_removetree",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_repeat_dir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_scandir",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_setbinfile",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_setbytes",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_setfile",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_setinfo",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_settext",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_settimes",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_touch",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_tree",
"tests/test_tarfs.py::TestWriteGZippedTarFS::test_validatepath",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_appendbytes",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_appendtext",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_basic",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_bin_files",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_close",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_copy",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_copy_dir_mem",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_copy_dir_temp",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_copy_file",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_copy_structure",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_copydir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_create",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_desc",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_exists",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_files",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_filterdir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_getbytes",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_getfile",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_getinfo",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_getmeta",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_getsize",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_getsyspath",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_gettext",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_geturl",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_geturl_purpose",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_invalid_chars",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_isdir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_isempty",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_isfile",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_islink",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_listdir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_makedir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_makedirs",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_match",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move_dir_mem",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move_dir_temp",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move_file_mem",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move_file_same_fs",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move_file_temp",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_move_same_fs",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_movedir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_open",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_open_files",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_openbin",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_openbin_rw",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_opendir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_remove",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_removedir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_removetree",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_repeat_dir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_scandir",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_setbinfile",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_setbytes",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_setfile",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_setinfo",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_settext",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_settimes",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_touch",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_tree",
"tests/test_tarfs.py::TestWriteXZippedTarFS::test_validatepath",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_appendbytes",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_appendtext",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_basic",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_bin_files",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_close",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_copy",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_copy_dir_mem",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_copy_dir_temp",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_copy_file",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_copy_structure",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_copydir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_create",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_desc",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_exists",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_files",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_filterdir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_getbytes",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_getfile",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_getinfo",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_getmeta",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_getsize",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_getsyspath",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_gettext",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_geturl",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_geturl_purpose",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_invalid_chars",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_isdir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_isempty",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_isfile",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_islink",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_listdir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_makedir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_makedirs",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_match",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move_dir_mem",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move_dir_temp",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move_file_mem",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move_file_same_fs",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move_file_temp",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_move_same_fs",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_movedir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_open",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_open_files",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_openbin",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_openbin_rw",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_opendir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_remove",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_removedir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_removetree",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_repeat_dir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_scandir",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_setbinfile",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_setbytes",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_setfile",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_setinfo",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_settext",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_settimes",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_touch",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_tree",
"tests/test_tarfs.py::TestWriteBZippedTarFS::test_validatepath",
"tests/test_tarfs.py::TestReadTarFS::test_getinfo",
"tests/test_tarfs.py::TestReadTarFS::test_gets",
"tests/test_tarfs.py::TestReadTarFS::test_implied_dir",
"tests/test_tarfs.py::TestReadTarFS::test_listdir",
"tests/test_tarfs.py::TestReadTarFS::test_open",
"tests/test_tarfs.py::TestReadTarFS::test_read_from_filename",
"tests/test_tarfs.py::TestReadTarFS::test_read_from_fileobject",
"tests/test_tarfs.py::TestReadTarFS::test_readonly",
"tests/test_tarfs.py::TestReadTarFS::test_repr",
"tests/test_tarfs.py::TestReadTarFS::test_str",
"tests/test_tarfs.py::TestReadTarFS::test_walk_files",
"tests/test_tarfs.py::TestImplicitDirectories::test_isfile",
"tests/test_tarfs.py::TestReadTarFSMem::test_getinfo",
"tests/test_tarfs.py::TestReadTarFSMem::test_gets",
"tests/test_tarfs.py::TestReadTarFSMem::test_implied_dir",
"tests/test_tarfs.py::TestReadTarFSMem::test_listdir",
"tests/test_tarfs.py::TestReadTarFSMem::test_open",
"tests/test_tarfs.py::TestReadTarFSMem::test_read_from_filename",
"tests/test_tarfs.py::TestReadTarFSMem::test_read_from_fileobject",
"tests/test_tarfs.py::TestReadTarFSMem::test_readonly",
"tests/test_tarfs.py::TestReadTarFSMem::test_repr",
"tests/test_tarfs.py::TestReadTarFSMem::test_str",
"tests/test_tarfs.py::TestReadTarFSMem::test_walk_files",
"tests/test_tarfs.py::TestOpener::test_not_writeable"
]
| []
| MIT License | 2,414 | [
"fs/tarfs.py"
]
| [
"fs/tarfs.py"
]
|
pika__pika-1020 | 7b6d7983db021ae4b84d08ea9cee4b8f960ada43 | 2018-04-16 21:52:51 | 7b6d7983db021ae4b84d08ea9cee4b8f960ada43 | diff --git a/docs/faq.rst b/docs/faq.rst
index 132b495..f70ef55 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -3,7 +3,7 @@ Frequently Asked Questions
- Is Pika thread safe?
- Pika does not have any notion of threading in the code. If you want to use Pika with threading, make sure you have a Pika connection per thread, created in that thread. It is not safe to share one Pika connection across threads.
+ Pika does not have any notion of threading in the code. If you want to use Pika with threading, make sure you have a Pika connection per thread, created in that thread. It is not safe to share one Pika connection across threads, with one exception: you may call the connection method `add_callback_threadsafe` from another thread to schedule a callback within an active pika connection.
- How do I report a bug with Pika?
diff --git a/pika/adapters/asyncio_connection.py b/pika/adapters/asyncio_connection.py
index b2ac570..57cb2d4 100644
--- a/pika/adapters/asyncio_connection.py
+++ b/pika/adapters/asyncio_connection.py
@@ -20,6 +20,17 @@ class IOLoopAdapter:
self.readers = set()
self.writers = set()
+ def close(self):
+ """Release ioloop's resources.
+
+ This method is intended to be called by the application or test code
+ only after the ioloop's outermost `start()` call returns. After calling
+ `close()`, no other interaction with the closed instance of ioloop
+ should be performed.
+
+ """
+ self.loop.close()
+
def add_timeout(self, deadline, callback_method):
"""Add the callback_method to the EventLoop timer to fire after deadline
seconds. Returns a Handle to the timeout.
@@ -41,6 +52,28 @@ class IOLoopAdapter:
"""
return handle.cancel()
+ def add_callback_threadsafe(self, callback):
+ """Requests a call to the given function as soon as possible in the
+ context of this IOLoop's thread.
+
+ NOTE: This is the only thread-safe method offered by the IOLoop adapter.
+ All other manipulations of the IOLoop adapter and its parent connection
+ must be performed from the connection's thread.
+
+ For example, a thread may request a call to the
+ `channel.basic_ack` method of a connection that is running in a
+ different thread via
+
+ ```
+ connection.add_callback_threadsafe(
+ functools.partial(channel.basic_ack, delivery_tag=...))
+ ```
+
+ :param method callback: The callback method; must be callable.
+
+ """
+ self.loop.call_soon_threadsafe(callback)
+
def add_handler(self, fd, cb, event_state):
""" Registers the given handler to receive the given events for ``fd``.
diff --git a/pika/adapters/base_connection.py b/pika/adapters/base_connection.py
index d6e8407..cc3836d 100644
--- a/pika/adapters/base_connection.py
+++ b/pika/adapters/base_connection.py
@@ -135,6 +135,32 @@ class BaseConnection(connection.Connection):
"""
self.ioloop.remove_timeout(timeout_id)
+ def add_callback_threadsafe(self, callback):
+ """Requests a call to the given function as soon as possible in the
+ context of this connection's IOLoop thread.
+
+ NOTE: This is the only thread-safe method offered by the connection. All
+ other manipulations of the connection must be performed from the
+ connection's thread.
+
+ For example, a thread may request a call to the
+ `channel.basic_ack` method of a connection that is running in a
+ different thread via
+
+ ```
+ connection.add_callback_threadsafe(
+ functools.partial(channel.basic_ack, delivery_tag=...))
+ ```
+
+ :param method callback: The callback method; must be callable.
+
+ """
+ if not callable(callback):
+ raise TypeError(
+ 'callback must be a callable, but got %r' % (callback,))
+
+ self.ioloop.add_callback_threadsafe(callback)
+
def _adapter_connect(self):
"""Connect to the RabbitMQ broker, returning True if connected.
diff --git a/pika/adapters/blocking_connection.py b/pika/adapters/blocking_connection.py
index 4f7751f..29701c2 100644
--- a/pika/adapters/blocking_connection.py
+++ b/pika/adapters/blocking_connection.py
@@ -10,6 +10,9 @@ and the :class:`~pika.adapters.blocking_connection.BlockingChannel`
classes.
"""
+# Suppress too-many-lines
+# pylint: disable=C0302
+
# Disable "access to protected member warnings: this wrapper implementation is
# a friend of those instances
# pylint: disable=W0212
@@ -155,7 +158,7 @@ class _CallbackResult(object):
with `append_element`
"""
assert self._ready, '_CallbackResult was not set'
- assert isinstance(self._values, list) and len(self._values) > 0, (
+ assert isinstance(self._values, list) and self._values, (
'_CallbackResult value is incompatible with append_element: %r'
% (self._values,))
@@ -378,7 +381,7 @@ class BlockingConnection(object):
def _cleanup(self):
"""Clean up members that might inhibit garbage collection"""
- self._impl.ioloop.deactivate_poller()
+ self._impl.ioloop.close()
self._ready_events.clear()
self._opened_result.reset()
self._open_error_result.reset()
@@ -525,6 +528,18 @@ class BlockingConnection(object):
"""
self._ready_events.append(evt)
+ def _on_threadsafe_callback(self, user_callback):
+ """Handle callback that was registered via `add_callback_threadsafe`.
+
+ :param user_callback: callback passed to `add_callback_threadsafe` by
+ the application.
+
+ """
+ # Turn it into a 0-delay timeout to take advantage of our existing logic
+ # that deals with reentrancy
+ self.add_timeout(0, user_callback)
+
+
def _on_connection_blocked(self, user_callback, method_frame):
"""Handle Connection.Blocked notification from RabbitMQ broker
@@ -632,6 +647,29 @@ class BlockingConnection(object):
return timer_id
+ def add_callback_threadsafe(self, callback):
+ """Requests a call to the given function as soon as possible in the
+ context of this connection's thread.
+
+ NOTE: This is the only thread-safe method in `BlockingConnection`. All
+ other manipulations of `BlockingConnection` must be performed from the
+ connection's thread.
+
+ For example, a thread may request a call to the
+ `BlockingChannel.basic_ack` method of a `BlockingConnection` that is
+ running in a different thread via
+
+ ```
+ connection.add_callback_threadsafe(
+ functools.partial(channel.basic_ack, delivery_tag=...))
+ ```
+
+ :param method callback: The callback method; must be callable
+
+ """
+ self._impl.add_callback_threadsafe(
+ functools.partial(self._on_threadsafe_callback, callback))
+
def remove_timeout(self, timeout_id):
"""Remove a timer if it's still in the timeout stack
@@ -874,7 +912,7 @@ class _ConsumerCancellationEvt(_ChannelPendingEvt):
`Basic.Cancel`
"""
- __slots__ = 'method_frame'
+ __slots__ = ('method_frame',)
def __init__(self, method_frame):
"""
@@ -1798,7 +1836,8 @@ class BlockingChannel(object):
"""Blocking consumption of a queue instead of via a callback. This
method is a generator that yields each message as a tuple of method,
properties, and body. The active generator iterator terminates when the
- consumer is cancelled by client or broker.
+ consumer is cancelled by client via `BlockingChannel.cancel()` or by
+ broker.
Example:
@@ -2398,7 +2437,8 @@ class BlockingChannel(object):
:param queue: The queue name
:type queue: str or unicode; if empty string, the broker will create a
unique queue name;
- :param bool passive: Only check to see if the queue exists
+ :param bool passive: Only check to see if the queue exists and raise
+ `ChannelClosed` if it doesn't;
:param bool durable: Survive reboots of the broker
:param bool exclusive: Only allow access by the current connection
:param bool auto_delete: Delete after consumer cancels or disconnects
diff --git a/pika/adapters/select_connection.py b/pika/adapters/select_connection.py
index d199139..f6970d5 100644
--- a/pika/adapters/select_connection.py
+++ b/pika/adapters/select_connection.py
@@ -3,16 +3,15 @@ platform pika is running on.
"""
import abc
-import os
+import collections
+import errno
+import functools
+import heapq
import logging
-import socket
import select
-import errno
import time
import threading
-from collections import defaultdict
-
import pika.compat
from pika.adapters.base_connection import BaseConnection
@@ -111,6 +110,173 @@ class SelectConnection(BaseConnection):
super(SelectConnection, self)._adapter_disconnect()
[email protected]_ordering
+class _Timeout(object):
+ """Represents a timeout"""
+
+ __slots__ = ('deadline', 'callback',)
+
+ def __init__(self, deadline, callback):
+ """
+ :param float deadline: timer expiration as non-negative epoch number
+ :param callable callback: callback to call when timeout expires
+ :raises ValueError, TypeError:
+ """
+
+ if deadline < 0:
+ raise ValueError(
+ 'deadline must be non-negative epoch number, but got %r' %
+ (deadline,))
+
+ if not callable(callback):
+ raise TypeError(
+ 'callback must be a callable, but got %r' % (callback,))
+
+ self.deadline = deadline
+ self.callback = callback
+
+ def __eq__(self, other):
+ """NOTE: not supporting sort stability"""
+ return self.deadline == other.deadline
+
+ def __lt__(self, other):
+ """NOTE: not supporting sort stability"""
+ return self.deadline < other.deadline
+
+ def __le__(self, other):
+ """NOTE: not supporting sort stability"""
+ return self.deadline <= other.deadline
+
+
+class _Timer(object):
+ """Manage timeouts for use in ioloop"""
+
+ # Cancellation count threshold for triggering garbage collection of
+ # cancelled timers
+ _GC_CANCELLATION_THRESHOLD = 1024
+
+ def __init__(self):
+ self._timeout_heap = []
+
+ # Number of canceled timeouts on heap; for scheduling garbage
+ # collection of canceled timeouts
+ self._num_cancellations = 0
+
+ def close(self):
+ """Release resources. Don't use the `_Timer` instance after closing
+ it
+ """
+ # Eliminate potential reference cycles to aid garbage-collection
+ if self._timeout_heap is not None:
+ for timeout in self._timeout_heap:
+ timeout.callback = None
+ self._timeout_heap = None
+
+ def call_later(self, delay, callback):
+ """Schedule a one-shot timeout given delay seconds.
+
+ NOTE: you may cancel the timer before dispatch of the callback. Timer
+ Manager cancels the timer upon dispatch of the callback.
+
+ :param float delay: Non-negative number of seconds from now until
+ expiration
+ :param method callback: The callback method, having the signature
+ `callback()`
+
+ :rtype: _Timeout
+ :raises ValueError, TypeError
+
+ """
+ if delay < 0:
+ raise ValueError(
+ 'call_later: delay must be non-negative, but got %r'
+ % (delay,))
+
+ now = time.time()
+
+ timeout = _Timeout(now + delay, callback)
+
+ heapq.heappush(self._timeout_heap, timeout)
+
+ LOGGER.debug('call_later: added timeout %r with deadline=%r and '
+ 'callback=%r; now=%s; delay=%s', timeout, timeout.deadline,
+ timeout.callback, now, delay)
+
+ return timeout
+
+ def remove_timeout(self, timeout):
+ """Cancel the timeout
+
+ :param _Timeout timeout: The timer to cancel
+
+ """
+ # NOTE removing from the heap is difficult, so we just deactivate the
+ # timeout and garbage-collect it at a later time; see discussion
+ # in http://docs.python.org/library/heapq.html
+ if timeout.callback is None:
+ LOGGER.warning(
+ 'remove_timeout: timeout was already removed or called %r',
+ timeout)
+ else:
+ LOGGER.debug('remove_timeout: removing timeout %r with deadline=%r '
+ 'and callback=%r', timeout, timeout.deadline,
+ timeout.callback)
+ timeout.callback = None
+ self._num_cancellations += 1
+
+ def get_remaining_interval(self):
+ """Get the interval to the next timeout expiration
+
+ :returns: non-negative number of seconds until next timer expiration;
+ None if there are no timers
+ :rtype: float
+
+ """
+ if self._timeout_heap:
+ interval = max(0, self._timeout_heap[0].deadline - time.time())
+ else:
+ interval = None
+
+ return interval
+
+ def process_timeouts(self):
+ """Process pending timeouts, invoking callbacks for those whose time has
+ come
+
+ """
+ if self._timeout_heap:
+ now = time.time()
+
+ # Remove ready timeouts from the heap now to prevent IO starvation
+ # from timeouts added during callback processing
+ ready_timeouts = []
+
+ while self._timeout_heap and self._timeout_heap[0].deadline <= now:
+ timeout = heapq.heappop(self._timeout_heap)
+ if timeout.callback is not None:
+ ready_timeouts.append(timeout)
+ else:
+ self._num_cancellations -= 1
+
+ # Invoke ready timeout callbacks
+ for timeout in ready_timeouts:
+ if timeout.callback is None:
+ # Must have been canceled from a prior callback
+ self._num_cancellations -= 1
+ continue
+
+ timeout.callback()
+ timeout.callback = None
+
+ # Garbage-collect canceled timeouts if they exceed threshold
+ if (self._num_cancellations >= self._GC_CANCELLATION_THRESHOLD and
+ self._num_cancellations > (len(self._timeout_heap) >> 1)):
+ self._num_cancellations = 0
+ self._timeout_heap = [t for t in self._timeout_heap
+ if t.callback is not None]
+ heapq.heapify(self._timeout_heap)
+
+
class IOLoop(object):
"""Singleton wrapper that decides which type of poller to use, creates an
instance of it in start_poller and keeps the invoking application in a
@@ -123,33 +289,67 @@ class IOLoop(object):
"""
def __init__(self):
- self._poller = self._get_poller()
+ self._timer = _Timer()
+
+ # Callbacks requested via `add_callback`
+ self._callbacks = collections.deque()
+
+ # Identity of this IOLoop's thread
+ self._thread_id = None
+
+ self._poller = self._get_poller(self._get_remaining_interval,
+ self.process_timeouts)
+
+ def close(self):
+ """Release IOLoop's resources.
+
+ `IOLoop.close` is intended to be called by the application or test code
+ only after `IOLoop.start()` returns. After calling `close()`, no other
+ interaction with the closed instance of `IOLoop` should be performed.
+
+ """
+ if self._callbacks is not None:
+ self._poller.close()
+ self._timer.close()
+ self._callbacks = None
@staticmethod
- def _get_poller():
- """Determine the best poller to use for this enviroment."""
+ def _get_poller(get_wait_seconds, process_timeouts):
+ """Determine the best poller to use for this environment and instantiate
+ it.
+
+ :param get_wait_seconds: Function for getting the maximum number of
+ seconds to wait for IO for use by the poller
+ :param process_timeouts: Function for processing timeouts for use by the
+ poller
+
+ :returns: the instantiated poller instance supporting `_PollerBase` API
+ """
poller = None
+ kwargs = dict(get_wait_seconds=get_wait_seconds,
+ process_timeouts=process_timeouts)
+
if hasattr(select, 'epoll'):
if not SELECT_TYPE or SELECT_TYPE == 'epoll':
LOGGER.debug('Using EPollPoller')
- poller = EPollPoller()
+ poller = EPollPoller(**kwargs)
if not poller and hasattr(select, 'kqueue'):
if not SELECT_TYPE or SELECT_TYPE == 'kqueue':
LOGGER.debug('Using KQueuePoller')
- poller = KQueuePoller()
+ poller = KQueuePoller(**kwargs)
if (not poller and hasattr(select, 'poll') and
hasattr(select.poll(), 'modify')): # pylint: disable=E1101
if not SELECT_TYPE or SELECT_TYPE == 'poll':
LOGGER.debug('Using PollPoller')
- poller = PollPoller()
+ poller = PollPoller(**kwargs)
if not poller:
LOGGER.debug('Using SelectPoller')
- poller = SelectPoller()
+ poller = SelectPoller(**kwargs)
return poller
@@ -164,7 +364,7 @@ class IOLoop(object):
:rtype: str
"""
- return self._poller.add_timeout(deadline, callback_method)
+ return self._timer.call_later(deadline, callback_method)
def remove_timeout(self, timeout_id):
"""[API] Remove a timeout
@@ -172,7 +372,58 @@ class IOLoop(object):
:param str timeout_id: The timeout id to remove
"""
- self._poller.remove_timeout(timeout_id)
+ self._timer.remove_timeout(timeout_id)
+
+ def add_callback_threadsafe(self, callback):
+ """Requests a call to the given function as soon as possible in the
+ context of this IOLoop's thread.
+
+ NOTE: This is the only thread-safe method in IOLoop. All other
+ manipulations of IOLoop must be performed from the IOLoop's thread.
+
+ For example, a thread may request a call to the `stop` method of an
+ ioloop that is running in a different thread via
+ `ioloop.add_callback_threadsafe(ioloop.stop)`
+
+ :param method callback: The callback method
+
+ """
+ if not callable(callback):
+ raise TypeError(
+ 'callback must be a callable, but got %r' % (callback,))
+
+ # NOTE: `deque.append` is atomic
+ self._callbacks.append(callback)
+ if threading.current_thread().ident != self._thread_id:
+ # Wake up the IOLoop running in another thread
+ self._poller.wake_threadsafe()
+
+ LOGGER.debug('add_callback_threadsafe: added callback=%r', callback)
+
+ def process_timeouts(self):
+ """[Extension] Process pending callbacks and timeouts, invoking those
+ whose time has come. Internal use only.
+
+ """
+ # Avoid I/O starvation by postponing new callbacks to the next iteration
+ for _ in pika.compat.xrange(len(self._callbacks)):
+ self._callbacks.popleft()()
+
+ self._timer.process_timeouts()
+
+ def _get_remaining_interval(self):
+ """Get the remaining interval to the next callback or timeout
+ expiration.
+
+ :returns: non-negative number of seconds until next callback or timer
+ expiration; None if there are no callbacks and timers
+ :rtype: float
+
+ """
+ if self._callbacks:
+ return 0
+
+ return self._timer.get_remaining_interval()
def add_handler(self, fileno, handler, events):
"""[API] Add a new fileno to the set to be monitored
@@ -206,27 +457,31 @@ class IOLoop(object):
exit. See `IOLoop.stop`.
"""
+ self._thread_id = threading.current_thread().ident
self._poller.start()
def stop(self):
"""[API] Request exit from the ioloop. The loop is NOT guaranteed to
- stop before this method returns. This is the only method that may be
- called from another thread.
+ stop before this method returns.
- """
- self._poller.stop()
+ To invoke `stop()` safely from a thread other than this IOLoop's thread,
+ call it via `add_callback_threadsafe`; e.g.,
- def process_timeouts(self):
- """[Extension] Process pending timeouts, invoking callbacks for those
- whose time has come
+ `ioloop.add_callback_threadsafe(ioloop.stop)`
"""
- self._poller.process_timeouts()
+ if (self._thread_id is not None and
+ threading.current_thread().ident != self._thread_id):
+ LOGGER.warning('Use add_callback_threadsafe to request '
+ 'ioloop.stop() from another thread')
+
+ self._poller.stop()
def activate_poller(self):
"""[Extension] Activate the poller
"""
+ self._thread_id = threading.current_thread().ident
self._poller.activate_poller()
def deactivate_poller(self):
@@ -259,7 +514,21 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
# if the poller uses MS override with 1000
POLL_TIMEOUT_MULT = 1
- def __init__(self):
+ def __init__(self, get_wait_seconds, process_timeouts):
+ """
+ :param get_wait_seconds: Function for getting the maximum number of
+ seconds to wait for IO for use by the poller
+ :param process_timeouts: Function for processing timeouts for use by the
+ poller
+
+ """
+ self._get_wait_seconds = get_wait_seconds
+ self._process_timeouts = process_timeouts
+
+ # We guard access to the waking file descriptors to avoid races from
+ # closing them while another thread is calling our `wake()` method.
+ self._waking_mutex = threading.Lock()
+
# fd-to-handler function mappings
self._fd_handlers = dict()
@@ -271,105 +540,77 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
# Reentrancy tracker of the `start` method
self._start_nesting_levels = 0
- self._timeouts = {}
- self._next_timeout = None
-
self._stopping = False
- # Mutex for controlling critical sections where ioloop-interrupt sockets
- # are created, used, and destroyed. Needed in case `stop()` is called
- # from a thread.
- self._mutex = threading.Lock()
+ # Create ioloop-interrupt socket pair and register read handler.
+ self._r_interrupt, self._w_interrupt = self._get_interrupt_pair()
+ self.add_handler(self._r_interrupt.fileno(), self._read_interrupt, READ)
- # ioloop-interrupt socket pair; initialized in start()
- self._r_interrupt = None
- self._w_interrupt = None
+ def close(self):
+ """Release poller's resources.
- def add_timeout(self, deadline, callback_method):
- """Add the callback_method to the IOLoop timer to fire after deadline
- seconds. Returns a handle to the timeout. Do not confuse with
- Tornado's timeout where you pass in the time you want to have your
- callback called. Only pass in the seconds until it's to be called.
-
- :param int deadline: The number of seconds to wait to call callback
- :param method callback_method: The callback method
- :rtype: str
+ `close()` is intended to be called after the poller's `start()` method
+ returns. After calling `close()`, no other interaction with the closed
+ poller instance should be performed.
"""
- timeout_at = time.time() + deadline
- value = {'deadline': timeout_at, 'callback': callback_method}
- # TODO when timer resolution is low (e.g., windows), we get id collision
- # when retrying failing connection with tiny (e.g., 0) retry interval
- timeout_id = hash(frozenset(value.items()))
- self._timeouts[timeout_id] = value
-
- if not self._next_timeout or timeout_at < self._next_timeout:
- self._next_timeout = timeout_at
-
- LOGGER.debug('add_timeout: added timeout %s; deadline=%s at %s',
- timeout_id, deadline, timeout_at)
- return timeout_id
-
- def remove_timeout(self, timeout_id):
- """Remove a timeout if it's still in the timeout stack
-
- :param str timeout_id: The timeout id to remove
+ # Unregister and close ioloop-interrupt socket pair; mutual exclusion is
+ # necessary to avoid race condition with `wake_threadsafe` executing in
+ # another thread's context
+ assert self._start_nesting_levels == 0, \
+ 'Cannot call close() before start() unwinds.'
+
+ with self._waking_mutex:
+ if self._w_interrupt is not None:
+ self.remove_handler(self._r_interrupt.fileno()) # pylint: disable=E1101
+ self._r_interrupt.close()
+ self._r_interrupt = None
+ self._w_interrupt.close()
+ self._w_interrupt = None
+
+ self.deactivate_poller()
+
+ self._fd_handlers = None
+ self._fd_events = None
+ self._processing_fd_event_map = None
+
+ def wake_threadsafe(self):
+ """Wake up the poller as soon as possible. As the name indicates, this
+ method is thread-safe.
"""
- try:
- timeout = self._timeouts.pop(timeout_id)
- except KeyError:
- LOGGER.warning('remove_timeout: %s not found', timeout_id)
- else:
- if timeout['deadline'] == self._next_timeout:
- self._next_timeout = None
+ with self._waking_mutex:
+ if self._w_interrupt is None:
+ return
- LOGGER.debug('remove_timeout: removed %s', timeout_id)
+ try:
+ # Send byte to interrupt the poll loop, use send() instead of
+ # os.write for Windows compatibility
+ self._w_interrupt.send(b'X')
+ except pika.compat.SOCKET_ERROR as err:
+ if err.errno != errno.EWOULDBLOCK:
+ raise
+ except Exception as err:
+ # There's nothing sensible to do here, we'll exit the interrupt
+ # loop after POLL_TIMEOUT secs in worst case anyway.
+ LOGGER.warning("Failed to send interrupt to poller: %s", err)
+ raise
- def _get_next_deadline(self):
- """Get the interval to the next timeout event, or a default interval
- """
- if self._next_timeout:
- timeout = max(self._next_timeout - time.time(), 0)
+ def _get_max_wait(self):
+ """Get the interval to the next timeout event, or a default interval
- elif self._timeouts:
- deadlines = [t['deadline'] for t in self._timeouts.values()]
- self._next_timeout = min(deadlines)
- timeout = max((self._next_timeout - time.time(), 0))
+ :returns: maximum number of self.POLL_TIMEOUT_MULT-scaled time units
+ to wait for IO events
+ """
+ delay = self._get_wait_seconds()
+ if delay is None:
+ delay = self._MAX_POLL_TIMEOUT
else:
- timeout = self._MAX_POLL_TIMEOUT
-
- timeout = min(timeout, self._MAX_POLL_TIMEOUT)
- return timeout * self.POLL_TIMEOUT_MULT
+ delay = min(delay, self._MAX_POLL_TIMEOUT)
- def process_timeouts(self):
- """Process pending timeouts, invoking callbacks for those whose time has
- come
-
- """
- now = time.time()
- # Run the timeouts in order of deadlines. Although this shouldn't
- # be strictly necessary it preserves old behaviour when timeouts
- # were only run periodically.
- to_run = sorted(
- [(k, timer)
- for (k, timer) in self._timeouts.items()
- if timer['deadline'] <= now],
- key=lambda item: item[1]['deadline'])
-
- for k, timer in to_run:
- if k not in self._timeouts:
- # Previous invocation(s) should have deleted the timer.
- continue
- try:
- timer['callback']()
- finally:
- # Don't do 'del self._timeout[k]' as the key might
- # have been deleted just now.
- if self._timeouts.pop(k, None) is not None:
- self._next_timeout = None
+ return delay * self.POLL_TIMEOUT_MULT
def add_handler(self, fileno, handler, events):
"""Add a new fileno to the set to be monitored
@@ -449,7 +690,7 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
"""
# Activate the underlying poller and register current events
self._init_poller()
- fd_to_events = defaultdict(int)
+ fd_to_events = collections.defaultdict(int)
for event, file_descriptors in self._fd_events.items():
for fileno in file_descriptors:
fd_to_events[fileno] |= event
@@ -471,22 +712,10 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
if self._start_nesting_levels == 1:
LOGGER.debug('Entering IOLoop')
- self._stopping = False
# Activate the underlying poller and register current events
self.activate_poller()
- # Create ioloop-interrupt socket pair and register read handler.
- # NOTE: we defer their creation because some users (e.g.,
- # BlockingConnection adapter) don't use the event loop and these
- # sockets would get reported as leaks
- with self._mutex:
- assert self._r_interrupt is None
- self._r_interrupt, self._w_interrupt = self._get_interrupt_pair(
- )
- self.add_handler(self._r_interrupt.fileno(),
- self._read_interrupt, READ)
-
else:
LOGGER.debug('Reentering IOLoop at nesting level=%s',
self._start_nesting_levels)
@@ -495,52 +724,32 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
# Run event loop
while not self._stopping:
self.poll()
- self.process_timeouts()
+ self._process_timeouts()
finally:
self._start_nesting_levels -= 1
if self._start_nesting_levels == 0:
- LOGGER.debug('Cleaning up IOLoop')
- # Unregister and close ioloop-interrupt socket pair
- with self._mutex:
- self.remove_handler(self._r_interrupt.fileno())
- self._r_interrupt.close()
- self._r_interrupt = None
- self._w_interrupt.close()
- self._w_interrupt = None
-
- # Deactivate the underlying poller
- self.deactivate_poller()
+ try:
+ LOGGER.debug('Deactivating poller')
+
+ # Deactivate the underlying poller
+ self.deactivate_poller()
+ finally:
+ self._stopping = False
else:
LOGGER.debug('Leaving IOLoop with %s nesting levels remaining',
self._start_nesting_levels)
def stop(self):
"""Request exit from the ioloop. The loop is NOT guaranteed to stop
- before this method returns. This is the only method that may be called
- from another thread.
+ before this method returns.
"""
LOGGER.debug('Stopping IOLoop')
self._stopping = True
- with self._mutex:
- if self._w_interrupt is None:
- return
-
- try:
- # Send byte to interrupt the poll loop, use send() instead of
- # os.write for Windows compatibility
- self._w_interrupt.send(b'X')
- except pika.compat.SOCKET_ERROR as err:
- if err.errno != errno.EWOULDBLOCK:
- raise
- except Exception as err:
- # There's nothing sensible to do here, we'll exit the interrupt
- # loop after POLL_TIMEOUT secs in worst case anyway.
- LOGGER.warning("Failed to send ioloop interrupt: %s", err)
- raise
+ self.wake_threadsafe()
@abc.abstractmethod
def poll(self):
@@ -633,7 +842,7 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
so use a pair of simple TCP sockets instead. The sockets will be
closed and garbage collected by python when the ioloop itself is.
"""
- return pika.compat._nonblocking_socketpair()
+ return pika.compat._nonblocking_socketpair() # pylint: disable=W0212
def _read_interrupt(self, interrupt_fd, events): # pylint: disable=W0613
""" Read the interrupt byte(s). We ignore the event mask as we can ony
@@ -644,7 +853,7 @@ class _PollerBase(_AbstractBase): # pylint: disable=R0902
"""
try:
# NOTE Use recv instead of os.read for windows compatibility
- self._r_interrupt.recv(512)
+ self._r_interrupt.recv(512) # pylint: disable=E1101
except pika.compat.SOCKET_ERROR as err:
if err.errno != errno.EAGAIN:
raise
@@ -659,12 +868,6 @@ class SelectPoller(_PollerBase):
# if the poller uses MS specify 1000
POLL_TIMEOUT_MULT = 1
- def __init__(self):
- """Create an instance of the SelectPoller
-
- """
- super(SelectPoller, self).__init__()
-
def poll(self):
"""Wait for events of interest on registered file descriptors until an
event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
@@ -677,12 +880,12 @@ class SelectPoller(_PollerBase):
self._fd_events[ERROR]):
read, write, error = select.select(
self._fd_events[READ], self._fd_events[WRITE],
- self._fd_events[ERROR], self._get_next_deadline())
+ self._fd_events[ERROR], self._get_max_wait())
else:
# NOTE When called without any FDs, select fails on
# Windows with error 10022, 'An invalid argument was
# supplied'.
- time.sleep(self._get_next_deadline())
+ time.sleep(self._get_max_wait())
read, write, error = [], [], []
break
@@ -694,7 +897,7 @@ class SelectPoller(_PollerBase):
# Build an event bit mask for each fileno we've received an event for
- fd_event_map = defaultdict(int)
+ fd_event_map = collections.defaultdict(int)
for fd_set, evt in zip((read, write, error), (READ, WRITE, ERROR)):
for fileno in fd_set:
fd_event_map[fileno] |= evt
@@ -750,17 +953,11 @@ class SelectPoller(_PollerBase):
class KQueuePoller(_PollerBase):
"""KQueuePoller works on BSD based systems and is faster than select"""
- def __init__(self):
+ def __init__(self, get_wait_seconds, process_timeouts):
"""Create an instance of the KQueuePoller
-
- :param int fileno: The file descriptor to check events for
- :param method handler: What is called when an event happens
- :param int events: The events to look for
-
"""
- super(KQueuePoller, self).__init__()
-
self._kqueue = None
+ super(KQueuePoller, self).__init__(get_wait_seconds, process_timeouts)
@staticmethod
def _map_event(kevent):
@@ -776,6 +973,9 @@ class KQueuePoller(_PollerBase):
elif kevent.flags & select.KQ_EV_ERROR:
return ERROR
+ # Should never happen
+ return None
+
def poll(self):
"""Wait for events of interest on registered file descriptors until an
event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
@@ -785,7 +985,7 @@ class KQueuePoller(_PollerBase):
while True:
try:
kevents = self._kqueue.control(None, 1000,
- self._get_next_deadline())
+ self._get_max_wait())
break
except _SELECT_ERRORS as error:
if _is_resumable(error):
@@ -793,7 +993,7 @@ class KQueuePoller(_PollerBase):
else:
raise
- fd_event_map = defaultdict(int)
+ fd_event_map = collections.defaultdict(int)
for event in kevents:
fd_event_map[event.ident] |= self._map_event(event)
@@ -807,8 +1007,9 @@ class KQueuePoller(_PollerBase):
def _uninit_poller(self):
"""Notify the implementation to release the poller resource"""
- self._kqueue.close()
- self._kqueue = None
+ if self._kqueue is not None:
+ self._kqueue.close()
+ self._kqueue = None
def _register_fd(self, fileno, events):
"""The base class invokes this method to notify the implementation to
@@ -882,16 +1083,12 @@ class PollPoller(_PollerBase):
"""
POLL_TIMEOUT_MULT = 1000
- def __init__(self):
+ def __init__(self, get_wait_seconds, process_timeouts):
"""Create an instance of the KQueuePoller
- :param int fileno: The file descriptor to check events for
- :param method handler: What is called when an event happens
- :param int events: The events to look for
-
"""
self._poll = None
- super(PollPoller, self).__init__()
+ super(PollPoller, self).__init__(get_wait_seconds, process_timeouts)
@staticmethod
def _create_poller():
@@ -908,7 +1105,7 @@ class PollPoller(_PollerBase):
"""
while True:
try:
- events = self._poll.poll(self._get_next_deadline())
+ events = self._poll.poll(self._get_max_wait())
break
except _SELECT_ERRORS as error:
if _is_resumable(error):
@@ -916,7 +1113,7 @@ class PollPoller(_PollerBase):
else:
raise
- fd_event_map = defaultdict(int)
+ fd_event_map = collections.defaultdict(int)
for fileno, event in events:
fd_event_map[fileno] |= event
@@ -930,10 +1127,11 @@ class PollPoller(_PollerBase):
def _uninit_poller(self):
"""Notify the implementation to release the poller resource"""
- if hasattr(self._poll, "close"):
- self._poll.close()
+ if self._poll is not None:
+ if hasattr(self._poll, "close"):
+ self._poll.close()
- self._poll = None
+ self._poll = None
def _register_fd(self, fileno, events):
"""The base class invokes this method to notify the implementation to
diff --git a/pika/adapters/tornado_connection.py b/pika/adapters/tornado_connection.py
index ce407d1..db34dfd 100644
--- a/pika/adapters/tornado_connection.py
+++ b/pika/adapters/tornado_connection.py
@@ -94,3 +94,29 @@ class TornadoConnection(base_connection.BaseConnection):
"""
return self.ioloop.remove_timeout(timeout_id)
+
+ def add_callback_threadsafe(self, callback):
+ """Requests a call to the given function as soon as possible in the
+ context of this connection's IOLoop thread.
+
+ NOTE: This is the only thread-safe method offered by the connection. All
+ other manipulations of the connection must be performed from the
+ connection's thread.
+
+ For example, a thread may request a call to the
+ `channel.basic_ack` method of a connection that is running in a
+ different thread via
+
+ ```
+ connection.add_callback_threadsafe(
+ functools.partial(channel.basic_ack, delivery_tag=...))
+ ```
+
+ :param method callback: The callback method; must be callable.
+
+ """
+ if not callable(callback):
+ raise TypeError(
+ 'callback must be a callable, but got %r' % (callback,))
+
+ self.ioloop.add_callback(callback)
diff --git a/pika/adapters/twisted_connection.py b/pika/adapters/twisted_connection.py
index 053ddf6..1dac51f 100644
--- a/pika/adapters/twisted_connection.py
+++ b/pika/adapters/twisted_connection.py
@@ -225,6 +225,28 @@ class IOLoopReactorAdapter(object):
"""
call.cancel()
+ def add_callback_threadsafe(self, callback):
+ """Requests a call to the given function as soon as possible in the
+ context of this IOLoop's thread.
+
+ NOTE: This is the only thread-safe method offered by the IOLoop adapter.
+ All other manipulations of the IOLoop adapter and its parent connection
+ must be performed from the connection's thread.
+
+ For example, a thread may request a call to the
+ `channel.basic_ack` method of a connection that is running in a
+ different thread via
+
+ ```
+ connection.add_callback_threadsafe(
+ functools.partial(channel.basic_ack, delivery_tag=...))
+ ```
+
+ :param method callback: The callback method; must be callable.
+
+ """
+ self.reactor.callFromThread(callback)
+
def stop(self):
# Guard against stopping the reactor multiple times
if not self.started:
diff --git a/pika/connection.py b/pika/connection.py
index e371a03..0c4e2a7 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -1238,8 +1238,11 @@ class Connection(object):
LOGGER.warning('Suppressing close request on %s', self)
return
+ # NOTE The connection is either in opening or open state
+
# Initiate graceful closing of channels that are OPEN or OPENING
- self._close_channels(reply_code, reply_text)
+ if self._channels:
+ self._close_channels(reply_code, reply_text)
# Set our connection state
self._set_connection_state(self.CONNECTION_CLOSING)
diff --git a/pylintrc b/pylintrc
index 2e516a7..4f96c79 100644
--- a/pylintrc
+++ b/pylintrc
@@ -70,7 +70,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-disable=
+disable=R1705
[REPORTS]
| Backport #956 for 0.12 | pika/pika | diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py
index e6f6e47..53004f6 100644
--- a/tests/acceptance/async_adapter_tests.py
+++ b/tests/acceptance/async_adapter_tests.py
@@ -10,6 +10,8 @@
# Suppress pylint warning about unused argument
# pylint: disable=W0613
+import functools
+import threading
import time
import uuid
@@ -458,3 +460,128 @@ class TestBlockedConnectionUnblocks(AsyncTestCase, AsyncAdapters): # pylint: di
super(TestBlockedConnectionUnblocks, self).on_closed(connection,
reply_code,
reply_text)
+
+
+class TestAddCallbackThreadsafeRequestBeforeIOLoopStarts(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "Test add_callback_threadsafe request before ioloop starts."
+
+ def _run_ioloop(self, *args, **kwargs): # pylint: disable=W0221
+ """We intercept this method from AsyncTestCase in order to call
+ add_callback_threadsafe before AsyncTestCase starts the ioloop.
+
+ """
+ self.my_start_time = time.time()
+ # Request a callback from our current (ioloop's) thread
+ self.connection.add_callback_threadsafe(self.on_requested_callback)
+
+ return super(
+ TestAddCallbackThreadsafeRequestBeforeIOLoopStarts, self)._run_ioloop(
+ *args, **kwargs)
+
+ def start(self, *args, **kwargs): # pylint: disable=W0221
+ self.loop_thread_ident = threading.current_thread().ident
+ self.my_start_time = None
+ self.got_callback = False
+ super(TestAddCallbackThreadsafeRequestBeforeIOLoopStarts, self).start(*args, **kwargs)
+ self.assertTrue(self.got_callback)
+
+ def begin(self, channel):
+ self.stop()
+
+ def on_requested_callback(self):
+ self.assertEqual(threading.current_thread().ident,
+ self.loop_thread_ident)
+ self.assertLess(time.time() - self.my_start_time, 0.25)
+ self.got_callback = True
+
+
+class TestAddCallbackThreadsafeFromIOLoopThread(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "Test add_callback_threadsafe request from same thread."
+
+ def start(self, *args, **kwargs):
+ self.loop_thread_ident = threading.current_thread().ident
+ self.my_start_time = None
+ self.got_callback = False
+ super(TestAddCallbackThreadsafeFromIOLoopThread, self).start(*args, **kwargs)
+ self.assertTrue(self.got_callback)
+
+ def begin(self, channel):
+ self.my_start_time = time.time()
+ # Request a callback from our current (ioloop's) thread
+ channel.connection.add_callback_threadsafe(self.on_requested_callback)
+
+ def on_requested_callback(self):
+ self.assertEqual(threading.current_thread().ident,
+ self.loop_thread_ident)
+ self.assertLess(time.time() - self.my_start_time, 0.25)
+ self.got_callback = True
+ self.stop()
+
+
+class TestAddCallbackThreadsafeFromAnotherThread(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "Test add_callback_threadsafe request from another thread."
+
+ def start(self, *args, **kwargs):
+ self.loop_thread_ident = threading.current_thread().ident
+ self.my_start_time = None
+ self.got_callback = False
+ super(TestAddCallbackThreadsafeFromAnotherThread, self).start(*args, **kwargs)
+ self.assertTrue(self.got_callback)
+
+ def begin(self, channel):
+ self.my_start_time = time.time()
+ # Request a callback from ioloop while executing in another thread
+ timer = threading.Timer(
+ 0,
+ lambda: channel.connection.add_callback_threadsafe(
+ self.on_requested_callback))
+ self.addCleanup(timer.cancel)
+ timer.start()
+
+ def on_requested_callback(self):
+ self.assertEqual(threading.current_thread().ident,
+ self.loop_thread_ident)
+ self.assertLess(time.time() - self.my_start_time, 0.25)
+ self.got_callback = True
+ self.stop()
+
+
+class TestIOLoopStopBeforeIOLoopStarts(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "Test ioloop.stop() before ioloop starts causes ioloop to exit quickly."
+
+ def _run_ioloop(self, *args, **kwargs): # pylint: disable=W0221
+ """We intercept this method from AsyncTestCase in order to call
+ ioloop.stop() before AsyncTestCase starts the ioloop.
+ """
+ # Request ioloop to stop before it starts
+ self.my_start_time = time.time()
+ self.stop_ioloop_only()
+
+ return super(
+ TestIOLoopStopBeforeIOLoopStarts, self)._run_ioloop(*args, **kwargs)
+
+ def start(self, *args, **kwargs): # pylint: disable=W0221
+ self.loop_thread_ident = threading.current_thread().ident
+ self.my_start_time = None
+ super(TestIOLoopStopBeforeIOLoopStarts, self).start(*args, **kwargs)
+ self.assertLess(time.time() - self.my_start_time, 0.25)
+
+ def begin(self, channel):
+ pass
+
+
+class TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
+ DESCRIPTION = "Test viability of multiple timeouts with same deadline and callback"
+
+ def begin(self, channel):
+ timer1 = channel.connection.add_timeout(0, self.on_my_timer)
+ timer2 = channel.connection.add_timeout(0, self.on_my_timer)
+
+ self.assertNotEqual(timer1, timer2)
+
+ channel.connection.remove_timeout(timer1)
+
+ # Wait for timer2 to fire
+
+ def on_my_timer(self):
+ self.stop()
diff --git a/tests/acceptance/async_test_base.py b/tests/acceptance/async_test_base.py
index 33f5d3d..6a80b0c 100644
--- a/tests/acceptance/async_test_base.py
+++ b/tests/acceptance/async_test_base.py
@@ -68,34 +68,55 @@ class AsyncTestCase(unittest.TestCase):
"""Extend to start the actual tests on the channel"""
self.fail("AsyncTestCase.begin_test not extended")
- def start(self, adapter=None):
+ def start(self, adapter, ioloop_factory):
self.logger.info('start at %s', datetime.datetime.utcnow())
self.adapter = adapter or self.ADAPTER
- self.connection = self.adapter(self.parameters, self.on_open,
- self.on_open_error, self.on_closed)
- self.timeout = self.connection.add_timeout(self.TIMEOUT,
- self.on_timeout)
- self.connection.ioloop.start()
- self.assertFalse(self._timed_out)
+ self.connection = self.adapter(self.parameters,
+ self.on_open,
+ self.on_open_error,
+ self.on_closed,
+ custom_ioloop=ioloop_factory())
+ try:
+ self.timeout = self.connection.add_timeout(self.TIMEOUT,
+ self.on_timeout)
+ self._run_ioloop()
+ self.assertFalse(self._timed_out)
+ finally:
+ self.connection.ioloop.close()
+ self.connection = None
+
+ def stop_ioloop_only(self):
+ """Request stopping of the connection's ioloop to end the test without
+ closing the connection
+ """
+ self._safe_remove_test_timeout()
+ self.connection.ioloop.stop()
def stop(self):
"""close the connection and stop the ioloop"""
self.logger.info("Stopping test")
- if self.timeout is not None:
- self.connection.remove_timeout(self.timeout)
- self.timeout = None
- self.connection.close()
+ self._safe_remove_test_timeout()
+ self.connection.close() # NOTE: on_closed() will stop the ioloop
+
+ def _run_ioloop(self):
+ """Some tests need to subclass this in order to bootstrap their test
+ logic after we instantiate the connection and assign it to
+ `self.connection`, but before we run the ioloop
+ """
+ self.connection.ioloop.start()
- def _stop(self):
+ def _safe_remove_test_timeout(self):
if hasattr(self, 'timeout') and self.timeout is not None:
self.logger.info("Removing timeout")
self.connection.remove_timeout(self.timeout)
self.timeout = None
- if hasattr(self, 'connection') and self.connection:
+
+ def _stop(self):
+ self._safe_remove_test_timeout()
+ if hasattr(self, 'connection') and self.connection is not None:
self.logger.info("Stopping ioloop")
self.connection.ioloop.stop()
- self.connection = None
def on_closed(self, connection, reply_code, reply_text):
"""called when the connection has finished closing"""
@@ -124,12 +145,12 @@ class AsyncTestCase(unittest.TestCase):
class BoundQueueTestCase(AsyncTestCase):
- def start(self, adapter=None):
+ def start(self, adapter, ioloop_factory):
# PY3 compat encoding
self.exchange = 'e-' + self.__class__.__name__ + ':' + uuid.uuid1().hex
self.queue = 'q-' + self.__class__.__name__ + ':' + uuid.uuid1().hex
self.routing_key = self.__class__.__name__
- super(BoundQueueTestCase, self).start(adapter)
+ super(BoundQueueTestCase, self).start(adapter, ioloop_factory)
def begin(self, channel):
self.channel.exchange_declare(self.on_exchange_declared, self.exchange,
@@ -164,20 +185,31 @@ class BoundQueueTestCase(AsyncTestCase):
class AsyncAdapters(object):
- def start(self, adapter_class):
+ def start(self, adapter_class, ioloop_factory):
+ """
+
+ :param adapter_class: pika connection adapter class to test.
+ :param ioloop_factory: to be called without args to instantiate a
+ non-shared ioloop to be passed as the `custom_ioloop` arg to the
+ `adapter_class` constructor. This is needed because some of the
+ adapters default to using a singleton ioloop, which results in
+ tests errors after prior tests close the ioloop to release resources,
+ in order to eliminate ResourceWarning warnings concerning unclosed
+ sockets from our adapters.
+ :return:
+ """
raise NotImplementedError
def select_default_test(self):
"""SelectConnection:DefaultPoller"""
-
with mock.patch.multiple(select_connection, SELECT_TYPE=None):
- self.start(adapters.SelectConnection)
+ self.start(adapters.SelectConnection, select_connection.IOLoop)
def select_select_test(self):
"""SelectConnection:select"""
with mock.patch.multiple(select_connection, SELECT_TYPE='select'):
- self.start(adapters.SelectConnection)
+ self.start(adapters.SelectConnection, select_connection.IOLoop)
@unittest.skipIf(
not hasattr(select, 'poll') or
@@ -186,27 +218,36 @@ class AsyncAdapters(object):
"""SelectConnection:poll"""
with mock.patch.multiple(select_connection, SELECT_TYPE='poll'):
- self.start(adapters.SelectConnection)
+ self.start(adapters.SelectConnection, select_connection.IOLoop)
@unittest.skipIf(not hasattr(select, 'epoll'), "epoll not supported")
def select_epoll_test(self):
"""SelectConnection:epoll"""
with mock.patch.multiple(select_connection, SELECT_TYPE='epoll'):
- self.start(adapters.SelectConnection)
+ self.start(adapters.SelectConnection, select_connection.IOLoop)
@unittest.skipIf(not hasattr(select, 'kqueue'), "kqueue not supported")
def select_kqueue_test(self):
"""SelectConnection:kqueue"""
with mock.patch.multiple(select_connection, SELECT_TYPE='kqueue'):
- self.start(adapters.SelectConnection)
+ self.start(adapters.SelectConnection, select_connection.IOLoop)
def tornado_test(self):
"""TornadoConnection"""
- self.start(adapters.TornadoConnection)
+ ioloop_factory = None
+ if adapters.TornadoConnection is not None:
+ import tornado.ioloop
+ ioloop_factory = tornado.ioloop.IOLoop
+ self.start(adapters.TornadoConnection, ioloop_factory)
@unittest.skipIf(sys.version_info < (3, 4), "Asyncio available for Python 3.4+")
def asyncio_test(self):
"""AsyncioConnection"""
- self.start(adapters.AsyncioConnection)
+ ioloop_factory = None
+ if adapters.AsyncioConnection is not None:
+ import asyncio
+ ioloop_factory = asyncio.new_event_loop
+
+ self.start(adapters.AsyncioConnection, ioloop_factory)
diff --git a/tests/acceptance/blocking_adapter_test.py b/tests/acceptance/blocking_adapter_test.py
index e8643f1..c27be2e 100644
--- a/tests/acceptance/blocking_adapter_test.py
+++ b/tests/acceptance/blocking_adapter_test.py
@@ -1,7 +1,9 @@
"""blocking adapter test"""
from datetime import datetime
+import functools
import logging
import socket
+import threading
import time
import unittest
import uuid
@@ -449,6 +451,46 @@ class TestBlockedConnectionTimeout(BlockingTestCaseBase):
'Blocked connection timeout expired'))
+class TestAddCallbackThreadsafeFromSameThread(BlockingTestCaseBase):
+
+ def test(self):
+ """BlockingConnection.add_callback_threadsafe from same thread"""
+ connection = self._connect()
+
+ # Test timer completion
+ start_time = time.time()
+ rx_callback = []
+ connection.add_callback_threadsafe(
+ lambda: rx_callback.append(time.time()))
+ while not rx_callback:
+ connection.process_data_events(time_limit=None)
+
+ self.assertEqual(len(rx_callback), 1)
+ elapsed = time.time() - start_time
+ self.assertLess(elapsed, 0.25)
+
+
+class TestAddCallbackThreadsafeFromAnotherThread(BlockingTestCaseBase):
+
+ def test(self):
+ """BlockingConnection.add_callback_threadsafe from another thread"""
+ connection = self._connect()
+
+ # Test timer completion
+ start_time = time.time()
+ rx_callback = []
+ timer = threading.Timer(
+ 0,
+ functools.partial(connection.add_callback_threadsafe,
+ lambda: rx_callback.append(time.time())))
+ self.addCleanup(timer.cancel)
+ timer.start()
+ while not rx_callback:
+ connection.process_data_events(time_limit=None)
+
+ self.assertEqual(len(rx_callback), 1)
+ elapsed = time.time() - start_time
+ self.assertLess(elapsed, 0.25)
class TestAddTimeoutRemoveTimeout(BlockingTestCaseBase):
@@ -488,6 +530,32 @@ class TestAddTimeoutRemoveTimeout(BlockingTestCaseBase):
repr(evt)
+class TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback(BlockingTestCaseBase):
+
+ def test(self):
+ """BlockingConnection viability of multiple timeouts with same deadline and callback"""
+ connection = self._connect()
+
+ rx_callback = []
+
+ def callback():
+ rx_callback.append(1)
+
+ timer1 = connection.add_timeout(0, callback)
+ timer2 = connection.add_timeout(0, callback)
+
+ self.assertNotEqual(timer1, timer2)
+
+ connection.remove_timeout(timer1)
+
+ # Wait for second timer to fire
+ start_wait_time = time.time()
+ while not rx_callback and time.time() - start_wait_time < 0.25:
+ connection.process_data_events(time_limit=0.001)
+
+ self.assertListEqual(rx_callback, [1])
+
+
class TestRemoveTimeoutFromTimeoutCallback(BlockingTestCaseBase):
def test(self):
@@ -508,7 +576,7 @@ class TestRemoveTimeoutFromTimeoutCallback(BlockingTestCaseBase):
while not rx_timer2:
connection.process_data_events(time_limit=None)
- self.assertNotIn(timer_id1, connection._impl.ioloop._poller._timeouts)
+ self.assertIsNone(timer_id1.callback)
self.assertFalse(connection._ready_events)
@@ -1471,7 +1539,7 @@ class TestBasicPublishDeliveredWhenPendingUnroutable(BlockingTestCaseBase):
mandatory=True)
self.assertEqual(res, True)
- # Flush channel to force Basic.Return
+ # Flush connection to force Basic.Return
connection.channel().close()
# Deposit a routable message in the queue
@@ -1630,7 +1698,7 @@ class TestPublishAndConsumeWithPubacksAndQosOfOne(BlockingTestCaseBase):
queue=q_name,
expected_count=0)
- # Attempt to cosume again with a short timeout
+ # Attempt to consume again with a short timeout
connection.process_data_events(time_limit=0.005)
self.assertEqual(len(rx_messages), 2)
@@ -1645,6 +1713,197 @@ class TestPublishAndConsumeWithPubacksAndQosOfOne(BlockingTestCaseBase):
self.assertEqual(frame.method.consumer_tag, consumer_tag)
+class TestBasicConsumeWithAckFromAnotherThread(BlockingTestCaseBase):
+
+ def test(self): # pylint: disable=R0914,R0915
+ """BlockingChannel.basic_consume with ack from another thread and \
+ requesting basic_ack via add_callback_threadsafe
+ """
+ # This test simulates processing of a message on another thread and
+ # then requesting an ACK to be dispatched on the connection's thread
+ # via BlockingConnection.add_callback_threadsafe
+
+ connection = self._connect()
+
+ ch = connection.channel()
+
+ q_name = 'TestBasicConsumeWithAckFromAnotherThread_q' + uuid.uuid1().hex
+ exg_name = ('TestBasicConsumeWithAckFromAnotherThread_exg' +
+ uuid.uuid1().hex)
+ routing_key = 'TestBasicConsumeWithAckFromAnotherThread'
+
+ # Place channel in publisher-acknowledgments mode so that publishing
+ # with mandatory=True will be synchronous (for convenience)
+ res = ch.confirm_delivery()
+ self.assertIsNone(res)
+
+ # Declare a new exchange
+ ch.exchange_declare(exg_name, exchange_type='direct')
+ self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+ # Declare a new queue
+ ch.queue_declare(q_name, auto_delete=True)
+ self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+ # Bind the queue to the exchange using routing key
+ ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key)
+
+ # Publish 2 messages with mandatory=True for synchronous processing
+ ch.publish(exg_name, routing_key, body='msg1', mandatory=True)
+ ch.publish(exg_name, routing_key, body='last-msg', mandatory=True)
+
+ # Configure QoS for one message so that the 2nd message will be
+ # delivered only after the 1st one is ACKed
+ ch.basic_qos(prefetch_size=0, prefetch_count=1, all_channels=False)
+
+ # Create a consumer
+ rx_messages = []
+ def ackAndEnqueueMessageViaAnotherThread(rx_ch,
+ rx_method,
+ rx_properties, # pylint: disable=W0613
+ rx_body):
+ LOGGER.debug(
+ '%s: Got message body=%r; delivery-tag=%r',
+ datetime.now(), rx_body, rx_method.delivery_tag)
+
+ # Request ACK dispatch via add_callback_threadsafe from other
+ # thread; if last message, cancel consumer so that start_consuming
+ # can return
+
+ def processOnConnectionThread():
+ LOGGER.debug('%s: ACKing message body=%r; delivery-tag=%r',
+ datetime.now(),
+ rx_body,
+ rx_method.delivery_tag)
+ ch.basic_ack(delivery_tag=rx_method.delivery_tag,
+ multiple=False)
+ rx_messages.append(rx_body)
+
+ # NOTE on python3, `b'last-msg' != 'last-msg'`
+ if rx_body == b'last-msg':
+ LOGGER.debug('%s: Canceling consumer consumer-tag=%r',
+ datetime.now(),
+ rx_method.consumer_tag)
+ rx_ch.basic_cancel(rx_method.consumer_tag)
+
+ # Spawn a thread to initiate ACKing
+ timer = threading.Timer(0,
+ lambda: connection.add_callback_threadsafe(
+ processOnConnectionThread))
+ self.addCleanup(timer.cancel)
+ timer.start()
+
+ consumer_tag = ch.basic_consume(
+ ackAndEnqueueMessageViaAnotherThread,
+ q_name,
+ no_ack=False,
+ exclusive=False,
+ arguments=None)
+
+ # Wait for both messages
+ LOGGER.debug('%s: calling start_consuming(); consumer tag=%r',
+ datetime.now(),
+ consumer_tag)
+ ch.start_consuming()
+ LOGGER.debug('%s: Returned from start_consuming(); consumer tag=%r',
+ datetime.now(),
+ consumer_tag)
+
+ self.assertEqual(len(rx_messages), 2)
+ self.assertEqual(rx_messages[0], b'msg1')
+ self.assertEqual(rx_messages[1], b'last-msg')
+
+
+class TestConsumeGeneratorWithAckFromAnotherThread(BlockingTestCaseBase):
+
+ def test(self): # pylint: disable=R0914,R0915
+ """BlockingChannel.consume and requesting basic_ack from another \
+ thread via add_callback_threadsafe
+ """
+ connection = self._connect()
+
+ ch = connection.channel()
+
+ q_name = ('TestConsumeGeneratorWithAckFromAnotherThread_q' +
+ uuid.uuid1().hex)
+ exg_name = ('TestConsumeGeneratorWithAckFromAnotherThread_exg' +
+ uuid.uuid1().hex)
+ routing_key = 'TestConsumeGeneratorWithAckFromAnotherThread'
+
+ # Place channel in publisher-acknowledgments mode so that publishing
+ # with mandatory=True will be synchronous (for convenience)
+ res = ch.confirm_delivery()
+ self.assertIsNone(res)
+
+ # Declare a new exchange
+ ch.exchange_declare(exg_name, exchange_type='direct')
+ self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+ # Declare a new queue
+ ch.queue_declare(q_name, auto_delete=True)
+ self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+ # Bind the queue to the exchange using routing key
+ ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key)
+
+ # Publish 2 messages with mandatory=True for synchronous processing
+ ch.publish(exg_name, routing_key, body='msg1', mandatory=True)
+ ch.publish(exg_name, routing_key, body='last-msg', mandatory=True)
+
+ # Configure QoS for one message so that the 2nd message will be
+ # delivered only after the 1st one is ACKed
+ ch.basic_qos(prefetch_size=0, prefetch_count=1, all_channels=False)
+
+ # Create a consumer
+ rx_messages = []
+ def ackAndEnqueueMessageViaAnotherThread(rx_ch,
+ rx_method,
+ rx_properties, # pylint: disable=W0613
+ rx_body):
+ LOGGER.debug(
+ '%s: Got message body=%r; delivery-tag=%r',
+ datetime.now(), rx_body, rx_method.delivery_tag)
+
+ # Request ACK dispatch via add_callback_threadsafe from other
+ # thread; if last message, cancel consumer so that consumer
+ # generator completes
+
+ def processOnConnectionThread():
+ LOGGER.debug('%s: ACKing message body=%r; delivery-tag=%r',
+ datetime.now(),
+ rx_body,
+ rx_method.delivery_tag)
+ ch.basic_ack(delivery_tag=rx_method.delivery_tag,
+ multiple=False)
+ rx_messages.append(rx_body)
+
+ # NOTE on python3, `b'last-msg' != 'last-msg'`
+ if rx_body == b'last-msg':
+ LOGGER.debug('%s: Canceling consumer consumer-tag=%r',
+ datetime.now(),
+ rx_method.consumer_tag)
+ # NOTE Need to use cancel() for the consumer generator
+ # instead of basic_cancel()
+ rx_ch.cancel()
+
+ # Spawn a thread to initiate ACKing
+ timer = threading.Timer(0,
+ lambda: connection.add_callback_threadsafe(
+ processOnConnectionThread))
+ self.addCleanup(timer.cancel)
+ timer.start()
+
+ for method, properties, body in ch.consume(q_name, no_ack=False):
+ ackAndEnqueueMessageViaAnotherThread(rx_ch=ch,
+ rx_method=method,
+ rx_properties=properties,
+ rx_body=body)
+
+ self.assertEqual(len(rx_messages), 2)
+ self.assertEqual(rx_messages[0], b'msg1')
+ self.assertEqual(rx_messages[1], b'last-msg')
+
+
class TestTwoBasicConsumersOnSameChannel(BlockingTestCaseBase):
def test(self): # pylint: disable=R0914
@@ -1914,7 +2173,7 @@ class TestBasicPublishWithoutPubacks(BlockingTestCaseBase):
queue=q_name,
expected_count=0)
- # Attempt to cosume again with a short timeout
+ # Attempt to consume again with a short timeout
connection.process_data_events(time_limit=0.005)
self.assertEqual(len(rx_messages), 2)
diff --git a/tests/unit/blocking_connection_tests.py b/tests/unit/blocking_connection_tests.py
index 118aa2d..ab046a0 100644
--- a/tests/unit/blocking_connection_tests.py
+++ b/tests/unit/blocking_connection_tests.py
@@ -126,8 +126,7 @@ class BlockingConnectionTests(unittest.TestCase):
connection._flush_output(lambda: False, lambda: True)
self.assertEqual(connection._impl.ioloop.activate_poller.call_count, 1)
- self.assertEqual(connection._impl.ioloop.deactivate_poller.call_count,
- 1)
+ self.assertEqual(connection._impl.ioloop.close.call_count, 1)
@patch.object(
blocking_connection,
@@ -152,8 +151,7 @@ class BlockingConnectionTests(unittest.TestCase):
self.assertSequenceEqual(cm.exception.args, (404, 'not found'))
self.assertEqual(connection._impl.ioloop.activate_poller.call_count, 1)
- self.assertEqual(connection._impl.ioloop.deactivate_poller.call_count,
- 1)
+ self.assertEqual(connection._impl.ioloop.close.call_count, 1)
@patch.object(
blocking_connection,
@@ -178,8 +176,7 @@ class BlockingConnectionTests(unittest.TestCase):
self.assertSequenceEqual(cm.exception.args, (200, 'ok'))
self.assertEqual(connection._impl.ioloop.activate_poller.call_count, 1)
- self.assertEqual(connection._impl.ioloop.deactivate_poller.call_count,
- 1)
+ self.assertEqual(connection._impl.ioloop.close.call_count, 1)
@patch.object(
blocking_connection,
diff --git a/tests/unit/connection_timeout_tests.py b/tests/unit/connection_timeout_tests.py
index b44161b..3118fab 100644
--- a/tests/unit/connection_timeout_tests.py
+++ b/tests/unit/connection_timeout_tests.py
@@ -49,8 +49,13 @@ class ConnectionTests(unittest.TestCase):
connect=mock.Mock(side_effect=mock_timeout))
) as create_sock_mock:
params = pika.ConnectionParameters(socket_timeout=2.0)
- conn = asyncio_connection.AsyncioConnection(params)
+ ioloop = asyncio_connection.asyncio.new_event_loop()
+ self.addCleanup(ioloop.close)
+ conn = asyncio_connection.AsyncioConnection(
+ params,
+ custom_ioloop=ioloop)
conn._on_connect_timer()
+
create_sock_mock.return_value.settimeout.assert_called_with(2.0)
self.assertIn('timeout', str(err_ctx.exception))
@@ -99,6 +104,7 @@ class ConnectionTests(unittest.TestCase):
side_effect=mock_timeout))) as create_sock_mock:
params = pika.ConnectionParameters(socket_timeout=2.0)
conn = select_connection.SelectConnection(params)
+ self.addCleanup(conn.ioloop.close)
conn._on_connect_timer()
create_sock_mock.return_value.settimeout.assert_called_with(2.0)
self.assertIn('timeout', str(err_ctx.exception))
@@ -113,7 +119,11 @@ class ConnectionTests(unittest.TestCase):
connect=mock.Mock(
side_effect=mock_timeout))) as create_sock_mock:
params = pika.ConnectionParameters(socket_timeout=2.0)
- conn = tornado_connection.TornadoConnection(params)
+ ioloop = tornado_connection.ioloop.IOLoop()
+ self.addCleanup(ioloop.close)
+ conn = tornado_connection.TornadoConnection(
+ params,
+ custom_ioloop=ioloop)
conn._on_connect_timer()
create_sock_mock.return_value.settimeout.assert_called_with(2.0)
self.assertIn('timeout', str(err_ctx.exception))
diff --git a/tests/unit/select_connection_ioloop_tests.py b/tests/unit/select_connection_ioloop_tests.py
index ffc34ea..d5c9cf1 100644
--- a/tests/unit/select_connection_ioloop_tests.py
+++ b/tests/unit/select_connection_ioloop_tests.py
@@ -20,6 +20,12 @@ import pika
from pika import compat
from pika.adapters import select_connection
+# protected-access
+# pylint: disable=W0212
+# missing-docstring
+# pylint: disable=C0111
+
+
EPOLL_SUPPORTED = hasattr(select, 'epoll')
POLL_SUPPORTED = hasattr(select, 'poll') and hasattr(select.poll(), 'modify')
KQUEUE_SUPPORTED = hasattr(select, 'kqueue')
@@ -37,6 +43,7 @@ class IOLoopBaseTest(unittest.TestCase):
self.ioloop = select_connection.IOLoop()
self.addCleanup(setattr, self, 'ioloop', None)
+ self.addCleanup(self.ioloop.close)
activate_poller_patch = mock.patch.object(
self.ioloop._poller,
@@ -74,13 +81,76 @@ class IOLoopBaseTest(unittest.TestCase):
self.fail('Test timed out')
+class IOLoopCloseClosesSubordinateObjectsTestSelect(IOLoopBaseTest):
+ """ Test ioloop being closed """
+ SELECT_POLLER = 'select'
+
+ def start_test(self):
+ with mock.patch.multiple(self.ioloop,
+ _timer=mock.DEFAULT,
+ _poller=mock.DEFAULT,
+ _callbacks=mock.DEFAULT) as mocks:
+ self.ioloop.close()
+ mocks['_timer'].close.assert_called_once()
+ mocks['_poller'].close.assert_called_once()
+ self.assertIsNone(self.ioloop._callbacks)
+
+
+class IOLoopCloseAfterStartReturnsTestSelect(IOLoopBaseTest):
+ """ Test IOLoop.close() after normal return from start(). """
+ SELECT_POLLER = 'select'
+
+ def start_test(self):
+ self.ioloop.stop() # so start will terminate quickly
+ self.start()
+ self.ioloop.close()
+ self.assertIsNone(self.ioloop._callbacks)
+
+
+class IOLoopCloseBeforeStartReturnsTestSelect(IOLoopBaseTest):
+ """ Test calling IOLoop.close() before return from start() raises exception. """
+ SELECT_POLLER = 'select'
+
+ def start_test(self):
+ callback_completed = []
+
+ def call_close_from_callback():
+ with self.assertRaises(AssertionError) as cm:
+ self.ioloop.close()
+
+ self.assertEqual(cm.exception.args[0],
+ 'Cannot call close() before start() unwinds.')
+ self.ioloop.stop()
+ callback_completed.append(1)
+
+ self.ioloop.add_callback_threadsafe(call_close_from_callback)
+ self.start()
+ self.assertEqual(callback_completed, [1])
+
+
class IOLoopThreadStopTestSelect(IOLoopBaseTest):
""" Test ioloop being stopped by another Thread. """
SELECT_POLLER = 'select'
def start_test(self):
"""Starts a thread that stops ioloop after a while and start polling"""
- timer = threading.Timer(0.1, self.ioloop.stop)
+ timer = threading.Timer(
+ 0.1,
+ lambda: self.ioloop.add_callback_threadsafe(self.ioloop.stop))
+ self.addCleanup(timer.cancel)
+ timer.start()
+ self.start() # NOTE: Normal return from `start()` constitutes success
+
+
+class IOLoopThreadStopTestSelect(IOLoopBaseTest):
+ """ Test ioloop being stopped by another Thread. """
+ SELECT_POLLER = 'select'
+
+ def start_test(self):
+ """Starts a thread that stops ioloop after a while and start polling"""
+ timer = threading.Timer(
+ 0.1,
+ lambda: self.ioloop.add_callback_threadsafe(self.ioloop.stop))
self.addCleanup(timer.cancel)
timer.start()
self.start() # NOTE: Normal return from `start()` constitutes success
@@ -164,7 +234,7 @@ class IOLoopTimerTestSelect(IOLoopBaseTest):
"""A timeout handler that tries to remove itself."""
self.assertEqual(self.handle, handle_holder.pop())
# This removal here should not raise exception by itself nor
- # in the caller SelectPoller.process_timeouts().
+ # in the caller SelectPoller._process_timeouts().
self.timer_got_called = True
self.ioloop.remove_timeout(self.handle)
self.ioloop.stop()
@@ -202,7 +272,7 @@ class IOLoopTimerTestSelect(IOLoopBaseTest):
"""
self.concluded = True
self.assertTrue(self.deleted_another_timer)
- self.assertNotIn(target_timer, self.ioloop._poller._timeouts)
+ self.assertIsNone(target_timer.callback)
self.ioloop.stop()
self.ioloop.add_timeout(0.01, _on_timer_conclude)
@@ -438,7 +508,10 @@ class IOLoopEintrTestCaseSelect(IOLoopBaseTest):
implementation of polling mechanism and another."""
is_resumable_mock.side_effect = is_resumable_raw
- self.poller = self.ioloop._get_poller()
+ timer = select_connection._Timer()
+ self.poller = self.ioloop._get_poller(timer.get_remaining_interval,
+ timer.process_timeouts)
+ self.addCleanup(self.poller.close)
sockpair = self.poller._get_interrupt_pair()
self.addCleanup(sockpair[0].close)
@@ -448,7 +521,7 @@ class IOLoopEintrTestCaseSelect(IOLoopBaseTest):
self.poller.add_handler(sockpair[0].fileno(), self._eintr_read_handler,
select_connection.READ)
- self.poller.add_timeout(self.TIMEOUT, self._eintr_test_fail)
+ self.ioloop.add_timeout(self.TIMEOUT, self._eintr_test_fail)
original_signal_handler = \
signal.signal(signal.SIGUSR1, self.signal_handler)
@@ -490,23 +563,72 @@ class IOLoopEintrTestCaseKqueue(IOLoopEintrTestCaseSelect):
class SelectPollerTestPollWithoutSockets(unittest.TestCase):
def start_test(self):
- poller = select_connection.SelectPoller()
+ timer = select_connection._Timer()
+ poller = select_connection.SelectPoller(
+ get_wait_seconds=timer.get_remaining_interval,
+ process_timeouts=timer.process_timeouts)
+ self.addCleanup(poller.close)
timer_call_container = []
- poller.add_timeout(0.00001, lambda: timer_call_container.append(1))
+ timer.call_later(0.00001, lambda: timer_call_container.append(1))
poller.poll()
- deadline = poller._next_timeout
+ delay = poller._get_wait_seconds()
+ self.assertIsNotNone(delay)
+ deadline = time.time() + delay
while True:
- poller.process_timeouts()
+ poller._process_timeouts()
if time.time() < deadline:
self.assertEqual(timer_call_container, [])
else:
# One last time in case deadline reached after previous
# processing cycle
- poller.process_timeouts()
+ poller._process_timeouts()
break
self.assertEqual(timer_call_container, [1])
+
+
+class PollerTestCaseSelect(unittest.TestCase):
+ SELECT_POLLER = 'select'
+
+ def setUp(self):
+ select_type_patch = mock.patch.multiple(
+ select_connection, SELECT_TYPE=self.SELECT_POLLER)
+ select_type_patch.start()
+ self.addCleanup(select_type_patch.stop)
+
+ timer = select_connection._Timer()
+ self.addCleanup(timer.close)
+ self.poller = select_connection.IOLoop._get_poller(
+ timer.get_remaining_interval,
+ timer.process_timeouts)
+ self.addCleanup(self.poller.close)
+
+ def test_poller_close(self):
+ self.poller.close()
+ self.assertIsNone(self.poller._r_interrupt)
+ self.assertIsNone(self.poller._w_interrupt)
+ self.assertIsNone(self.poller._fd_handlers)
+ self.assertIsNone(self.poller._fd_events)
+ self.assertIsNone(self.poller._processing_fd_event_map)
+
+
[email protected](not POLL_SUPPORTED, 'poll not supported')
+class PollerTestCasePoll(PollerTestCaseSelect):
+ """Same as PollerTestCaseSelect but uses poll syscall"""
+ SELECT_POLLER = 'poll'
+
+
[email protected](not EPOLL_SUPPORTED, 'epoll not supported')
+class PollerTestCaseEPoll(PollerTestCaseSelect):
+ """Same as PollerTestCaseSelect but uses epoll syscall"""
+ SELECT_POLLER = 'epoll'
+
+
[email protected](not KQUEUE_SUPPORTED, 'kqueue not supported')
+class PollerTestCaseKqueue(PollerTestCaseSelect):
+ """Same as PollerTestCaseSelect but uses kqueue syscall"""
+ SELECT_POLLER = 'kqueue'
diff --git a/tests/unit/select_connection_timer_tests.py b/tests/unit/select_connection_timer_tests.py
new file mode 100644
index 0000000..72e3db5
--- /dev/null
+++ b/tests/unit/select_connection_timer_tests.py
@@ -0,0 +1,353 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for SelectConnection _Timer and _Timeout classes
+
+"""
+
+import time
+import unittest
+
+import mock
+
+from pika.adapters import select_connection
+
+
+# Suppress protected-access
+# pylint: disable=W0212
+
+# Suppress missing-docstring
+# pylint: disable=C0111
+
+# Suppress invalid-name
+# pylint: disable=C0103
+
+class TimeoutClassTests(unittest.TestCase):
+ """Test select_connection._Timeout class"""
+
+ def test_properties(self):
+ now = time.time()
+ cb = lambda: None
+ timeout = select_connection._Timeout(now + 5.3, cb)
+ self.assertIs(timeout.callback, cb)
+ self.assertEqual(timeout.deadline, now + 5.3)
+
+ def test_non_negative_deadline(self):
+ select_connection._Timeout(0, lambda: None)
+ select_connection._Timeout(5, lambda: None)
+
+ with self.assertRaises(ValueError) as cm:
+ select_connection._Timeout(-1, lambda: None)
+
+ self.assertIn('deadline must be non-negative epoch number',
+ cm.exception.args[0])
+
+ def test_non_callable_callback_raises(self):
+ with self.assertRaises(TypeError) as cm:
+ select_connection._Timeout(5, None)
+
+ self.assertIn('callback must be a callable, but got',
+ cm.exception.args[0])
+
+ with self.assertRaises(TypeError) as cm:
+ select_connection._Timeout(5, dict())
+
+ self.assertIn('callback must be a callable, but got',
+ cm.exception.args[0])
+
+ def test_eq_operator(self):
+ # Comparison should be by deadline only
+ t1 = select_connection._Timeout(5, lambda: None)
+ t2 = select_connection._Timeout(5, lambda: 5)
+ self.assertEqual(t1, t2)
+
+ t2 = select_connection._Timeout(10, lambda: 5)
+ self.assertNotEqual(t1, t2)
+
+ def test_lt_operator(self):
+ # Comparison should be by deadline only
+ t1 = select_connection._Timeout(4, lambda: None)
+ t2 = select_connection._Timeout(5, lambda: 5)
+ self.assertLess(t1, t2)
+
+ t2 = select_connection._Timeout(4, lambda: 5)
+ self.assertFalse(t1 < t2)
+
+ t2 = select_connection._Timeout(3, lambda: 5)
+ self.assertFalse(t1 < t2)
+
+ def test_le_operator(self):
+ # Comparison should be by deadline only
+ t1 = select_connection._Timeout(4, lambda: None)
+ t2 = select_connection._Timeout(4, lambda: 5)
+ self.assertLessEqual(t1, t2)
+
+ t2 = select_connection._Timeout(5, lambda: 5)
+ self.assertLessEqual(t1, t2)
+
+ t2 = select_connection._Timeout(3, lambda: 5)
+ self.assertFalse(t1 <= t2)
+
+
+class TimerClassTests(unittest.TestCase):
+ """Test select_connection._Timer class"""
+
+ def test_close_empty(self):
+ timer = select_connection._Timer()
+ timer.close()
+ self.assertIsNone(timer._timeout_heap)
+
+ def test_close_non_empty(self):
+ timer = select_connection._Timer()
+ t1 = timer.call_later(10, lambda: 10)
+ t2 = timer.call_later(20, lambda: 20)
+ timer.close()
+ self.assertIsNone(timer._timeout_heap)
+ self.assertIsNone(t1.callback)
+ self.assertIsNone(t2.callback)
+
+ def test_no_timeouts_remaining_interval_is_none(self):
+ timer = select_connection._Timer()
+ self.assertIsNone(timer.get_remaining_interval())
+
+ def test_call_later_non_negative_delay_check(self):
+ now = time.time()
+
+ # 0 delay is okay
+ with mock.patch('time.time', return_value=now):
+ timer = select_connection._Timer()
+ timer.call_later(0, lambda: None)
+ self.assertEqual(timer._timeout_heap[0].deadline, now)
+ self.assertEqual(timer.get_remaining_interval(), 0)
+
+ # Positive delay is okay
+ with mock.patch('time.time', return_value=now):
+ timer = select_connection._Timer()
+ timer.call_later(0.5, lambda: None)
+ self.assertEqual(timer._timeout_heap[0].deadline, now + 0.5)
+ self.assertEqual(timer.get_remaining_interval(), 0.5)
+
+ # Negative delay raises ValueError
+ timer = select_connection._Timer()
+ with self.assertRaises(ValueError) as cm:
+ timer.call_later(-5, lambda: None)
+ self.assertIn('call_later: delay must be non-negative, but got',
+ cm.exception.args[0])
+
+ def test_call_later_single_timer_expires(self):
+ now = time.time()
+
+ with mock.patch('time.time', return_value=now):
+ bucket = []
+ timer = select_connection._Timer()
+ timer.call_later(5, lambda: bucket.append(1))
+
+ # Nothing is ready to expire
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertEqual(timer.get_remaining_interval(), 5)
+
+ # Advance time by 5 seconds and expect the timer to expire
+ with mock.patch('time.time', return_value=now + 5):
+ self.assertEqual(timer.get_remaining_interval(), 0)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [1])
+ self.assertEqual(len(timer._timeout_heap), 0)
+ self.assertIsNone(timer.get_remaining_interval())
+
+ def test_call_later_multiple_timers(self):
+ now = time.time()
+
+ bucket = []
+ timer = select_connection._Timer()
+
+ with mock.patch('time.time', return_value=now):
+ timer.call_later(5, lambda: bucket.append(1))
+ timer.call_later(5, lambda: bucket.append(2))
+ timer.call_later(10, lambda: bucket.append(3))
+
+ # Nothing is ready to fire yet
+ self.assertEqual(timer.get_remaining_interval(), 5)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertEqual(timer.get_remaining_interval(), 5)
+
+ # Advance time by 6 seconds and expect first two timers to expire
+ with mock.patch('time.time', return_value=now + 6):
+ self.assertEqual(timer.get_remaining_interval(), 0)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [1, 2])
+ self.assertEqual(len(timer._timeout_heap), 1)
+ self.assertEqual(timer.get_remaining_interval(), 4)
+
+ # Advance time by 10 seconds and expect the 3rd timeout to expire
+ with mock.patch('time.time', return_value=now + 10):
+ self.assertEqual(timer.get_remaining_interval(), 0)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [1, 2, 3])
+ self.assertEqual(len(timer._timeout_heap), 0)
+ self.assertIsNone(timer.get_remaining_interval())
+
+ def test_add_and_remove_timeout(self):
+ now = time.time()
+
+ bucket = []
+ timer = select_connection._Timer()
+
+ with mock.patch('time.time', return_value=now):
+ timer.call_later(10, lambda: bucket.append(3)) # t3
+ t2 = timer.call_later(6, lambda: bucket.append(2))
+ t1 = timer.call_later(5, lambda: bucket.append(1))
+
+ # Nothing is ready to fire yet
+ self.assertEqual(timer.get_remaining_interval(), 5)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertEqual(timer.get_remaining_interval(), 5)
+
+ # Cancel t1 and t2 that haven't expired yet
+ timer.remove_timeout(t1)
+ self.assertIsNone(t1.callback)
+ self.assertEqual(timer._num_cancellations, 1)
+ timer.remove_timeout(t2)
+ self.assertIsNone(t2.callback)
+ self.assertEqual(timer._num_cancellations, 2)
+ self.assertEqual(timer.get_remaining_interval(), 5)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertEqual(timer._num_cancellations, 2)
+ self.assertEqual(timer.get_remaining_interval(), 5)
+ self.assertEqual(len(timer._timeout_heap), 3)
+
+ # Advance time by 6 seconds to expire t1 and t2 and verify they don't
+ # fire
+ with mock.patch('time.time', return_value=now + 6):
+ self.assertEqual(timer.get_remaining_interval(), 0)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertEqual(timer._num_cancellations, 0)
+ self.assertEqual(len(timer._timeout_heap), 1)
+ self.assertEqual(timer.get_remaining_interval(), 4)
+
+ # Advance time by 10 seconds to expire t3 and verify it fires
+ with mock.patch('time.time', return_value=now + 10):
+ self.assertEqual(timer.get_remaining_interval(), 0)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [3])
+ self.assertEqual(len(timer._timeout_heap), 0)
+ self.assertIsNone(timer.get_remaining_interval())
+
+ def test_gc_of_unexpired_timeouts(self):
+ now = time.time()
+ bucket = []
+ timer = select_connection._Timer()
+
+ with mock.patch.multiple(select_connection._Timer,
+ _GC_CANCELLATION_THRESHOLD=1):
+ with mock.patch('time.time', return_value=now):
+ t3 = timer.call_later(10, lambda: bucket.append(3))
+ t2 = timer.call_later(6, lambda: bucket.append(2))
+ t1 = timer.call_later(5, lambda: bucket.append(1))
+
+ # Cancel t1 and check that it doesn't trigger GC because it's
+ # not greater than half the timeouts
+ timer.remove_timeout(t1)
+ self.assertEqual(timer._num_cancellations, 1)
+ timer.process_timeouts()
+ self.assertEqual(timer._num_cancellations, 1)
+ self.assertEqual(bucket, [])
+ self.assertEqual(len(timer._timeout_heap), 3)
+ self.assertEqual(timer.get_remaining_interval(), 5)
+
+ # Cancel t3 and verify GC since it's now greater than half of
+ # total timeouts
+ timer.remove_timeout(t3)
+ self.assertEqual(timer._num_cancellations, 2)
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertEqual(len(timer._timeout_heap), 1)
+ self.assertIs(t2, timer._timeout_heap[0])
+ self.assertEqual(timer.get_remaining_interval(), 6)
+ self.assertEqual(timer._num_cancellations, 0)
+
+ def test_add_timeout_from_another_timeout(self):
+ now = time.time()
+ bucket = []
+ timer = select_connection._Timer()
+
+ with mock.patch('time.time', return_value=now):
+ t1 = timer.call_later(
+ 5,
+ lambda: bucket.append(
+ timer.call_later(0, lambda: bucket.append(2))))
+
+ # Advance time by 10 seconds and verify that t1 fires and creates t2,
+ # but timer manager defers firing of t2 to next `process_timeouts` in
+ # order to avoid IO starvation
+ with mock.patch('time.time', return_value=now + 10):
+ timer.process_timeouts()
+ t2 = bucket.pop()
+ self.assertIsInstance(t2, select_connection._Timeout)
+ self.assertIsNot(t2, t1)
+ self.assertEqual(bucket, [])
+ self.assertEqual(len(timer._timeout_heap), 1)
+ self.assertIs(t2, timer._timeout_heap[0])
+ self.assertEqual(timer.get_remaining_interval(), 0)
+
+ # t2 should now fire
+ timer.process_timeouts()
+ self.assertEqual(bucket, [2])
+ self.assertEqual(timer.get_remaining_interval(), None)
+
+ def test_cancel_unexpired_timeout_from_another_timeout(self):
+ now = time.time()
+ bucket = []
+ timer = select_connection._Timer()
+
+ with mock.patch('time.time', return_value=now):
+ t2 = timer.call_later(10, lambda: bucket.append(2))
+ t1 = timer.call_later(5, lambda: timer.remove_timeout(t2))
+
+ self.assertIs(t1, timer._timeout_heap[0])
+
+ # Advance time by 6 seconds and check that t2 is cancelled, but not
+ # removed from timeout heap
+ with mock.patch('time.time', return_value=now + 6):
+ timer.process_timeouts()
+ self.assertIsNone(t2.callback)
+ self.assertEqual(timer.get_remaining_interval(), 4)
+ self.assertIs(t2, timer._timeout_heap[0])
+ self.assertEqual(timer._num_cancellations, 1)
+
+ # Advance time by 10 seconds and verify that t2 is removed without
+ # firing
+ with mock.patch('time.time', return_value=now + 10):
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertIsNone(timer.get_remaining_interval())
+ self.assertEqual(len(timer._timeout_heap), 0)
+ self.assertEqual(timer._num_cancellations, 0)
+
+
+ def test_cancel_expired_timeout_from_another_timeout(self):
+ now = time.time()
+ bucket = []
+ timer = select_connection._Timer()
+
+ with mock.patch('time.time', return_value=now):
+ t2 = timer.call_later(10, lambda: bucket.append(2))
+ t1 = timer.call_later(
+ 5,
+ lambda: (self.assertEqual(timer._num_cancellations, 0),
+ timer.remove_timeout(t2)))
+
+ self.assertIs(t1, timer._timeout_heap[0])
+
+ # Advance time by 10 seconds and check that t2 is cancelled and
+ # removed from timeout heap
+ with mock.patch('time.time', return_value=now + 10):
+ timer.process_timeouts()
+ self.assertEqual(bucket, [])
+ self.assertIsNone(t2.callback)
+ self.assertIsNone(timer.get_remaining_interval())
+ self.assertEqual(len(timer._timeout_heap), 0)
+ self.assertEqual(timer._num_cancellations, 0)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 9
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
-e git+https://github.com/pika/pika.git@7b6d7983db021ae4b84d08ea9cee4b8f960ada43#egg=pika
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
tomli==2.2.1
tornado==6.4.2
Twisted==15.3.0
urllib3==2.3.0
zope.interface==7.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- tornado==6.4.2
- twisted==15.3.0
- urllib3==2.3.0
- zope-interface==7.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_error_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_no_error_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_user_initiated_close",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_select_connection_timeout",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestSelect::test_normal",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestSelect::test_timer_delete_another",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestSelect::test_timer_for_deleting_itself",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestPoll::test_normal",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestPoll::test_timer_delete_another",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestPoll::test_timer_for_deleting_itself",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestEPoll::test_normal",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestEPoll::test_timer_delete_another",
"tests/unit/select_connection_ioloop_tests.py::IOLoopTimerTestEPoll::test_timer_for_deleting_itself",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestSelect::test_normal",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestSelect::test_timer_delete_another",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestSelect::test_timer_for_deleting_itself",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestPoll::test_normal",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestPoll::test_timer_delete_another",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestPoll::test_timer_for_deleting_itself",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestEPoll::test_normal",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestEPoll::test_timer_delete_another",
"tests/unit/select_connection_ioloop_tests.py::IOLoopSleepTimerTestEPoll::test_timer_for_deleting_itself",
"tests/unit/select_connection_ioloop_tests.py::IOLoopEintrTestCaseSelect::test_eintr",
"tests/unit/select_connection_ioloop_tests.py::IOLoopEintrTestCasePoll::test_eintr",
"tests/unit/select_connection_ioloop_tests.py::IOLoopEintrTestCaseEPoll::test_eintr",
"tests/unit/select_connection_ioloop_tests.py::PollerTestCaseSelect::test_poller_close",
"tests/unit/select_connection_ioloop_tests.py::PollerTestCasePoll::test_poller_close",
"tests/unit/select_connection_ioloop_tests.py::PollerTestCaseEPoll::test_poller_close",
"tests/unit/select_connection_timer_tests.py::TimeoutClassTests::test_eq_operator",
"tests/unit/select_connection_timer_tests.py::TimeoutClassTests::test_le_operator",
"tests/unit/select_connection_timer_tests.py::TimeoutClassTests::test_lt_operator",
"tests/unit/select_connection_timer_tests.py::TimeoutClassTests::test_non_callable_callback_raises",
"tests/unit/select_connection_timer_tests.py::TimeoutClassTests::test_non_negative_deadline",
"tests/unit/select_connection_timer_tests.py::TimeoutClassTests::test_properties",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_add_and_remove_timeout",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_add_timeout_from_another_timeout",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_call_later_multiple_timers",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_call_later_non_negative_delay_check",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_call_later_single_timer_expires",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_cancel_expired_timeout_from_another_timeout",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_cancel_unexpired_timeout_from_another_timeout",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_close_empty",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_close_non_empty",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_gc_of_unexpired_timeouts",
"tests/unit/select_connection_timer_tests.py::TimerClassTests::test_no_timeouts_remaining_interval_is_none"
]
| [
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestMultiCloseConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnectionAndPassesOriginalException::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnectionAndPassesSystemException::test",
"tests/acceptance/blocking_adapter_test.py::TestLostConnectionResultsInIsClosedConnectionAndChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestInvalidExchangeTypeRaisesConnectionClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseConnectionWithChannelAndConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestSuddenBrokerDisconnectBeforeChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestNoAccessToFileDescriptorAfterConnectionClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionStart::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionTune::test",
"tests/acceptance/blocking_adapter_test.py::TestProcessDataEvents::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionRegisterForBlockAndUnblock::test",
"tests/acceptance/blocking_adapter_test.py::TestBlockedConnectionTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestAddCallbackThreadsafeFromSameThread::test",
"tests/acceptance/blocking_adapter_test.py::TestAddCallbackThreadsafeFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestAddTimeoutRemoveTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestRemoveTimeoutFromTimeoutCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestSleep::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionProperties::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestExchangeDeclareAndDelete::test",
"tests/acceptance/blocking_adapter_test.py::TestExchangeBindAndUnbind::test",
"tests/acceptance/blocking_adapter_test.py::TestQueueDeclareAndDelete::test",
"tests/acceptance/blocking_adapter_test.py::TestPassiveQueueDeclareOfUnknownQueueRaisesChannelClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestQueueBindAndUnbindAndPurge::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicGet::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicReject::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicRejectNoRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNack::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNackNoRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNackMultiple::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicRecoverWithRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestTxCommit::test",
"tests/acceptance/blocking_adapter_test.py::TestTxRollback::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicConsumeFromUnknownQueueRaisesChannelClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishAndBasicPublishWithPubacksUnroutable::test",
"tests/acceptance/blocking_adapter_test.py::TestConfirmDeliveryAfterUnroutableMessage::test",
"tests/acceptance/blocking_adapter_test.py::TestUnroutableMessagesReturnedInNonPubackMode::test",
"tests/acceptance/blocking_adapter_test.py::TestUnroutableMessageReturnedInPubackMode::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicPublishDeliveredWhenPendingUnroutable::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishAndConsumeWithPubacksAndQosOfOne::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicConsumeWithAckFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorWithAckFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestTwoBasicConsumersOnSameChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelPurgesPendingConsumerCancellationEvt::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicPublishWithoutPubacks::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestStopConsumingFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestCloseChannelFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestCloseConnectionFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestNonPubAckPublishAndConsumeHugeMessage::test",
"tests/acceptance/blocking_adapter_test.py::TestNonPubackPublishAndConsumeManyMessages::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelWithNonAckableConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelWithAckableConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestUnackedMessageAutoRestoredToQueueOnChannelClose::test",
"tests/acceptance/blocking_adapter_test.py::TestNoAckMessageNotRestoredToQueueOnChannelClose::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeInactivityTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestChannelFlow::test"
]
| [
"tests/acceptance/blocking_adapter_test.py::TestConnectWithDownedBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionProtocol::test",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_channel",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_close_with_channel_closed_exception",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_connection_attempts_with_timeout",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_constructor",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup_fails_with_open_error",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_sleep",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_asyncio_connection_timeout",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_base_connection_timeout",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_blocking_connection_timeout",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_parameters",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_tornado_connection_timeout",
"tests/unit/connection_timeout_tests.py::ConnectionTests::test_twisted_connection_timeout"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,415 | [
"pika/adapters/blocking_connection.py",
"pika/adapters/asyncio_connection.py",
"pika/adapters/select_connection.py",
"pika/adapters/tornado_connection.py",
"docs/faq.rst",
"pika/adapters/twisted_connection.py",
"pika/connection.py",
"pika/adapters/base_connection.py",
"pylintrc"
]
| [
"pika/adapters/blocking_connection.py",
"pika/adapters/asyncio_connection.py",
"pika/adapters/select_connection.py",
"pika/adapters/tornado_connection.py",
"docs/faq.rst",
"pika/adapters/twisted_connection.py",
"pika/connection.py",
"pika/adapters/base_connection.py",
"pylintrc"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.