instance_id | patch | repo | base_commit | hints_text | test_patch | problem_statement | version | environment_setup_commit | FAIL_TO_PASS | PASS_TO_PASS | meta | created_at | license | __index_level_0__ |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
boxed__mutmut-228 | diff --git a/HISTORY.rst b/HISTORY.rst
index 03768a0..e2c35e3 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -8,6 +8,10 @@ Changelog
* Fixed error where ``mutmut_config.init()`` was not called when running without explicitly having set ``PYTHONPATH``
+* Use ``Click``'s subcommand feature to refactor the command line interface. For the end user, this can now run ``mutmut [COMMAND] -h``
+ to check which parameters are relevant to this specific subcommand. The change is backwards compatible, and all existing commands
+ work the same as before, with the exception of ``mutmut --version``, which now has to be ``mutmut version``.
+
2.2.0
~~~~~
diff --git a/mutmut/__init__.py b/mutmut/__init__.py
index 0d98509..76703c7 100644
--- a/mutmut/__init__.py
+++ b/mutmut/__init__.py
@@ -808,7 +808,7 @@ def run_mutation(context: Context, callback) -> str:
class Config(object):
def __init__(self, swallow_output, test_command, covered_lines_by_filename,
baseline_time_elapsed, test_time_multiplier, test_time_base,
- backup, dict_synonyms, total, using_testmon, cache_only,
+ dict_synonyms, total, using_testmon, cache_only,
tests_dirs, hash_of_tests, pre_mutation, post_mutation,
coverage_data, paths_to_mutate, mutation_types_to_apply, no_progress):
self.swallow_output = swallow_output
@@ -817,7 +817,6 @@ class Config(object):
self.baseline_time_elapsed = baseline_time_elapsed
self.test_time_multipler = test_time_multiplier
self.test_time_base = test_time_base
- self.backup = backup
self.dict_synonyms = dict_synonyms
self.total = total
self.using_testmon = using_testmon
diff --git a/mutmut/__main__.py b/mutmut/__main__.py
index 4b6fb74..c2a761d 100644
--- a/mutmut/__main__.py
+++ b/mutmut/__main__.py
@@ -81,15 +81,27 @@ null_out = open(os.devnull, 'w')
DEFAULT_RUNNER = 'python -m pytest -x --assert=plain'
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
[email protected]('command', nargs=1, required=False)
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
+def climain():
+ """
+ Mutation testing system for Python.
+ """
+ pass
+
+
[email protected]()
+def version():
+ """Show the version and exit."""
+ print("mutmut version {}".format(__version__))
+ sys.exit(0)
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
@click.argument('argument', nargs=1, required=False)
[email protected]('argument2', nargs=1, required=False)
@click.option('--paths-to-mutate', type=click.STRING)
@click.option('--disable-mutation-types', type=click.STRING, help='Skip the given types of mutations.')
@click.option('--enable-mutation-types', type=click.STRING, help='Only perform given types of mutations.')
@click.option('--paths-to-exclude', type=click.STRING)
[email protected]('--backup/--no-backup', default=False)
@click.option('--runner')
@click.option('--use-coverage', is_flag=True, default=False)
@click.option('--use-patch-file', help='Only mutate lines added/changed in the given patch file')
@@ -99,9 +111,6 @@ DEFAULT_RUNNER = 'python -m pytest -x --assert=plain'
@click.option('-s', '--swallow-output', help='turn off output capture', is_flag=True)
@click.option('--dict-synonyms')
@click.option('--cache-only', is_flag=True, default=False)
[email protected]('--version', is_flag=True, default=False)
[email protected]('--suspicious-policy', type=click.Choice(['ignore', 'skipped', 'error', 'failure']), default='ignore')
[email protected]('--untested-policy', type=click.Choice(['ignore', 'skipped', 'error', 'failure']), default='ignore')
@click.option('--pre-mutation')
@click.option('--post-mutation')
@click.option('--simple-output', is_flag=True, default=False, help="Swap emojis in mutmut output to plain text alternatives.")
@@ -115,54 +124,126 @@ DEFAULT_RUNNER = 'python -m pytest -x --assert=plain'
post_mutation=None,
use_patch_file=None,
)
-def climain(command, argument, argument2, paths_to_mutate, disable_mutation_types, enable_mutation_types,
- backup, runner, tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage,
- dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation,
- post_mutation, use_patch_file, paths_to_exclude, simple_output, no_progress):
+def run(argument, paths_to_mutate, disable_mutation_types, enable_mutation_types, runner,
+ tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage,
+ dict_synonyms, cache_only, pre_mutation, post_mutation, use_patch_file, paths_to_exclude,
+ simple_output, no_progress):
"""
-commands:\n
- run [mutation id]\n
- Runs mutmut. You probably want to start with just trying this. If you supply a mutation ID mutmut will check just this mutant.\n
- results\n
- Print the results.\n
- result-ids survived (or any other of: killed,timeout,suspicious,skipped,untested)\n
- Print the IDs of the specified mutant classes (separated by spaces).\n
- apply [mutation id]\n
- Apply a mutation on disk.\n
- show [mutation id]\n
- Show a mutation diff.\n
- show [path to file]\n
- Show all mutation diffs for this file.\n
- junitxml\n
- Show a mutation diff with junitxml format.
+ Runs mutmut. You probably want to start with just trying this. If you supply a mutation ID mutmut will check just this mutant.
"""
if test_time_base is None: # click sets the default=0.0 to None
test_time_base = 0.0
if test_time_multiplier is None: # click sets the default=0.0 to None
test_time_multiplier = 0.0
- sys.exit(main(command, argument, argument2, paths_to_mutate, disable_mutation_types,
- enable_mutation_types, backup, runner,
- tests_dir, test_time_multiplier, test_time_base,
- swallow_output, use_coverage, dict_synonyms, cache_only,
- version, suspicious_policy, untested_policy, pre_mutation,
- post_mutation, use_patch_file, paths_to_exclude, simple_output,
- no_progress))
-
-
-def main(command, argument, argument2, paths_to_mutate, disable_mutation_types,
- enable_mutation_types, backup, runner, tests_dir, test_time_multiplier, test_time_base,
- swallow_output, use_coverage, dict_synonyms, cache_only, version,
- suspicious_policy, untested_policy, pre_mutation, post_mutation,
+
+ sys.exit(do_run(argument, paths_to_mutate, disable_mutation_types, enable_mutation_types, runner,
+ tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage,
+ dict_synonyms, cache_only, pre_mutation, post_mutation, use_patch_file, paths_to_exclude,
+ simple_output, no_progress))
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
+def results():
+ """
+ Print the results.
+ """
+ print_result_cache()
+ sys.exit(0)
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
[email protected]('status', nargs=1, required=True)
+def result_ids(status):
+ """
+ Print the IDs of the specified mutant classes (separated by spaces).\n
+ result-ids survived (or any other of: killed,timeout,suspicious,skipped,untested)\n
+ """
+ if not status or status not in MUTANT_STATUSES:
+ raise click.BadArgumentUsage(f'The result-ids command needs a status class of mutants '
+ f'(one of : {set(MUTANT_STATUSES.keys())}) but was {status}')
+ print_result_ids_cache(status)
+ sys.exit(0)
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
[email protected]('mutation-id', nargs=1, required=True)
[email protected]('--backup/--no-backup', default=False)
[email protected]('--dict-synonyms')
+@config_from_setup_cfg(
+ dict_synonyms='',
+)
+def apply(mutation_id, backup, dict_synonyms):
+ """
+ Apply a mutation on disk.
+ """
+ do_apply(mutation_id, dict_synonyms, backup)
+ sys.exit(0)
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
[email protected]('id-or-file', nargs=1, required=False)
[email protected]('only-filenames', nargs=1, required=False) # TODO: this could be changed to be an option, but this would be a not backwards compatible change to the CLI
[email protected]('--dict-synonyms')
+@config_from_setup_cfg(
+ dict_synonyms='',
+)
+def show(id_or_file, only_filenames, dict_synonyms):
+ """
+ Show a mutation diff.
+ """
+ if not id_or_file:
+ print_result_cache()
+ sys.exit(0)
+
+ if id_or_file == 'all':
+ print_result_cache(show_diffs=True, dict_synonyms=dict_synonyms, print_only_filename=only_filenames)
+ sys.exit(0)
+
+ if os.path.isfile(id_or_file):
+ print_result_cache(show_diffs=True, only_this_file=id_or_file)
+ sys.exit(0)
+
+ print(get_unified_diff(id_or_file, dict_synonyms))
+ sys.exit(0)
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
[email protected]('--dict-synonyms')
[email protected]('--suspicious-policy', type=click.Choice(['ignore', 'skipped', 'error', 'failure']), default='ignore')
[email protected]('--untested-policy', type=click.Choice(['ignore', 'skipped', 'error', 'failure']), default='ignore')
+@config_from_setup_cfg(
+ dict_synonyms='',
+)
+def junitxml(dict_synonyms, suspicious_policy, untested_policy):
+ """
+ Show a mutation diff with junitxml format.
+ """
+ print_result_cache_junitxml(dict_synonyms, suspicious_policy, untested_policy)
+ sys.exit(0)
+
+
[email protected](context_settings=dict(help_option_names=['-h', '--help']))
[email protected]('--dict-synonyms')
+@config_from_setup_cfg(
+ dict_synonyms='',
+)
+def html(dict_synonyms):
+ """
+ Generate a HTML report of surviving mutants.
+ """
+ create_html_report(dict_synonyms)
+ sys.exit(0)
+
+
+def do_run(argument, paths_to_mutate, disable_mutation_types,
+ enable_mutation_types, runner, tests_dir, test_time_multiplier, test_time_base,
+ swallow_output, use_coverage, dict_synonyms, cache_only, pre_mutation, post_mutation,
use_patch_file, paths_to_exclude, simple_output, no_progress):
"""return exit code, after performing an mutation test run.
:return: the exit code from executing the mutation tests
:rtype: int
"""
- if version:
- print("mutmut version {}".format(__version__))
- return 0
-
if use_coverage and use_patch_file:
raise click.BadArgumentUsage("You can't combine --use-coverage and --use-patch")
@@ -180,57 +261,11 @@ def main(command, argument, argument2, paths_to_mutate, disable_mutation_types,
if invalid_types:
raise click.BadArgumentUsage(f"The following are not valid mutation types: {', '.join(sorted(invalid_types))}. Valid mutation types are: {', '.join(mutations_by_type.keys())}")
- valid_commands = ['run', 'results', 'result-ids', 'apply', 'show', 'junitxml', 'html']
- if command not in valid_commands:
- raise click.BadArgumentUsage('{} is not a valid command, must be one of {}'.format(command, ', '.join(valid_commands)))
-
- if command == 'results' and argument:
- raise click.BadArgumentUsage('The {} command takes no arguments'.format(command))
-
dict_synonyms = [x.strip() for x in dict_synonyms.split(',')]
- if command in ('show', 'diff'):
- if not argument:
- print_result_cache()
- return 0
-
- if argument == 'all':
- print_result_cache(show_diffs=True, dict_synonyms=dict_synonyms, print_only_filename=argument2)
- return 0
-
- if os.path.isfile(argument):
- print_result_cache(show_diffs=True, only_this_file=argument)
- return 0
-
- print(get_unified_diff(argument, dict_synonyms))
- return 0
-
if use_coverage and not exists('.coverage'):
raise FileNotFoundError('No .coverage file found. You must generate a coverage file to use this feature.')
- if command == 'results':
- print_result_cache()
- return 0
-
- if command == 'result-ids':
- if not argument or argument not in MUTANT_STATUSES:
- raise click.BadArgumentUsage(f'The {command} command needs a status class of mutants '
- f'(one of : {set(MUTANT_STATUSES.keys())}) but was {argument}')
- print_result_ids_cache(argument)
- return 0
-
- if command == 'junitxml':
- print_result_cache_junitxml(dict_synonyms, suspicious_policy, untested_policy)
- return 0
-
- if command == 'html':
- create_html_report(dict_synonyms)
- return 0
-
- if command == 'apply':
- do_apply(argument, dict_synonyms, backup)
- return 0
-
if paths_to_mutate is None:
paths_to_mutate = guess_paths_to_mutate()
@@ -329,9 +364,6 @@ Legend for output:
assert use_patch_file
covered_lines_by_filename = read_patch_data(use_patch_file)
- if command != 'run':
- raise click.BadArgumentUsage("Invalid command {}".format(command))
-
mutations_by_file = {}
paths_to_exclude = paths_to_exclude or ''
@@ -346,7 +378,6 @@ Legend for output:
covered_lines_by_filename=covered_lines_by_filename,
coverage_data=coverage_data,
baseline_time_elapsed=baseline_time_elapsed,
- backup=backup,
dict_synonyms=dict_synonyms,
using_testmon=using_testmon,
cache_only=cache_only,
| boxed/mutmut | c63ba9ca08ae780ca815ff188ea91417e2dc8868 | diff --git a/tests/test_main.py b/tests/test_main.py
index 09fda7f..644155b 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -25,6 +25,7 @@ from mutmut import (
python_source_files,
read_coverage_data,
MUTANT_STATUSES,
+ __version__,
)
from mutmut.__main__ import climain
@@ -86,6 +87,21 @@ def single_mutant_filesystem(tmpdir):
mutmut.cache.db.schema = None
[email protected]
+def surviving_mutants_filesystem(tmpdir):
+ foo_py = """
+def foo(a, b):
+ result = a + b
+ return result
+"""
+
+ test_py = """
+def test_nothing(): assert True
+"""
+
+ create_filesystem(tmpdir, foo_py, test_py)
+
+
def create_filesystem(tmpdir, file_to_mutate_contents, test_file_contents):
test_dir = str(tmpdir)
os.chdir(test_dir)
@@ -106,6 +122,10 @@ runner=python -m hammett -x
f.write(test_file_contents)
+def test_print_version():
+ assert CliRunner().invoke(climain, ['version']).output.strip() == f'mutmut version {__version__}'
+
+
def test_compute_return_code():
# mock of Config for ease of testing
class MockProgress(Progress):
@@ -253,6 +273,20 @@ def test_simple_apply(filesystem):
assert f.read() != file_to_mutate_contents
+def test_simply_apply_with_backup(filesystem):
+ result = CliRunner().invoke(climain, ['run', '-s', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
+ print(repr(result.output))
+ assert result.exit_code == 0
+
+ result = CliRunner().invoke(climain, ['apply', '--backup', '1'], catch_exceptions=False)
+ print(repr(result.output))
+ assert result.exit_code == 0
+ with open(os.path.join(str(filesystem), 'foo.py')) as f:
+ assert f.read() != file_to_mutate_contents
+ with open(os.path.join(str(filesystem), 'foo.py.bak')) as f:
+ assert f.read() == file_to_mutate_contents
+
+
def test_full_run_no_surviving_mutants(filesystem):
result = CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
print(repr(result.output))
@@ -520,3 +554,127 @@ def test_enable_and_disable_mutation_type_are_exclusive():
)
assert result.exception.code == 2
assert "You can't combine --disable-mutation-types and --enable-mutation-types" in result.output
+
+
+def test_show(surviving_mutants_filesystem):
+ CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
+ result = CliRunner().invoke(climain, ['show'])
+ assert result.output.strip() == """
+To apply a mutant on disk:
+ mutmut apply <id>
+
+To show a mutant:
+ mutmut show <id>
+
+
+Survived 🙁 (2)
+
+---- foo.py (2) ----
+
+1-2
+""".strip()
+
+
+def test_show_single_id(surviving_mutants_filesystem):
+ CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
+ result = CliRunner().invoke(climain, ['show', '1'])
+ assert result.output.strip() == """
+--- foo.py
++++ foo.py
+@@ -1,5 +1,5 @@
+
+ def foo(a, b):
+- result = a + b
++ result = a - b
+ return result
+""".strip()
+
+
+def test_show_all(surviving_mutants_filesystem):
+ CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
+ result = CliRunner().invoke(climain, ['show', 'all'])
+ assert result.output.strip() == """
+To apply a mutant on disk:
+ mutmut apply <id>
+
+To show a mutant:
+ mutmut show <id>
+
+
+Survived 🙁 (2)
+
+---- foo.py (2) ----
+
+# mutant 1
+--- foo.py
++++ foo.py
+@@ -1,5 +1,5 @@
+
+ def foo(a, b):
+- result = a + b
++ result = a - b
+ return result
+
+
+# mutant 2
+--- foo.py
++++ foo.py
+@@ -1,5 +1,5 @@
+
+ def foo(a, b):
+- result = a + b
++ result = None
+ return result
+""".strip()
+
+
+def test_show_for_file(surviving_mutants_filesystem):
+ CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
+ result = CliRunner().invoke(climain, ['show', 'foo.py'])
+ assert result.output.strip() == """
+To apply a mutant on disk:
+ mutmut apply <id>
+
+To show a mutant:
+ mutmut show <id>
+
+
+Survived 🙁 (2)
+
+---- foo.py (2) ----
+
+# mutant 1
+--- foo.py
++++ foo.py
+@@ -1,5 +1,5 @@
+
+ def foo(a, b):
+- result = a + b
++ result = a - b
+ return result
+
+
+# mutant 2
+--- foo.py
++++ foo.py
+@@ -1,5 +1,5 @@
+
+ def foo(a, b):
+- result = a + b
++ result = None
+ return result
+""".strip()
+
+
+def test_html_output(surviving_mutants_filesystem):
+ result = CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--test-time-base=15.0"], catch_exceptions=False)
+ print(repr(result.output))
+ result = CliRunner().invoke(climain, ['html'])
+ assert os.path.isfile("html/index.html")
+ with open("html/index.html") as f:
+ assert f.read() == (
+ '<h1>Mutation testing report</h1>'
+ 'Killed 0 out of 2 mutants'
+ '<table><thead><tr><th>File</th><th>Total</th><th>Killed</th><th>% killed</th><th>Survived</th></thead>'
+ '<tr><td><a href="foo.py.html">foo.py</a></td><td>2</td><td>0</td><td>0.00</td><td>2</td>'
+ '</table></body></html>')
| Using click's subcommands
As discussed on #55, we should add support for `click`'s subcommand feature: http://click.palletsprojects.com/en/7.x/commands/. This provides greater control on arguments for each individual command, and should be a backwards compatible change. This would also improve extensibility of the CLI. | 0.0 | c63ba9ca08ae780ca815ff188ea91417e2dc8868 | [
"tests/test_main.py::test_print_version"
]
| [
"tests/test_main.py::test_compute_return_code",
"tests/test_main.py::test_read_coverage_data",
"tests/test_main.py::test_python_source_files[expected0-foo.py-tests_dirs0]",
"tests/test_main.py::test_python_source_files[expected1-.-tests_dirs1]",
"tests/test_main.py::test_python_source_files[expected2-.-tests_dirs2]",
"tests/test_main.py::test_python_source_files__with_paths_to_exclude",
"tests/test_main.py::test_popen_streaming_output_timeout",
"tests/test_main.py::test_popen_streaming_output_stream",
"tests/test_main.py::test_select_unknown_mutation_type[--enable-mutation-types]",
"tests/test_main.py::test_select_unknown_mutation_type[--disable-mutation-types]",
"tests/test_main.py::test_enable_and_disable_mutation_type_are_exclusive"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-19 12:41:11+00:00 | bsd-3-clause | 1,424 |
|
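For context on the Click subcommand pattern that the boxed__mutmut-228 entry above migrates to, here is a minimal, self-contained sketch; the `cli` and `run` names and the single option are illustrative stand-ins, not mutmut's actual code.

```python
import click


@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
def cli():
    """Top-level group: running `prog -h` lists the registered subcommands."""


@cli.command()
@click.option("--paths-to-mutate", type=click.STRING)
def run(paths_to_mutate):
    """Each subcommand declares only its own options, so `prog run -h` shows just these."""
    click.echo(f"would mutate: {paths_to_mutate!r}")


@cli.command()
def version():
    """Show the version and exit."""
    click.echo("example 0.0.0")


if __name__ == "__main__":
    cli()
```

Invoking `prog run -h` then documents only `--paths-to-mutate`, which is the "check which parameters are relevant to this specific subcommand" behaviour described in the HISTORY entry of the patch.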
boxed__mutmut-239 | diff --git a/mutmut/__init__.py b/mutmut/__init__.py
index a736caf..9af8f48 100644
--- a/mutmut/__init__.py
+++ b/mutmut/__init__.py
@@ -227,11 +227,17 @@ def number_mutation(value, **_):
try:
parsed = int(value, base=base)
+ result = repr(parsed + 1)
except ValueError:
# Since it wasn't an int, it must be a float
parsed = float(value)
+ # This avoids all very small numbers becoming 1.0, and very
+ # large numbers not changing at all
+ if (1e-5 < abs(parsed) < 1e5) or (parsed == 0.0):
+ result = repr(parsed + 1)
+ else:
+ result = repr(parsed * 2)
- result = repr(parsed + 1)
if not result.endswith(suffix):
result += suffix
return result
| boxed/mutmut | cce78241aa116d14c4ae38ecd1b2c84e659126d7 | diff --git a/tests/test_mutation.py b/tests/test_mutation.py
index 710c741..42c063e 100644
--- a/tests/test_mutation.py
+++ b/tests/test_mutation.py
@@ -74,6 +74,7 @@ for x in y:
('1.0', '2.0'),
('0.1', '1.1'),
('1e-3', '1.001'),
+ ('1e16', '2e+16'),
('True', 'False'),
('False', 'True'),
('"foo"', '"XXfooXX"'),
@@ -101,7 +102,8 @@ for x in y:
('1j', '2j'),
('1.0j', '2.0j'),
('0o1', '2'),
- ('1.0e10', '10000000001.0'),
+ ('1.0e10', '20000000000.0'),
+ ('1.1e-16', '2.2e-16'),
("dict(a=b)", "dict(aXX=b)"),
("Struct(a=b)", "Struct(aXX=b)"),
("FooBarDict(a=b)", "FooBarDict(aXX=b)"),
| Generation of unary plus in exponent is un-killable
Hello, thanks for the lovely tool!
mutmut generates mutants like the following, which are identical to the original code and thus un-killable (immortal?):
```diff
--- mvce/mvce.py
+++ mvce/mvce.py
@@ -1,3 +1,3 @@
def multiply_by_1e100(number):
- return number * 1e100
+ return number * 1e+100
```
Here's a test so that you can reproduce the whole thing:
```py
from mvce import multiply_by_1e100
def test_multiply_by_1e100():
assert multiply_by_1e100(4) == 4e100
```
`1e16` seems to be the smallest number to which this happens. Smaller numbers get expanded from e.g. `1e2` to `101.0` | 0.0 | cce78241aa116d14c4ae38ecd1b2c84e659126d7 | [
"tests/test_mutation.py::test_basic_mutations[1e16-2e+16]",
"tests/test_mutation.py::test_basic_mutations[1.0e10-20000000000.0]",
"tests/test_mutation.py::test_basic_mutations[1.1e-16-2.2e-16]"
]
| [
"tests/test_mutation.py::test_matches_py3",
"tests/test_mutation.py::test_matches",
"tests/test_mutation.py::test_ast_pattern_for_loop",
"tests/test_mutation.py::test_basic_mutations[lambda:",
"tests/test_mutation.py::test_basic_mutations[a(b)-a(None)]",
"tests/test_mutation.py::test_basic_mutations[a[b]-a[None]]",
"tests/test_mutation.py::test_basic_mutations[1",
"tests/test_mutation.py::test_basic_mutations[1+1-2-2]",
"tests/test_mutation.py::test_basic_mutations[1-2]",
"tests/test_mutation.py::test_basic_mutations[1-1-2+2]",
"tests/test_mutation.py::test_basic_mutations[1*1-2/2]",
"tests/test_mutation.py::test_basic_mutations[1/1-2*2]",
"tests/test_mutation.py::test_basic_mutations[1.0-2.0]",
"tests/test_mutation.py::test_basic_mutations[0.1-1.1]",
"tests/test_mutation.py::test_basic_mutations[1e-3-1.001]",
"tests/test_mutation.py::test_basic_mutations[True-False]",
"tests/test_mutation.py::test_basic_mutations[False-True]",
"tests/test_mutation.py::test_basic_mutations[\"foo\"-\"XXfooXX\"]",
"tests/test_mutation.py::test_basic_mutations['foo'-'XXfooXX']",
"tests/test_mutation.py::test_basic_mutations[u'foo'-u'XXfooXX']",
"tests/test_mutation.py::test_basic_mutations[0-1]",
"tests/test_mutation.py::test_basic_mutations[0o0-1]",
"tests/test_mutation.py::test_basic_mutations[0.-1.0]",
"tests/test_mutation.py::test_basic_mutations[0x0-1]",
"tests/test_mutation.py::test_basic_mutations[0b0-1]",
"tests/test_mutation.py::test_basic_mutations[1<2-2<=3]",
"tests/test_mutation.py::test_basic_mutations[(1,",
"tests/test_mutation.py::test_basic_mutations[foo",
"tests/test_mutation.py::test_basic_mutations[x",
"tests/test_mutation.py::test_basic_mutations[a",
"tests/test_mutation.py::test_basic_mutations[s[0]-s[1]]",
"tests/test_mutation.py::test_basic_mutations[s[0]",
"tests/test_mutation.py::test_basic_mutations[s[x]-s[None]]",
"tests/test_mutation.py::test_basic_mutations[s[1:]-s[2:]]",
"tests/test_mutation.py::test_basic_mutations[1j-2j]",
"tests/test_mutation.py::test_basic_mutations[1.0j-2.0j]",
"tests/test_mutation.py::test_basic_mutations[0o1-2]",
"tests/test_mutation.py::test_basic_mutations[dict(a=b)-dict(aXX=b)]",
"tests/test_mutation.py::test_basic_mutations[Struct(a=b)-Struct(aXX=b)]",
"tests/test_mutation.py::test_basic_mutations[FooBarDict(a=b)-FooBarDict(aXX=b)]",
"tests/test_mutation.py::test_basic_mutations[lambda",
"tests/test_mutation.py::test_basic_mutations[break-continue]",
"tests/test_mutation.py::test_multiple_mutations[x+=1-expected0]",
"tests/test_mutation.py::test_multiple_mutations[x-=1-expected1]",
"tests/test_mutation.py::test_multiple_mutations[x*=1-expected2]",
"tests/test_mutation.py::test_multiple_mutations[x/=1-expected3]",
"tests/test_mutation.py::test_multiple_mutations[x//=1-expected4]",
"tests/test_mutation.py::test_multiple_mutations[x%=1-expected5]",
"tests/test_mutation.py::test_multiple_mutations[x<<=1-expected6]",
"tests/test_mutation.py::test_multiple_mutations[x>>=1-expected7]",
"tests/test_mutation.py::test_multiple_mutations[x&=1-expected8]",
"tests/test_mutation.py::test_multiple_mutations[x|=1-expected9]",
"tests/test_mutation.py::test_multiple_mutations[x^=1-expected10]",
"tests/test_mutation.py::test_multiple_mutations[x**=1-expected11]",
"tests/test_mutation.py::test_basic_mutations_python3[a:",
"tests/test_mutation.py::test_basic_mutations_python3[def",
"tests/test_mutation.py::test_basic_mutations_python3[a",
"tests/test_mutation.py::test_basic_mutations_python3[lambda",
"tests/test_mutation.py::test_basic_mutations_python3[lambda:",
"tests/test_mutation.py::test_basic_mutations_python36[a:",
"tests/test_mutation.py::test_do_not_mutate[foo(a,",
"tests/test_mutation.py::test_do_not_mutate['''foo''']",
"tests/test_mutation.py::test_do_not_mutate[r'''foo''']",
"tests/test_mutation.py::test_do_not_mutate[(x",
"tests/test_mutation.py::test_do_not_mutate[NotADictSynonym(a=b)]",
"tests/test_mutation.py::test_do_not_mutate[from",
"tests/test_mutation.py::test_do_not_mutate[import",
"tests/test_mutation.py::test_do_not_mutate[foo.bar]",
"tests/test_mutation.py::test_do_not_mutate[for",
"tests/test_mutation.py::test_do_not_mutate[def",
"tests/test_mutation.py::test_do_not_mutate_python3[def",
"tests/test_mutation.py::test_do_not_mutate_python3[a[None]]",
"tests/test_mutation.py::test_do_not_mutate_python3[a(None)]",
"tests/test_mutation.py::test_mutate_body_of_function_with_return_type_annotation",
"tests/test_mutation.py::test_mutate_all",
"tests/test_mutation.py::test_mutate_both",
"tests/test_mutation.py::test_perform_one_indexed_mutation",
"tests/test_mutation.py::test_function",
"tests/test_mutation.py::test_function_with_annotation",
"tests/test_mutation.py::test_pragma_no_mutate",
"tests/test_mutation.py::test_pragma_no_mutate_and_no_cover",
"tests/test_mutation.py::test_mutate_decorator",
"tests/test_mutation.py::test_mutate_dict",
"tests/test_mutation.py::test_mutate_dict2",
"tests/test_mutation.py::test_performed_mutation_ids",
"tests/test_mutation.py::test_syntax_error",
"tests/test_mutation.py::test_bug_github_issue_18",
"tests/test_mutation.py::test_bug_github_issue_19",
"tests/test_mutation.py::test_bug_github_issue_26",
"tests/test_mutation.py::test_bug_github_issue_30",
"tests/test_mutation.py::test_bug_github_issue_77",
"tests/test_mutation.py::test_multiline_dunder_whitelist",
"tests/test_mutation.py::test_bug_github_issue_162",
"tests/test_mutation.py::test_bad_mutation_str_type_definition"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-11-03 15:42:32+00:00 | bsd-3-clause | 1,425 |
|
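The core idea of the boxed__mutmut-239 fix above, restated as a standalone sketch (it drops the int and suffix handling the real `number_mutation` keeps): floats outside a moderate magnitude window are doubled rather than incremented.

```python
def mutate_float_literal(value: str) -> str:
    parsed = float(value)
    # Adding 1 would leave huge literals like 1e100 unchanged (the +1 is lost to
    # floating-point rounding) and would turn every tiny literal into roughly 1.0,
    # so outside this window the value is doubled instead.
    if (1e-5 < abs(parsed) < 1e5) or parsed == 0.0:
        return repr(parsed + 1)
    return repr(parsed * 2)


assert mutate_float_literal("0.1") == "1.1"
assert mutate_float_literal("1e16") == "2e+16"  # repr(1e16 + 1) would be '1e+16' again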
bpython__bpython-691 | diff --git a/bpython/curtsiesfrontend/manual_readline.py b/bpython/curtsiesfrontend/manual_readline.py
index 35d28f54..223ec9e7 100644
--- a/bpython/curtsiesfrontend/manual_readline.py
+++ b/bpython/curtsiesfrontend/manual_readline.py
@@ -323,7 +323,7 @@ def titlecase_next_word(cursor_offset, line):
return cursor_offset, line # TODO Not implemented
-delete_word_from_cursor_back_re = LazyReCompile(r'\b\w')
+delete_word_from_cursor_back_re = LazyReCompile(r'^|\b\w')
@edit_keys.on('<Esc+BACKSPACE>')
| bpython/bpython | 307f855306ae8e0814458026b9fc47c1f25bf357 | diff --git a/bpython/test/test_manual_readline.py b/bpython/test/test_manual_readline.py
index 4141292d..faf4b585 100644
--- a/bpython/test/test_manual_readline.py
+++ b/bpython/test/test_manual_readline.py
@@ -240,6 +240,12 @@ class TestManualReadline(unittest.TestCase):
"|"],
delete_word_from_cursor_back)
+ self.try_stages_kill([
+ " (( asdf |",
+ " (( |",
+ "|"],
+ delete_word_from_cursor_back)
+
class TestEdits(unittest.TestCase):
| Option-delete doesn't delete a left paren
On line like
~~~
>>> (( asdf a sdf
>>> lots of space
~~~
pressing option-delete repeatedly doesn't delete the opening parens or leading spaces | 0.0 | 307f855306ae8e0814458026b9fc47c1f25bf357 | [
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_word_from_cursor_back"
]
| [
"bpython/test/test_manual_readline.py::TestManualReadline::test_back_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_backspace",
"bpython/test/test_manual_readline.py::TestManualReadline::test_beginning_of_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_from_cursor_back",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_from_cursor_forward",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_rest_of_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_word_to_cursor",
"bpython/test/test_manual_readline.py::TestManualReadline::test_end_of_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word_empty",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word_end",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word_tabs",
"bpython/test/test_manual_readline.py::TestManualReadline::test_last_word_pos",
"bpython/test/test_manual_readline.py::TestManualReadline::test_last_word_pos_single_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_left_arrow_at_non_zero",
"bpython/test/test_manual_readline.py::TestManualReadline::test_left_arrow_at_zero",
"bpython/test/test_manual_readline.py::TestManualReadline::test_right_arrow_at_end",
"bpython/test/test_manual_readline.py::TestManualReadline::test_right_arrow_at_non_end",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_character_before_cursor",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_empty_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_end_of_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_first_character",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_word_before_cursor",
"bpython/test/test_manual_readline.py::TestManualReadline::test_yank_prev_killed_text",
"bpython/test/test_manual_readline.py::TestManualReadline::test_yank_prev_prev_killed_text",
"bpython/test/test_manual_readline.py::TestEdits::test_config",
"bpython/test/test_manual_readline.py::TestEdits::test_functions_with_bad_return_values",
"bpython/test/test_manual_readline.py::TestEdits::test_functions_with_bad_signatures",
"bpython/test/test_manual_readline.py::TestEdits::test_seq"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2017-07-08 14:48:58+00:00 | mit | 1,426 |
|
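The one-character regex change in the bpython__bpython-691 entry above is easiest to see in isolation: with only `\b\w`, a stretch of punctuation and spaces before the cursor contains no match, so backwards word deletion has no position to stop at, while adding `^` guarantees a match at the start of the line.

```python
import re

old = re.compile(r"\b\w")    # pre-fix pattern from manual_readline.py
new = re.compile(r"^|\b\w")  # fixed pattern

line = " (( "  # what remains before the cursor once "asdf" has been deleted
print([m.start() for m in old.finditer(line)])  # [] -> nothing left to delete back to
print([m.start() for m in new.finditer(line)])  # [0] -> delete back to the start of the line
```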
bpython__bpython-920 | diff --git a/bpython/args.py b/bpython/args.py
index 7895d84a..79ddcc67 100644
--- a/bpython/args.py
+++ b/bpython/args.py
@@ -216,11 +216,17 @@ def parse(args, extras=None, ignore_stdin=False) -> Tuple:
def exec_code(interpreter, args):
"""
- Helper to execute code in a given interpreter. args should be a [faked]
- sys.argv
+ Helper to execute code in a given interpreter, e.g. to implement the behavior of python3 [-i] file.py
+
+ args should be a [faked] sys.argv.
"""
- with open(args[0]) as sourcefile:
- source = sourcefile.read()
+ try:
+ with open(args[0]) as sourcefile:
+ source = sourcefile.read()
+ except OSError as e:
+ # print an error and exit (if -i is specified the calling code will continue)
+ print(f"bpython: can't open file '{args[0]}: {e}", file=sys.stderr)
+ raise SystemExit(e.errno)
old_argv, sys.argv = sys.argv, args
sys.path.insert(0, os.path.abspath(os.path.dirname(args[0])))
spec = importlib.util.spec_from_loader("__console__", loader=None)
diff --git a/bpython/line.py b/bpython/line.py
index 7ced3bf1..b98302dd 100644
--- a/bpython/line.py
+++ b/bpython/line.py
@@ -4,6 +4,8 @@ All functions take cursor offset from the beginning of the line and the line of
Python code, and return None, or a tuple of the start index, end index, and the
word."""
+import re
+
from itertools import chain
from typing import Optional, NamedTuple
@@ -34,7 +36,41 @@ def current_word(cursor_offset: int, line: str) -> Optional[LinePart]:
return LinePart(start, end, word)
-_current_dict_key_re = LazyReCompile(r"""[\w_][\w0-9._]*\[([\w0-9._(), '"]*)""")
+# pieces of regex to match repr() of several hashable built-in types
+_match_all_dict_keys = r"""[^\]]*"""
+
+# https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+_match_single_quote_str_bytes = r"""
+ # bytes repr() begins with `b` character; bytes and str begin with `'`
+ b?'
+ # match escape sequence; this handles `\'` in the string repr()
+ (?:\\['"nabfrtvxuU\\]|
+ # or match any non-`\` and non-single-quote character (most of the string)
+ [^'\\])*
+ # matches hanging `\` or ending `'` if one is present
+ [\\']?
+"""
+
+# bytes and str repr() only uses double quotes if the string contains 1 or more
+# `'` character and exactly 0 `"` characters
+_match_double_quote_str_bytes = r"""
+ # bytes repr() begins with `b` character
+ b?"
+ # string continues until a `"` character is reached
+ [^"]*
+ # end matching at closing double-quote if one is present
+ "?"""
+
+# match valid identifier name followed by `[` character
+_match_dict_before_key = r"""[\w_][\w0-9._]*\["""
+
+_current_dict_key_re = LazyReCompile(
+ f"{_match_dict_before_key}((?:"
+ f"{_match_single_quote_str_bytes}|"
+ f"{_match_double_quote_str_bytes}|"
+ f"{_match_all_dict_keys}|)*)",
+ re.VERBOSE,
+)
def current_dict_key(cursor_offset: int, line: str) -> Optional[LinePart]:
@@ -45,7 +81,16 @@ def current_dict_key(cursor_offset: int, line: str) -> Optional[LinePart]:
return None
-_current_dict_re = LazyReCompile(r"""([\w_][\w0-9._]*)\[([\w0-9._(), '"]*)""")
+# capture valid identifier name if followed by `[` character
+_capture_dict_name = r"""([\w_][\w0-9._]*)\["""
+
+_current_dict_re = LazyReCompile(
+ f"{_capture_dict_name}((?:"
+ f"{_match_single_quote_str_bytes}|"
+ f"{_match_double_quote_str_bytes}|"
+ f"{_match_all_dict_keys}|)*)",
+ re.VERBOSE,
+)
def current_dict(cursor_offset: int, line: str) -> Optional[LinePart]:
| bpython/bpython | b66a29fde838f98a39c08ce1b94f6cc8b8c3e86d | diff --git a/bpython/test/test_line_properties.py b/bpython/test/test_line_properties.py
index fe1b0813..592a6176 100644
--- a/bpython/test/test_line_properties.py
+++ b/bpython/test/test_line_properties.py
@@ -178,10 +178,24 @@ class TestCurrentDictKey(LineTestCase):
self.assertAccess("asdf[<(>|]")
self.assertAccess("asdf[<(1>|]")
self.assertAccess("asdf[<(1,>|]")
+ self.assertAccess("asdf[<(1,)>|]")
self.assertAccess("asdf[<(1, >|]")
self.assertAccess("asdf[<(1, 2)>|]")
# TODO self.assertAccess('d[d[<12|>')
self.assertAccess("d[<'a>|")
+ self.assertAccess("object.dict['a'bcd'], object.dict[<'abc>|")
+ self.assertAccess("object.dict[<'a'bcd'>|], object.dict['abc")
+ self.assertAccess(r"object.dict[<'a\'\\\"\n\\'>|")
+ self.assertAccess("object.dict[<\"abc'>|")
+ self.assertAccess("object.dict[<(1, 'apple', 2.134>|]")
+ self.assertAccess("object.dict[<(1, 'apple', 2.134)>|]")
+ self.assertAccess("object.dict[<-1000>|")
+ self.assertAccess("object.dict[<-0.23948>|")
+ self.assertAccess("object.dict[<'\U0001ffff>|")
+ self.assertAccess(r"object.dict[<'a\'\\\"\n\\'>|]")
+ self.assertAccess(r"object.dict[<'a\'\\\"\n\\|[[]'>")
+ self.assertAccess('object.dict[<"a]bc[|]">]')
+ self.assertAccess("object.dict[<'abcd[]>|")
class TestCurrentDict(LineTestCase):
| Tab completion for dict key causes crash
This issue is not reproducible with several different `dict`s that I've tried, but it can be reproduced with the `dict` below.
```
my_dict = {
'Circle': {'Ellipse',
'Shape',
'ShapeDriver'},
'Ellipse': {'Shape'},
'Rectangle': {'Shape'},
'Shape': {'Circle'},
'ShapeDriver': {'Circle',
'Ellipse',
'Shape'},
'Square': {'Rectangle'}
}
```
Steps to recreate:
1. create new `dict`
2. type `my_dict[`
3. press <tab> key to use tab-completion
4. crash
```
~ % bpython
bpython version 0.21 on top of Python 3.8.2 /Library/Developer/CommandLineTools/usr/bin/python3
>>> my_dict = {
... 'a/Circle': {'Ellipse',
... 'Shape',
... 'ShapeDriver'},
...
... 'a/Ellipse': {'Shape'},
...
... 'a/Rectangle': {'Shape'},
...
... 'a/Shape': {'Circle'},
...
... 'a/ShapeDriver': {'Circle',
... 'Ellipse',
... 'Shape'},
...
... 'a/Square': {'Rectangle'}
... }
>>> my_dict['b/banana']
Traceback (most recent call last):
File "/Users/arian/Library/Python/3.8/bin/bpython", line 8, in <module>
sys.exit(main())
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/curtsies.py", line 201, in main
exit_value = repl.mainloop(True, paste)
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/curtsies.py", line 121, in mainloop
self.process_event_and_paint(e)
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/curtsies.py", line 85, in process_event_and_paint
self.process_event(e)
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/curtsiesfrontend/repl.py", line 615, in process_event
return self.process_key_event(e)
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/curtsiesfrontend/repl.py", line 757, in process_key_event
self.on_tab()
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/curtsiesfrontend/repl.py", line 891, in on_tab
cursor_and_line = self.matches_iter.substitute_cseq()
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/repl.py", line 297, in substitute_cseq
self.update(
File "/Users/arian/Library/Python/3.8/lib/python/site-packages/bpython/repl.py", line 318, in update
self.start, self.end, self.current_word = self.completer.locate(
TypeError: cannot unpack non-iterable NoneType object
```
Image below so you can see the colors as well:

| 0.0 | b66a29fde838f98a39c08ce1b94f6cc8b8c3e86d | [
"bpython/test/test_line_properties.py::TestCurrentDictKey::test_simple"
]
| [
"bpython/test/test_line_properties.py::TestHelpers::test_I",
"bpython/test/test_line_properties.py::TestHelpers::test_assert_access",
"bpython/test/test_line_properties.py::TestHelpers::test_decode",
"bpython/test/test_line_properties.py::TestHelpers::test_encode",
"bpython/test/test_line_properties.py::TestCurrentWord::test_dots",
"bpython/test/test_line_properties.py::TestCurrentWord::test_inside",
"bpython/test/test_line_properties.py::TestCurrentWord::test_non_dots",
"bpython/test/test_line_properties.py::TestCurrentWord::test_open_paren",
"bpython/test/test_line_properties.py::TestCurrentWord::test_simple",
"bpython/test/test_line_properties.py::TestCurrentDict::test_simple",
"bpython/test/test_line_properties.py::TestCurrentString::test_closed",
"bpython/test/test_line_properties.py::TestCurrentString::test_open",
"bpython/test/test_line_properties.py::TestCurrentObject::test_simple",
"bpython/test/test_line_properties.py::TestCurrentAttribute::test_simple",
"bpython/test/test_line_properties.py::TestCurrentFromImportFrom::test_simple",
"bpython/test/test_line_properties.py::TestCurrentFromImportImport::test_simple",
"bpython/test/test_line_properties.py::TestCurrentImport::test_simple",
"bpython/test/test_line_properties.py::TestMethodDefinitionName::test_simple",
"bpython/test/test_line_properties.py::TestSingleWord::test_simple",
"bpython/test/test_line_properties.py::TestCurrentExpressionAttribute::test_indexing",
"bpython/test/test_line_properties.py::TestCurrentExpressionAttribute::test_simple",
"bpython/test/test_line_properties.py::TestCurrentExpressionAttribute::test_strings",
"bpython/test/test_line_properties.py::TestCurrentExpressionAttribute::test_with_whitespace",
"bpython/test/test_line_properties.py::TestCurrentExpressionAttribute::test_without_dot",
"bpython/test/test_line_properties.py::TestCurrentDottedAttribute::test_simple"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-27 23:05:44+00:00 | mit | 1,427 |
|
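One ingredient of the bpython__bpython-920 crash above is visible in isolation: the character class in the pre-fix `_current_dict_key_re` (copied verbatim from the patch) contains no `/`, so a key such as `'a/Circle'` is captured only as `'a`; the fix replaces that class with alternatives that understand quoted string literals.

```python
import re

# Character class taken verbatim from the pre-fix _current_dict_key_re in bpython/line.py.
old_key_re = re.compile(r"""[\w_][\w0-9._]*\[([\w0-9._(), '"]*)""")

match = old_key_re.match("my_dict['a/Circle']")
print(match.group(1))  # "'a" -- the key is cut off at the first "/"
```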
bradleygolden__pytailor-15 | diff --git a/pytailor/__version__.py b/pytailor/__version__.py
index 57f0c12..8ed71d5 100644
--- a/pytailor/__version__.py
+++ b/pytailor/__version__.py
@@ -7,6 +7,6 @@
# |__| \/ \/
-VERSION = (0, 1, 4)
+VERSION = (0, 1, 5)
__version__ = ".".join(map(str, VERSION))
diff --git a/pytailor/tailor.py b/pytailor/tailor.py
index 5ee150b..e9675e7 100644
--- a/pytailor/tailor.py
+++ b/pytailor/tailor.py
@@ -46,8 +46,8 @@ class Tailor(dict):
for name, value in _store.items():
self[name] = value
- def watch_env_var(self, name: str):
- """Set configuration and watch a system wide environment variable."""
+ def from_envar(self, name: str):
+ """Set configuration from an environment variable."""
value = os.getenv(name)
if not value:
warn_msg = f"Environment variable '{name}' not found."
| bradleygolden/pytailor | 02c1bc028bc86c18562e8f0ec864787c475cb402 | diff --git a/tests/test_tailor.py b/tests/test_tailor.py
index db3c231..f2fbdae 100644
--- a/tests/test_tailor.py
+++ b/tests/test_tailor.py
@@ -49,26 +49,26 @@ def test_from_object_and_then_dotenv(env_path):
assert config["TESTING"] is False
-def test_watch_env_var_that_doesnt_exist_raises_warning():
+def test_from_envar_that_doesnt_exist_raises_warning():
config = Tailor()
with pytest.warns(RuntimeWarning) as warn:
- config.watch_env_var("BAR")
+ config.from_envar("BAR")
assert len(warn) == 1
assert "not found" in warn[0].message.args[0]
-def test_watch_env_var_that_doesnt_exist_but_exists_in_config_object():
+def test_from_envar_that_doesnt_exist_but_exists_in_config_object():
config = Tailor()
config["BAR"] = "BAZ"
- config.watch_env_var("BAR")
+ config.from_envar("BAR")
assert "BAR" in config
assert config["BAR"] == "BAZ"
-def test_watch_env_var_and_change_after_watching():
+def test_from_envar_and_change_after_watching():
config = Tailor()
- config.watch_env_var("FOO")
+ config.from_envar("FOO")
assert config["FOO"] == "BAR"
os.environ["FOO"] = "BAZ"
assert config["FOO"] == "BAZ"
@@ -76,7 +76,7 @@ def test_watch_env_var_and_change_after_watching():
def test_env_var_is_set_then_gets_removed():
config = Tailor()
- config.watch_env_var("FOO")
+ config.from_envar("FOO")
del os.environ["FOO"]
# check original value was backed up
assert config["FOO"] == "BAR"
| Rename watch_env_var
watch_env_var is confusingly named. It should be called something like "from_env_var"
For example
```
config.watch_env_var("FOO")
```
could be something like:
```
config.from_environ("FOO")
# or
config.from_env_var("FOO")
# or
config.from_envar("FOO")
```
or perhaps we can get more creative with something like:
```
config.environ["FOO"]
config.environ.get("FOO")
```
Where environ is a dictionary like object that takes brackets or the .get() method. | 0.0 | 02c1bc028bc86c18562e8f0ec864787c475cb402 | [
"tests/test_tailor.py::test_from_envar_that_doesnt_exist_raises_warning",
"tests/test_tailor.py::test_from_envar_that_doesnt_exist_but_exists_in_config_object",
"tests/test_tailor.py::test_from_envar_and_change_after_watching",
"tests/test_tailor.py::test_env_var_is_set_then_gets_removed"
]
| [
"tests/test_tailor.py::test_from_object",
"tests/test_tailor.py::test_from_dotenv",
"tests/test_tailor.py::test_from_object_and_then_dotenv",
"tests/test_tailor.py::test_user_can_change_value_manually",
"tests/test_tailor.py::test_user_can_use_number_types_with_dotenv",
"tests/test_tailor.py::test_dunder_str",
"tests/test_tailor.py::test_dunder_eq",
"tests/test_tailor.py::test_dunder_ne"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2018-10-04 11:42:26+00:00 | mit | 1,428 |
|
brandon-rhodes__python-sgp4-73 | diff --git a/sgp4/exporter.py b/sgp4/exporter.py
index 759c034..32d6baf 100644
--- a/sgp4/exporter.py
+++ b/sgp4/exporter.py
@@ -30,7 +30,10 @@ def export_tle(satrec):
append(satrec.intldesg.ljust(8, " ") + " ")
# Add epoch year and days in YYDDD.DDDDDDDD format
- append(str(satrec.epochyr).zfill(2) + "{:012.8f}".format(satrec.epochdays) + " ")
+ epochyr = satrec.epochyr
+ # Undo non-standard 4-digit year for old satrec objects
+ epochyr %= 100
+ append(str(epochyr).zfill(2) + "{:012.8f}".format(satrec.epochdays) + " ")
# Add First Time Derivative of the Mean Motion (don't use "+")
append("{0: 8.8f}".format(satrec.ndot * (_xpdotp * 1440.0)).replace("0", "", 1) + " ")
| brandon-rhodes/python-sgp4 | c959af39f32d4ba9a88b8e9cfeed394d77a86e51 | diff --git a/sgp4/tests.py b/sgp4/tests.py
index ba63b1d..3fb6d41 100644
--- a/sgp4/tests.py
+++ b/sgp4/tests.py
@@ -198,14 +198,18 @@ def test_tle_export():
line1 = line1[:69]
line2 = line2[:69]
satrec = Satrec.twoline2rv(line1, line2)
+ satrec_old = io.twoline2rv(line1, line2, wgs72)
# Generate TLE from satrec
out_line1, out_line2 = export_tle(satrec)
+ out_line1_old, out_line2_old = export_tle(satrec_old)
if satrec.satnum not in expected_errs_line1:
assertEqual(out_line1, line1)
+ assertEqual(out_line1_old, line1)
if satrec.satnum not in expected_errs_line2:
assertEqual(out_line2, line2)
+ assertEqual(out_line2_old, line2)
def test_export_tle_raises_error_for_out_of_range_angles():
# See https://github.com/brandon-rhodes/python-sgp4/issues/70
| export_tle roundtrip issues
While studying #70, I discovered a roundtrip problem with `export_tle`:
```
>>> lines = """1 46272U 20061A 20311.82207422 .00000083 00000-0 68546-5 0 9999
... 2 46272 97.3995 23.7575 0006827 352.5560 7.5569 15.20743438 9810"""
>>> print(lines)
1 46272U 20061A 20311.82207422 .00000083 00000-0 68546-5 0 9999
2 46272 97.3995 23.7575 0006827 352.5560 7.5569 15.20743438 9810
>>> sat = twoline2rv(*lines.splitlines(), wgs84)
>>> print("\n".join(export_tle(sat)))
1 46272U 20061A 2020311.82207422 .00000083 00000-0 68546-5 0 9993
2 46272 97.3995 23.7575 0006827 352.5560 7.5569 15.20743438 9810
```
And I think it also happens with other examples in https://github.com/brandon-rhodes/python-sgp4/blob/master/sgp4/SGP4-VER.TLE. | 0.0 | c959af39f32d4ba9a88b8e9cfeed394d77a86e51 | [
"sgp4/tests.py::test_tle_export"
]
| [
"sgp4/tests.py::test_satrec_built_with_twoline2rv",
"sgp4/tests.py::test_legacy_built_with_twoline2rv",
"sgp4/tests.py::test_satrec_initialized_with_sgp4init",
"sgp4/tests.py::test_satrec_initialized_with_sgp4init_in_afspc_mode",
"sgp4/tests.py::test_legacy_initialized_with_sgp4init",
"sgp4/tests.py::test_days2mdhms",
"sgp4/tests.py::test_jday2",
"sgp4/tests.py::test_jday_datetime",
"sgp4/tests.py::test_sat_epoch_datetime",
"sgp4/tests.py::test_good_tle_checksum",
"sgp4/tests.py::test_bad_tle_checksum",
"sgp4/tests.py::test_export_tle_raises_error_for_out_of_range_angles",
"sgp4/tests.py::test_all_three_gravity_models_with_twoline2rv",
"sgp4/tests.py::test_all_three_gravity_models_with_sgp4init",
"sgp4/tests.py::test_intldesg_with_6_characters",
"sgp4/tests.py::test_intldesg_with_7_characters",
"sgp4/tests.py::test_hyperbolic_orbit",
"sgp4/tests.py::test_correct_epochyr",
"sgp4/tests.py::test_legacy_epochyr",
"sgp4/tests.py::test_support_for_old_no_attribute",
"sgp4/tests.py::test_months_and_days",
"sgp4/tests.py::test_december_32",
"sgp4/tests.py::test_bad_first_line",
"sgp4/tests.py::test_bad_second_line",
"sgp4/tests.py::test_mismatched_lines",
"sgp4/tests.py::test_satrec_against_tcppver_using_julian_dates",
"sgp4/tests.py::test_satrec_against_tcppver_using_tsince",
"sgp4/tests.py::test_legacy_against_tcppver",
"sgp4/tests.py::test_omm_xml_matches_old_tle",
"sgp4/tests.py::test_omm_csv_matches_old_tle"
]
| {
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
} | 2020-11-14 14:06:18+00:00 | mit | 1,429 |
|
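The roundtrip defect in the python-sgp4 entry above comes down to legacy `io.twoline2rv` objects carrying a four-digit `epochyr`. A standalone sketch of the exporter's epoch formatting with the `%= 100` guard from the patch (the helper name here is illustrative):

```python
def format_epoch_field(epochyr: int, epochdays: float) -> str:
    # Old-style satrec objects store e.g. 2020 rather than 20; reduce modulo 100
    # so the TLE epoch column stays two digits wide.
    epochyr %= 100
    return str(epochyr).zfill(2) + "{:012.8f}".format(epochdays)


assert format_epoch_field(2020, 311.82207422) == "20311.82207422"  # legacy object
assert format_epoch_field(20, 311.82207422) == "20311.82207422"    # new-style object
```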
brettcannon__mousebender-100 | diff --git a/docs/simple.rst b/docs/simple.rst
index 17ddd96..50eaa12 100644
--- a/docs/simple.rst
+++ b/docs/simple.rst
@@ -21,6 +21,15 @@
.. versionadded:: 2022.1.0
+.. autoexception:: UnsupportedAPIVersion
+
+ .. versionadded:: 2023.0.0
+
+
+.. autoexception:: APIVersionWarning
+
+ .. versionadded:: 2023.0.0
+
.. autoexception:: UnsupportedMIMEType
.. versionadded:: 2022.1.0
diff --git a/mousebender/simple.py b/mousebender/simple.py
index 9659eee..7c2f1f4 100644
--- a/mousebender/simple.py
+++ b/mousebender/simple.py
@@ -5,9 +5,9 @@ This module helps with the JSON-based Simple repository API by providing
responses, functions are provided to convert the HTML to the equivalent JSON
response.
-This module implements :pep:`503`, :pep:`592`, :pep:`658`, and :pep:`691` of the
-:external:ref:`Simple repository API <simple-repository-api>` (it forgoes
-:pep:`629` as :pep:`691` makes it obsolete).
+This module implements :pep:`503`, :pep:`592`, :pep:`629`, :pep:`658`,
+:pep:`691`, and :pep:`700` of the
+:external:ref:`Simple repository API <simple-repository-api>`.
"""
from __future__ import annotations
@@ -16,6 +16,7 @@ import html
import html.parser
import json
import urllib.parse
+import warnings
from typing import Any, Dict, List, Optional, Union
import packaging.utils
@@ -47,6 +48,22 @@ ACCEPT_SUPPORTED = ", ".join(
:func:`parse_project_details` support."""
+class UnsupportedAPIVersion(Exception):
+ """The major version of an API response is not supported."""
+
+ def __init__(self, version: str) -> None:
+ """Initialize the exception with a message based on the provided version."""
+ super().__init__(f"Unsupported API major version: {version!r}")
+
+
+class APIVersionWarning(Warning):
+ """The minor version of an API response is not supported."""
+
+ def __init__(self, version: str) -> None:
+ """Initialize the warning with a message based on the provided version."""
+ super().__init__(f"Unsupported API minor version: {version!r}")
+
+
class UnsupportedMIMEType(Exception):
"""An unsupported MIME type was provided in a ``Content-Type`` header."""
@@ -139,6 +156,21 @@ class ProjectDetails_1_1(TypedDict):
ProjectDetails: TypeAlias = Union[ProjectDetails_1_0, ProjectDetails_1_1]
+def _check_version(tag: str, attrs: Dict[str, Optional[str]]) -> None:
+ if (
+ tag == "meta"
+ and attrs.get("name") == "pypi:repository-version"
+ and "content" in attrs
+ and attrs["content"]
+ ):
+ version = attrs["content"]
+ major_version, minor_version = map(int, version.split("."))
+ if major_version != 1:
+ raise UnsupportedAPIVersion(version)
+ elif minor_version > 1:
+ warnings.warn(APIVersionWarning(version), stacklevel=7)
+
+
class _SimpleIndexHTMLParser(html.parser.HTMLParser):
# PEP 503:
# Within a repository, the root URL (/) MUST be a valid HTML5 page with a
@@ -150,8 +182,9 @@ class _SimpleIndexHTMLParser(html.parser.HTMLParser):
self.names: List[str] = []
def handle_starttag(
- self, tag: str, _attrs_list: list[tuple[str, Optional[str]]]
+ self, tag: str, attrs_list: list[tuple[str, Optional[str]]]
) -> None:
+ _check_version(tag, dict(attrs_list))
if tag != "a":
return
self._parsing_anchor = True
@@ -186,6 +219,7 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
self, tag: str, attrs_list: list[tuple[str, Optional[str]]]
) -> None:
attrs = dict(attrs_list)
+ _check_version(tag, attrs)
if tag != "a":
return
# PEP 503:
diff --git a/noxfile.py b/noxfile.py
index 506227e..a1255a0 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -3,7 +3,7 @@
All sessions prefixed with `check_` are non-destructive.
"""
-import nox
+import nox # type: ignore[import]
python_versions = ["3.7", "3.8", "3.9", "3.10", "3.11"]
diff --git a/pyproject.toml b/pyproject.toml
index 16dbff0..846c4eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "flit_core.buildapi"
[project]
name = "mousebender"
-version = "2022.1.0"
+version = "2023.0.0"
authors = [
{ name = "Brett Cannon", email = "[email protected]" },
{ name = "Derek Keeler", email = "[email protected]" },
@@ -54,7 +54,7 @@ report.fail_under = 100
profile = "black"
[tool.ruff]
-select = ["E", "F", "W", "D", "C", "B", "A", "ANN", "RUF", "I"]
+select = ["E", "F", "W", "D", "B", "A", "ANN", "RUF", "I"]
ignore = ["E501", "D203", "D213", "ANN101"]
[tool.ruff.per-file-ignores]
| brettcannon/mousebender | 28e05b408381375a5ce5498359cc01545becc5ae | diff --git a/tests/test_simple.py b/tests/test_simple.py
index d9cc92e..d44c500 100644
--- a/tests/test_simple.py
+++ b/tests/test_simple.py
@@ -1,5 +1,6 @@
"""Tests for mousebender.simple."""
import json
+import warnings
from typing import Dict, Union
import importlib_resources
@@ -331,6 +332,55 @@ class TestProjectDetailsParsing:
assert details["files"][0].get("yanked") == expected
+class TestPEP629Versioning:
+ @pytest.mark.parametrize(["version"], [("",), ("1.0",), ("1.1",)])
+ def test_supported_versions(self, version):
+ if not version:
+ meta_tag = ""
+ else:
+ meta_tag = f'<meta name="pypi:repository-version" content="{version}">'
+
+ index_html = (
+ f"<!DOCTYPE html><html><head>{meta_tag}</head>"
+ '<body><a href="/spamspamspam/">spamspamspam</a></body></html>'
+ )
+
+ assert simple.from_project_index_html(index_html)
+
+ details_html = (
+ f"<!DOCTYPE html><html><head>{meta_tag}</head>"
+ '<body><a href="mousebender-2022.1.0-py3-none-any.whl">'
+ "mousebender-2022.1.0-py3-none-any.whl/a></body></html>"
+ )
+
+ assert simple.from_project_details_html(details_html, "mousebender")
+
+ @pytest.mark.parametrize(["version"], [("0.1",), ("2.0",), ("2.1",), ("10.0",)])
+ def test_unsupported_major_versions(self, version):
+ meta_tag = f'<meta name="pypi:repository-version" content="{version}">'
+ index_html = (
+ f"<!DOCTYPE html><html><head>{meta_tag}</head>"
+ '<body><a href="/spamspamspam/">spamspamspam</a></body></html>'
+ )
+
+ with pytest.raises(simple.UnsupportedAPIVersion):
+ simple.from_project_index_html(index_html)
+
+ @pytest.mark.parametrize(["minor_version"], [("2",), ("10",)])
+ def test_unsupported_minor_version(self, minor_version):
+ meta_tag = f'<meta name="pypi:repository-version" content="1.{minor_version}">'
+ details_html = (
+ f"<!DOCTYPE html><html><head>{meta_tag}</head>"
+ '<body><a href="mousebender-2022.1.0-py3-none-any.whl">'
+ "mousebender-2022.1.0-py3-none-any.whl/a></body></html>"
+ )
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ with pytest.raises(simple.APIVersionWarning):
+ simple.from_project_details_html(details_html, "mousebender")
+
+
class TestPEP658Metadata:
def test_default(self):
html = '<a href="spam-1.2.3-py3.none.any.whl">spam-1.2.3-py3.none.any.whl</a>'
| Support PEP 629 and 700 for HTML responses
It technically doesn't change the outcome, but https://peps.python.org/pep-0700/#specification says HTML is supported, so proper detection of HTML versions is necessary (i.e. https://peps.python.org/pep-0629/ ). | 0.0 | 28e05b408381375a5ce5498359cc01545becc5ae | [
"tests/test_simple.py::TestPEP629Versioning::test_unsupported_major_versions[0.1]",
"tests/test_simple.py::TestPEP629Versioning::test_unsupported_major_versions[2.0]",
"tests/test_simple.py::TestPEP629Versioning::test_unsupported_major_versions[2.1]",
"tests/test_simple.py::TestPEP629Versioning::test_unsupported_major_versions[10.0]",
"tests/test_simple.py::TestPEP629Versioning::test_unsupported_minor_version[2]",
"tests/test_simple.py::TestPEP629Versioning::test_unsupported_minor_version[10]"
]
| [
"tests/test_simple.py::TestProjectURLConstruction::test_url_joining[/simple/]",
"tests/test_simple.py::TestProjectURLConstruction::test_url_joining[/simple]",
"tests/test_simple.py::TestProjectURLConstruction::test_project_name_lowercased",
"tests/test_simple.py::TestProjectURLConstruction::test_project_name_normalized",
"tests/test_simple.py::TestProjectURLConstruction::test_only_project_name_in_url_normalized",
"tests/test_simple.py::TestProjectURLConstruction::test_no_base_url",
"tests/test_simple.py::TestRepoIndexParsing::test_full_parse[pypi-212862-numpy]",
"tests/test_simple.py::TestRepoIndexParsing::test_full_parse[piwheels-263872-django-node]",
"tests/test_simple.py::TestRepoIndexParsing::test_no_cdata",
"tests/test_simple.py::TestRepoIndexParsing::test_project_name_not_normalized",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[numpy-1402-expected_file_details0]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[pulpcore-client-370-expected_file_details1]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[pytorch-522-expected_file_details2]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[aicoe-tensorflow-15-expected_file_details3]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[numpy-piwheels-316-expected_file_details4]",
"tests/test_simple.py::TestProjectDetailsParsing::test_filename[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_url[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_no_href",
"tests/test_simple.py::TestProjectDetailsParsing::test_requires_python[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_hashes[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_gpg_sig[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[sole",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[`data-yanked`",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[`data-yanked",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[no",
"tests/test_simple.py::TestPEP629Versioning::test_supported_versions[]",
"tests/test_simple.py::TestPEP629Versioning::test_supported_versions[1.0]",
"tests/test_simple.py::TestPEP629Versioning::test_supported_versions[1.1]",
"tests/test_simple.py::TestPEP658Metadata::test_default",
"tests/test_simple.py::TestPEP658Metadata::test_attribute_only[data-dist-info-metadata]",
"tests/test_simple.py::TestPEP658Metadata::test_attribute_only[data-dist-info-metadata=true]",
"tests/test_simple.py::TestPEP658Metadata::test_hash[data-dist-info-metadata=\"sha256=abcdef\"]",
"tests/test_simple.py::TestPEP658Metadata::test_hash[data-dist-info-metadata=\"SHA256=abcdef\"]",
"tests/test_simple.py::TestParseProjectIndex::test_json",
"tests/test_simple.py::TestParseProjectIndex::test_html[application/vnd.pypi.simple.v1+html]",
"tests/test_simple.py::TestParseProjectIndex::test_html[text/html]",
"tests/test_simple.py::TestParseProjectIndex::test_invalid_content_type",
"tests/test_simple.py::TestParseProjectDetails::test_json",
"tests/test_simple.py::TestParseProjectDetails::test_html[application/vnd.pypi.simple.v1+html]",
"tests/test_simple.py::TestParseProjectDetails::test_html[text/html]",
"tests/test_simple.py::TestParseProjectDetails::test_invalid_content_type"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-04-12 00:09:53+00:00 | bsd-3-clause | 1,430 |
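
The PEP 629 tests above split version handling into two cases: a `pypi:repository-version` meta tag with an unexpected major version raises `UnsupportedAPIVersion`, while a newer minor version only emits an `APIVersionWarning`. A minimal sketch of that rule, assuming the version string has already been read out of the meta tag (the helper and constants below are illustrative, not mousebender's actual implementation):

```python
import warnings

_SUPPORTED_MAJOR = 1   # major version this client implements
_SUPPORTED_MINOR = 0   # highest minor version it fully understands


class UnsupportedAPIVersion(Exception):
    """The index declares a different major version of the Simple API."""


class APIVersionWarning(Warning):
    """The index declares a newer minor version than this client knows."""


def check_repository_version(version: str) -> None:
    # Mirrors the tests above: any other major version is an error,
    # a newer minor version is only a warning.
    major, _, minor = version.partition(".")
    if int(major) != _SUPPORTED_MAJOR:
        raise UnsupportedAPIVersion(f"unsupported repository version: {version}")
    if int(minor) > _SUPPORTED_MINOR:
        warnings.warn(f"newer minor version than supported: {version}", APIVersionWarning)
```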
|
brettcannon__mousebender-70 | diff --git a/.vscode/settings.json b/.vscode/settings.json
index 80b124b..ce66456 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -4,5 +4,6 @@
"tests"
],
"python.formatting.provider": "black",
- "python.linting.flake8Enabled": true
+ "python.linting.flake8Enabled": true,
+ "python.linting.enabled": true
}
diff --git a/mousebender/simple.py b/mousebender/simple.py
index e926b46..1308a1d 100644
--- a/mousebender/simple.py
+++ b/mousebender/simple.py
@@ -1,16 +1,15 @@
"""Parsing for PEP 503 -- Simple Repository API."""
import html
import html.parser
-import re
import urllib.parse
import warnings
-from typing import List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple
import attr
import packaging.specifiers
+import packaging.utils
-_NORMALIZE_RE = re.compile(r"[-_.]+")
PYPI_INDEX = "https://pypi.org/simple/"
@@ -32,12 +31,10 @@ class UnsupportedVersionWarning(Warning, UnsupportedVersion):
"""
-def create_project_url(base_url, project_name):
+def create_project_url(base_url: str, project_name: str) -> str:
"""Construct the project URL for a repository following PEP 503."""
if base_url and not base_url.endswith("/"):
- base_url += "/"
- # https://www.python.org/dev/peps/pep-0503/#normalized-names
- normalized_project_name = _NORMALIZE_RE.sub("-", project_name).lower()
+ base_url += "/" # Normalize for easier use w/ str.join() later.
# PEP 503:
# The format of this URL is /<project>/ where the <project> is replaced by
# the normalized name for that project, so a project named "HolyGrail" would
@@ -45,7 +42,7 @@ def create_project_url(base_url, project_name):
#
# All URLs which respond with an HTML5 page MUST end with a / and the
# repository SHOULD redirect the URLs without a / to add a / to the end.
- return "".join([base_url, normalized_project_name, "/"])
+ return "".join([base_url, packaging.utils.canonicalize_name(project_name), "/"])
def _normalize_project_url(url):
@@ -118,26 +115,55 @@ class _SimpleIndexHTMLParser(html.parser.HTMLParser):
self._name = data
-def parse_repo_index(html):
+def parse_repo_index(html: str) -> Dict[str, str]:
"""Parse the HTML of a repository index page."""
parser = _SimpleIndexHTMLParser()
parser.feed(html)
return parser.mapping
[email protected]
[email protected](kw_only=True)
class ArchiveLink:
"""Data related to a link to an archive file."""
filename: str
url: str
- requires_python: packaging.specifiers.SpecifierSet
+ requires_python: packaging.specifiers.SpecifierSet = (
+ packaging.specifiers.SpecifierSet("")
+ )
hash_: Optional[Tuple[str, str]] = None
gpg_sig: Optional[bool] = None
- yanked: Tuple[bool, str] = (False, "")
+ yanked: Optional[str] = None # Is `""` if no message provided.
metadata: Optional[Tuple[str, str]] = None # No hash leads to a `("", "")` tuple.
+ def __str__(self) -> str:
+ attrs = []
+ if self.requires_python:
+ requires_str = str(self.requires_python)
+ escaped_requires = html.escape(requires_str)
+ attrs.append(f'data-requires-python="{escaped_requires}"')
+ if self.gpg_sig is not None:
+ attrs.append(f"data-gpg-sig={str(self.gpg_sig).lower()}")
+ if self.yanked is not None:
+ if self.yanked:
+ attrs.append(f'data-yanked="{self.yanked}"')
+ else:
+ attrs.append("data-yanked")
+ if self.metadata:
+ hash_algorithm, hash_value = self.metadata
+ if hash_algorithm:
+ attrs.append(f'data-dist-info-metadata="{hash_algorithm}={hash_value}"')
+ else:
+ attrs.append("data-dist-info-metadata")
+
+ url = self.url
+ if self.hash_:
+ hash_algorithm, hash_value = self.hash_
+ url += f"#{hash_algorithm}={hash_value}"
+
+ return f'<a href="{url}" {" ".join(attrs)}>{self.filename}</a>'
+
class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
def __init__(self):
@@ -160,34 +186,36 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
_, _, raw_filename = parsed_url.path.rpartition("/")
filename = urllib.parse.unquote(raw_filename)
url = urllib.parse.urlunparse((*parsed_url[:5], ""))
- hash_ = None
+ args: Dict[str, Any] = {"filename": filename, "url": url}
# PEP 503:
# The URL SHOULD include a hash in the form of a URL fragment with the
# following syntax: #<hashname>=<hashvalue> ...
if parsed_url.fragment:
hash_algo, hash_value = parsed_url.fragment.split("=", 1)
- hash_ = hash_algo.lower(), hash_value
+ args["hash_"] = hash_algo.lower(), hash_value
# PEP 503:
# A repository MAY include a data-requires-python attribute on a file
# link. This exposes the Requires-Python metadata field ...
        # In the attribute value, < and > have to be HTML encoded as &lt; and
        # &gt;, respectively.
- requires_python_data = html.unescape(attrs.get("data-requires-python", ""))
- requires_python = packaging.specifiers.SpecifierSet(requires_python_data)
+ if "data-requires-python" in attrs:
+ requires_python_data = html.unescape(attrs["data-requires-python"])
+ args["requires_python"] = packaging.specifiers.SpecifierSet(
+ requires_python_data
+ )
# PEP 503:
# A repository MAY include a data-gpg-sig attribute on a file link with
# a value of either true or false ...
- gpg_sig = attrs.get("data-gpg-sig")
- if gpg_sig:
- gpg_sig = gpg_sig == "true"
+ if "data-gpg-sig" in attrs:
+ args["gpg_sig"] = attrs["data-gpg-sig"] == "true"
# PEP 592:
# Links in the simple repository MAY have a data-yanked attribute which
# may have no value, or may have an arbitrary string as a value.
- yanked = "data-yanked" in attrs, attrs.get("data-yanked") or ""
+ if "data-yanked" in attrs:
+ args["yanked"] = attrs.get("data-yanked") or ""
# PEP 658:
# ... each anchor tag pointing to a distribution MAY have a
# data-dist-info-metadata attribute.
- metadata = None
if "data-dist-info-metadata" in attrs:
metadata = attrs.get("data-dist-info-metadata")
if metadata and metadata != "true":
@@ -202,12 +230,9 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
# The repository MAY use true as the attribute's value if a hash
# is unavailable.
metadata = "", ""
+ args["metadata"] = metadata
- self.archive_links.append(
- ArchiveLink(
- filename, url, requires_python, hash_, gpg_sig, yanked, metadata
- )
- )
+ self.archive_links.append(ArchiveLink(**args))
def parse_archive_links(html: str) -> List[ArchiveLink]:
| brettcannon/mousebender | 13cb69c76176dd7b6b8d7b91cf04e54373a87b8a | diff --git a/tests/test_simple.py b/tests/test_simple.py
index d9e53f4..2f403aa 100644
--- a/tests/test_simple.py
+++ b/tests/test_simple.py
@@ -105,6 +105,63 @@ class TestRepoIndexParsing:
assert index["django-node"] == "django-node/"
+class TestArchiveLink:
+
+ """Tests for mousebender.simple.ArchiveLink."""
+
+ @pytest.mark.parametrize(
+ "archive_link",
+ [
+ simple.ArchiveLink(filename="B", url="A/B"),
+ simple.ArchiveLink(
+ filename="B",
+ url="A/B",
+ requires_python=packaging.specifiers.SpecifierSet(">=3.6"),
+ ),
+ simple.ArchiveLink(
+ filename="B",
+ url="A/B",
+ hash_=("sha256", "ABCDEF"),
+ ),
+ simple.ArchiveLink(
+ filename="B",
+ url="A/B",
+ gpg_sig=True,
+ ),
+ simple.ArchiveLink(filename="B", url="A/B", yanked=""),
+ simple.ArchiveLink(filename="B", url="A/B", yanked="oops!"),
+ simple.ArchiveLink(filename="B", url="A/B", metadata=("", "")),
+ simple.ArchiveLink(filename="B", url="A/B", metadata=("sha256", "ABCDEF")),
+ simple.ArchiveLink(
+ filename="B",
+ url="A/B",
+ requires_python=packaging.specifiers.SpecifierSet(">=3.6"),
+ hash_=("sha256", "ABCDEF"),
+ gpg_sig=True,
+ yanked="oops!",
+ metadata=("sha512", "GHIJKL"),
+ ),
+ ],
+ )
+ def test_str(self, archive_link):
+ """Make sure __str__ roundtrips."""
+ html = str(archive_link)
+ roundtrip = simple.parse_archive_links(html)
+ assert len(roundtrip) == 1
+ print(html)
+ print(roundtrip[0])
+ assert archive_link == roundtrip[0]
+
+ def test_str_escaping(self):
+ """data-requires-python must have an escaped value."""
+ archive_link = simple.ArchiveLink(
+ filename="B",
+ url="A/B",
+ requires_python=packaging.specifiers.SpecifierSet(">=3.6"),
+ )
+        assert "&gt;=3.6" in str(archive_link)
+
+
class TestParseArchiveLinks:
"""Tests for mousebender.simple.parse_archive_links()."""
@@ -116,66 +173,76 @@ class TestParseArchiveLinks:
"numpy",
1402,
simple.ArchiveLink(
- "numpy-1.13.0rc1-cp36-none-win_amd64.whl",
- "https://files.pythonhosted.org/packages/5c/2e/5c0eee0635035a7e0646734e2b9388e17a97f6f2087e15141a218b6f2b6d/numpy-1.13.0rc1-cp36-none-win_amd64.whl",
- packaging.specifiers.SpecifierSet(
+ filename="numpy-1.13.0rc1-cp36-none-win_amd64.whl",
+ url="https://files.pythonhosted.org/packages/5c/2e/5c0eee0635035a7e0646734e2b9388e17a97f6f2087e15141a218b6f2b6d/numpy-1.13.0rc1-cp36-none-win_amd64.whl",
+ requires_python=packaging.specifiers.SpecifierSet(
">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*"
),
- (
+ hash_=(
"sha256",
"8e8e1ccf025c8b6a821f75086a364a68d9e1877519a35bf8facec9e5120836f4",
),
- None,
+ gpg_sig=None,
+ yanked=None,
+ metadata=None,
),
),
(
"pulpcore-client",
370,
simple.ArchiveLink(
- "pulpcore_client-3.1.0.dev1578940535-py3-none-any.whl",
- "https://files.pythonhosted.org/packages/ca/7e/e14e41dc4bc60208f597f346d57755636e882be7509179c4e7c11f2c60a9/pulpcore_client-3.1.0.dev1578940535-py3-none-any.whl",
- packaging.specifiers.SpecifierSet(),
- (
+ filename="pulpcore_client-3.1.0.dev1578940535-py3-none-any.whl",
+ url="https://files.pythonhosted.org/packages/ca/7e/e14e41dc4bc60208f597f346d57755636e882be7509179c4e7c11f2c60a9/pulpcore_client-3.1.0.dev1578940535-py3-none-any.whl",
+ requires_python=packaging.specifiers.SpecifierSet(),
+ hash_=(
"sha256",
"83a3759d7b6af33083b0d4893d53615fc045cbad9adde68a8df02e25b1862bc6",
),
- None,
+ gpg_sig=None,
+ yanked=None,
+ metadata=None,
),
),
(
"pytorch",
522,
simple.ArchiveLink(
- "torchvision-0.5.0+cu100-cp36-cp36m-linux_x86_64.whl",
- "cu100/torchvision-0.5.0%2Bcu100-cp36-cp36m-linux_x86_64.whl",
- packaging.specifiers.SpecifierSet(),
- None,
- None,
+ filename="torchvision-0.5.0+cu100-cp36-cp36m-linux_x86_64.whl",
+ url="cu100/torchvision-0.5.0%2Bcu100-cp36-cp36m-linux_x86_64.whl",
+ requires_python=packaging.specifiers.SpecifierSet(),
+ hash_=None,
+ gpg_sig=None,
+ yanked=None,
+ metadata=None,
),
),
(
"AICoE-tensorflow",
15,
simple.ArchiveLink(
- "tensorflow-2.0.0-cp37-cp37m-linux_x86_64.whl",
- "tensorflow-2.0.0-cp37-cp37m-linux_x86_64.whl",
- packaging.specifiers.SpecifierSet(),
- None,
- None,
+ filename="tensorflow-2.0.0-cp37-cp37m-linux_x86_64.whl",
+ url="tensorflow-2.0.0-cp37-cp37m-linux_x86_64.whl",
+ requires_python=packaging.specifiers.SpecifierSet(),
+ hash_=None,
+ gpg_sig=None,
+ yanked=None,
+ metadata=None,
),
),
(
"numpy-piwheels",
316,
simple.ArchiveLink(
- "numpy-1.10.4-cp35-cp35m-linux_armv7l.whl",
- "numpy-1.10.4-cp35-cp35m-linux_armv7l.whl",
- packaging.specifiers.SpecifierSet(),
- (
+ filename="numpy-1.10.4-cp35-cp35m-linux_armv7l.whl",
+ url="numpy-1.10.4-cp35-cp35m-linux_armv7l.whl",
+ requires_python=packaging.specifiers.SpecifierSet(),
+ hash_=(
"sha256",
"5768279588a4766adb0211bbaa0f5857be38483c5aafe5d1caecbcd32749966e",
),
- None,
+ gpg_sig=None,
+ yanked=None,
+ metadata=None,
),
),
],
@@ -298,19 +365,19 @@ class TestParseArchiveLinks:
[
(
'<a href="spam-1.2.3-py3.none.any.whl" data-yanked>spam-1.2.3-py3.none.any.whl</a>',
- (True, ""),
+ "",
),
(
'<a href="spam-1.2.3-py3.none.any.whl" data-yanked="oops!">spam-1.2.3-py3.none.any.whl</a>',
- (True, "oops!"),
+ "oops!",
),
(
'<a href="spam-1.2.3-py3.none.any.whl" data-yanked="">spam-1.2.3-py3.none.any.whl</a>',
- (True, ""),
+ "",
),
(
'<a href="spam-1.2.3-py3.none.any.whl">spam-1.2.3-py3.none.any.whl</a>',
- (False, ""),
+ None,
),
],
)
| Add type hints
`NewType` might be neat to use, but that also assumes the values don't leave the package since it might require users to cast things.
https://docs.python.org/3/library/typing.html#distinct
https://mypy.readthedocs.io/en/stable/more_types.html#newtypes | 0.0 | 13cb69c76176dd7b6b8d7b91cf04e54373a87b8a | [
"tests/test_simple.py::TestProjectURLConstruction::test_url_joining[/simple/]",
"tests/test_simple.py::TestProjectURLConstruction::test_url_joining[/simple]",
"tests/test_simple.py::TestProjectURLConstruction::test_project_name_lowercased",
"tests/test_simple.py::TestProjectURLConstruction::test_project_name_normalized",
"tests/test_simple.py::TestProjectURLConstruction::test_only_project_name_in_url_normalized",
"tests/test_simple.py::TestProjectURLConstruction::test_no_base_url",
"tests/test_simple.py::TestRepoIndexParsing::test_full_parse[pypi-212862-expected_item0]",
"tests/test_simple.py::TestRepoIndexParsing::test_full_parse[piwheels-263872-expected_item1]",
"tests/test_simple.py::TestRepoIndexParsing::test_no_cdata",
"tests/test_simple.py::TestRepoIndexParsing::test_no_href",
"tests/test_simple.py::TestRepoIndexParsing::test_project_url_normalization_complete",
"tests/test_simple.py::TestRepoIndexParsing::test_project_name_not_normalized",
"tests/test_simple.py::TestRepoIndexParsing::test_relative_url",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link0]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link1]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link2]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link3]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link4]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link5]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link6]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link7]",
"tests/test_simple.py::TestArchiveLink::test_str[archive_link8]",
"tests/test_simple.py::TestArchiveLink::test_str_escaping",
"tests/test_simple.py::TestParseArchiveLinks::test_full_parse[numpy-1402-expected_archive_link0]",
"tests/test_simple.py::TestParseArchiveLinks::test_full_parse[pulpcore-client-370-expected_archive_link1]",
"tests/test_simple.py::TestParseArchiveLinks::test_full_parse[pytorch-522-expected_archive_link2]",
"tests/test_simple.py::TestParseArchiveLinks::test_full_parse[AICoE-tensorflow-15-expected_archive_link3]",
"tests/test_simple.py::TestParseArchiveLinks::test_full_parse[numpy-piwheels-316-expected_archive_link4]",
"tests/test_simple.py::TestParseArchiveLinks::test_filename[<a",
"tests/test_simple.py::TestParseArchiveLinks::test_url[<a",
"tests/test_simple.py::TestParseArchiveLinks::test_requires_python[<a",
"tests/test_simple.py::TestParseArchiveLinks::test_hash_[<a",
"tests/test_simple.py::TestParseArchiveLinks::test_gpg_sig[<a",
"tests/test_simple.py::TestParseArchiveLinks::test_yanked[<a",
"tests/test_simple.py::TestPEP629Versioning::test_unspecified[parse_repo_index]",
"tests/test_simple.py::TestPEP629Versioning::test_unspecified[parse_archive_links]",
"tests/test_simple.py::TestPEP629Versioning::test_equal[parse_repo_index]",
"tests/test_simple.py::TestPEP629Versioning::test_equal[parse_archive_links]",
"tests/test_simple.py::TestPEP629Versioning::test_newer_minor[parse_repo_index]",
"tests/test_simple.py::TestPEP629Versioning::test_newer_minor[parse_archive_links]",
"tests/test_simple.py::TestPEP629Versioning::test_newer_major[parse_repo_index]",
"tests/test_simple.py::TestPEP629Versioning::test_newer_major[parse_archive_links]",
"tests/test_simple.py::TestPEP629Versioning::test_older_minor[parse_repo_index]",
"tests/test_simple.py::TestPEP629Versioning::test_older_minor[parse_archive_links]",
"tests/test_simple.py::TestPEP658Metadata::test_default",
"tests/test_simple.py::TestPEP658Metadata::test_attribute_only[data-dist-info-metadata]",
"tests/test_simple.py::TestPEP658Metadata::test_attribute_only[data-dist-info-metadata=true]",
"tests/test_simple.py::TestPEP658Metadata::test_hash[data-dist-info-metadata=\"sha256=abcdef\"]",
"tests/test_simple.py::TestPEP658Metadata::test_hash[data-dist-info-metadata=\"SHA256=abcdef\"]"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-12-15 03:56:34+00:00 | bsd-3-clause | 1,431 |
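
A small detail in the patch above that is easy to miss: the hand-rolled `_NORMALIZE_RE` normalization in `create_project_url` is replaced by `packaging.utils.canonicalize_name`, which applies the same PEP 503 rule (lower-case the name and collapse runs of `-`, `_` and `.` into a single `-`). A quick, illustrative check that the two spellings agree:

```python
import re

import packaging.utils

_NORMALIZE_RE = re.compile(r"[-_.]+")  # the regex the patch removes

for name in ("HolyGrail", "holy_grail", "Holy.Grail", "holy--GRAIL"):
    old = _NORMALIZE_RE.sub("-", name).lower()
    new = packaging.utils.canonicalize_name(name)
    assert old == new  # both normalize to "holy-grail"
    print(f"{name!r} -> {new!r}")
```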
|
brettcannon__mousebender-79 | diff --git a/.flake8 b/.flake8
deleted file mode 100644
index fd1cee7..0000000
--- a/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-ignore = C,E,W
-exclude = .venv
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d6f16f7..3c3e6f3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -12,7 +12,7 @@ jobs:
- name: Lint code
run: |
- pipx run flake8
+ pipx run nox -s check_code
- name: Check format
run: |
@@ -24,7 +24,7 @@ jobs:
- name: Build wheel
run: |
- pipx run flit build
+ pipx run nox -s build
tests:
name: Test Python ${{ matrix.python_version }}
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index f42d932..eb2311f 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -3,6 +3,6 @@
"ms-python.python",
"ms-python.black-formatter",
"ms-python.isort",
- "ms-python.flake8"
+ "charliermarsh.ruff"
]
}
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 27234e4..6160836 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,7 +1,12 @@
{
+ "[python]": {
+ "editor.defaultFormatter": "ms-python.black-formatter"
+ },
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": [
"tests"
],
- "editor.defaultFormatter": "ms-python.black-formatter"
-}
\ No newline at end of file
+ "ruff.logLevel": "warn",
+ "isort.check": true,
+ "isort.logLevel": "warn"
+}
diff --git a/mousebender/simple.py b/mousebender/simple.py
index 51310d9..f840732 100644
--- a/mousebender/simple.py
+++ b/mousebender/simple.py
@@ -1,9 +1,20 @@
+"""Implement the Simple Repository API.
+
+This encompasses PEPs:
+
+1. 503: Simple Repository API
+2. 592: Adding “Yank” Support to the Simple API
+3. 629: Versioning PyPI's Simple API
+4. 658: Serve Distribution Metadata in the Simple Repository API
+5. 691: JSON-based Simple API for Python Package Indexes
+
+"""
from __future__ import annotations
import html
import html.parser
import urllib.parse
-from typing import Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
import packaging.specifiers
import packaging.utils
@@ -30,6 +41,8 @@ _Meta = TypedDict("_Meta", {"api-version": Literal["1.0"]})
class ProjectIndex(TypedDict):
+ """A TypedDict representing a project index."""
+
meta: _Meta
projects: List[Dict[Literal["name"], str]]
@@ -48,41 +61,46 @@ _OptionalProjectFileDetails = TypedDict(
class ProjectFileDetails(_OptionalProjectFileDetails):
+ """A TypedDict representing a project file's details."""
+
filename: str
url: str
hashes: _HashesDict
class ProjectDetails(TypedDict):
+ """A TypedDict representing a project's detail."""
+
meta: _Meta
name: packaging.utils.NormalizedName
files: list[ProjectFileDetails]
class _SimpleIndexHTMLParser(html.parser.HTMLParser):
-
"""Parse the HTML of a repository index page."""
# PEP 503:
# Within a repository, the root URL (/) MUST be a valid HTML5 page with a
# single anchor element per project in the repository.
- def __init__(self):
+ def __init__(self) -> None:
super().__init__()
self._parsing_anchor = False
- self.names = []
+ self.names: List[str] = []
- def handle_starttag(self, tag, _attrs_list):
+ def handle_starttag(
+ self, tag: str, _attrs_list: list[tuple[str, Optional[str]]]
+ ) -> None:
if tag != "a":
return
self._parsing_anchor = True
- def handle_endtag(self, tag):
+ def handle_endtag(self, tag: str) -> None:
if tag != "a":
return
self._parsing_anchor = False
- def handle_data(self, data):
+ def handle_data(self, data: str) -> None:
if self._parsing_anchor:
self.names.append(data)
@@ -99,18 +117,22 @@ def from_project_index_html(html: str) -> ProjectIndex:
class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
- def __init__(self):
- self.archive_links = []
+ def __init__(self) -> None:
+ self.archive_links: List[Dict[str, Any]] = []
super().__init__()
- def handle_starttag(self, tag, attrs_list):
+ def handle_starttag(
+ self, tag: str, attrs_list: list[tuple[str, Optional[str]]]
+ ) -> None:
attrs = dict(attrs_list)
if tag != "a":
return
# PEP 503:
# The href attribute MUST be a URL that links to the location of the
# file for download ...
- full_url = attrs["href"]
+ if "href" not in attrs or not attrs["href"]:
+ return
+ full_url: str = attrs["href"]
parsed_url = urllib.parse.urlparse(full_url)
# PEP 503:
# ... the text of the anchor tag MUST match the final path component
@@ -118,7 +140,7 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
_, _, raw_filename = parsed_url.path.rpartition("/")
filename = urllib.parse.unquote(raw_filename)
url = urllib.parse.urlunparse((*parsed_url[:5], ""))
- args = {"filename": filename, "url": url}
+ args: Dict[str, Any] = {"filename": filename, "url": url}
# PEP 503:
# The URL SHOULD include a hash in the form of a URL fragment with the
# following syntax: #<hashname>=<hashvalue> ...
@@ -130,7 +152,7 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
# link. This exposes the Requires-Python metadata field ...
# In the attribute value, < and > have to be HTML encoded as < and
# >, respectively.
- if "data-requires-python" in attrs:
+ if "data-requires-python" in attrs and attrs["data-requires-python"]:
requires_python_data = html.unescape(attrs["data-requires-python"])
args["requires-python"] = requires_python_data
# PEP 503:
@@ -147,15 +169,15 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
# ... each anchor tag pointing to a distribution MAY have a
# data-dist-info-metadata attribute.
if "data-dist-info-metadata" in attrs:
- metadata = attrs.get("data-dist-info-metadata")
- if metadata and metadata != "true":
+ found_metadata = attrs.get("data-dist-info-metadata")
+ if found_metadata and found_metadata != "true":
# The repository SHOULD provide the hash of the Core Metadata
# file as the data-dist-info-metadata attribute's value using
# the syntax <hashname>=<hashvalue>, where <hashname> is the
# lower cased name of the hash function used, and <hashvalue> is
# the hex encoded digest.
- algorithm, _, hash = metadata.partition("=")
- metadata = (algorithm.lower(), hash)
+ algorithm, _, hash_ = found_metadata.partition("=")
+ metadata = (algorithm.lower(), hash_)
else:
# The repository MAY use true as the attribute's value if a hash
# is unavailable.
@@ -166,6 +188,7 @@ class _ArchiveLinkHTMLParser(html.parser.HTMLParser):
def from_project_details_html(name: str, html: str) -> ProjectDetails:
+ """Parse the HTML of a project details page."""
parser = _ArchiveLinkHTMLParser()
parser.feed(html)
files: List[ProjectFileDetails] = []
diff --git a/noxfile.py b/noxfile.py
index 75c9857..d30143b 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -1,3 +1,8 @@
+"""Developer-related actions.
+
+All sessions prefixed with `check_` are non-destructive.
+
+"""
import nox
python_versions = ["3.7", "3.8", "3.9", "3.10", "3.11"]
@@ -6,7 +11,10 @@ python_versions = ["3.7", "3.8", "3.9", "3.10", "3.11"]
@nox.session(python=python_versions)
def test(session, coverage=False):
"""Run the test suite."""
- session.run("pytest", *(["--cov"] if coverage else []))
+ session.install("-e", ".[test]")
+ session.run(
+ "pytest", *(["--cov", "--cov-report", "term-missing"] if coverage else [])
+ )
@nox.session(python=python_versions)
@@ -37,3 +45,17 @@ def format(session, check=False):
def check_format(session):
"""Check that the code is properly formatted."""
format(session, check=True)
+
+
[email protected]
+def check_code(session):
+ """Lint the code."""
+ session.install("ruff")
+ session.run("ruff", "mousebender", "tests")
+
+
[email protected]
+def build(session):
+ """Build the wheel and sdist."""
+ session.install("flit")
+ session.run("flit", "build")
diff --git a/pyproject.toml b/pyproject.toml
index 99e08eb..fedd832 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,12 +6,12 @@ build-backend = "flit_core.buildapi"
[project]
name = "mousebender"
authors = [
- {name = "Brett Cannon", email = "[email protected]"},
- {name = "Derek Keeler", email = "[email protected]"}
+ { name = "Brett Cannon", email = "[email protected]" },
+ { name = "Derek Keeler", email = "[email protected]" },
]
readme = "README.rst"
requires-python = ">=3.6"
-license = {file = "LICENSE"}
+license = { file = "LICENSE" }
keywords = ["packaging", "PEP 503", "PEP 592"]
urls.homepage = "https://github.com/brettcannon/mousebender"
urls.repository = "https://github.com/brettcannon/mousebender"
@@ -19,18 +19,15 @@ urls.issues = "https://github.com/brettcannon/mousebender/issues"
urls.documentation = "https://github.com/brettcannon/mousebender/blob/master/README.rst"
classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
- "License :: OSI Approved :: BSD License"
+ "License :: OSI Approved :: BSD License",
]
-dependencies = [
- "packaging >= 20.9",
- "typing-extensions"
-]
+dependencies = ["packaging >= 20.9", "typing-extensions"]
optional-dependencies.test = [
"pytest >= 6.0.1",
"importlib-resources >= 1.4.0",
"coverage[toml] >= 5.0.4",
- "pytest-cov >= 2.8.1"
+ "pytest-cov >= 2.8.1",
]
dynamic = ["version", "description"]
@@ -43,3 +40,14 @@ report.fail_under = 100
[tool.isort]
profile = "black"
+
+[tool.ruff]
+select = ["E", "F", "W", "D", "C", "B", "A", "ANN", "RUF", "M"]
+ignore = ["E501", "D203", "D213", "ANN101"]
+per-file-ignores = { "tests/*" = [
+ "D",
+ "ANN",
+], "noxfile.py" = [
+ "ANN",
+ "A001",
+] }
| brettcannon/mousebender | c4ce5e7be2a273fb4cfb914c7ea2558e66eb53d3 | diff --git a/tests/test_simple.py b/tests/test_simple.py
index 446144c..aa3357c 100644
--- a/tests/test_simple.py
+++ b/tests/test_simple.py
@@ -8,9 +8,6 @@ from .data import simple as simple_data
class TestProjectURLConstruction:
-
- """Tests for mousebender.simple.create_project_url()."""
-
@pytest.mark.parametrize("base_url", ["/simple/", "/simple"])
def test_url_joining(self, base_url):
url = simple.create_project_url(base_url, "hello")
@@ -36,9 +33,6 @@ class TestProjectURLConstruction:
class TestRepoIndexParsing:
-
- """Tests for mousebender.simple.parse_repo_index()."""
-
@pytest.mark.parametrize(
"name,count,expected_item",
[
@@ -75,9 +69,6 @@ class TestRepoIndexParsing:
class TestProjectDetailsParsing:
-
- """Tests for mousebender.simple. from project details HTML."""
-
@pytest.mark.parametrize(
"module_name,count,expected_file_details",
[
@@ -183,6 +174,11 @@ class TestProjectDetailsParsing:
assert len(project_details["files"]) == 1
assert project_details["files"][0]["url"] == expected_url
+ def test_no_href(self):
+ html = "<a>numpy-1.12.1-cp35-none-win_amd64.whl</a><br/>"
+ project_details = simple.from_project_details_html("test_no_href", html)
+ assert not len(project_details["files"])
+
@pytest.mark.parametrize(
"html,expected",
[
| Consider switching to Ruff from flake8
https://github.com/charliermarsh/ruff
https://marketplace.visualstudio.com/items?itemName=charliermarsh.ruff | 0.0 | c4ce5e7be2a273fb4cfb914c7ea2558e66eb53d3 | [
"tests/test_simple.py::TestProjectDetailsParsing::test_no_href"
]
| [
"tests/test_simple.py::TestProjectURLConstruction::test_url_joining[/simple/]",
"tests/test_simple.py::TestProjectURLConstruction::test_url_joining[/simple]",
"tests/test_simple.py::TestProjectURLConstruction::test_project_name_lowercased",
"tests/test_simple.py::TestProjectURLConstruction::test_project_name_normalized",
"tests/test_simple.py::TestProjectURLConstruction::test_only_project_name_in_url_normalized",
"tests/test_simple.py::TestProjectURLConstruction::test_no_base_url",
"tests/test_simple.py::TestRepoIndexParsing::test_full_parse[pypi-212862-numpy]",
"tests/test_simple.py::TestRepoIndexParsing::test_full_parse[piwheels-263872-django-node]",
"tests/test_simple.py::TestRepoIndexParsing::test_no_cdata",
"tests/test_simple.py::TestRepoIndexParsing::test_project_name_not_normalized",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[numpy-1402-expected_file_details0]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[pulpcore-client-370-expected_file_details1]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[pytorch-522-expected_file_details2]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[aicoe-tensorflow-15-expected_file_details3]",
"tests/test_simple.py::TestProjectDetailsParsing::test_full_parse[numpy-piwheels-316-expected_file_details4]",
"tests/test_simple.py::TestProjectDetailsParsing::test_filename[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_url[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_requires_python[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_hashes[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_gpg_sig[<a",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[sole",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[`data-yanked`",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[`data-yanked",
"tests/test_simple.py::TestProjectDetailsParsing::test_yanked[no",
"tests/test_simple.py::TestPEP658Metadata::test_default",
"tests/test_simple.py::TestPEP658Metadata::test_attribute_only[data-dist-info-metadata]",
"tests/test_simple.py::TestPEP658Metadata::test_attribute_only[data-dist-info-metadata=true]",
"tests/test_simple.py::TestPEP658Metadata::test_hash[data-dist-info-metadata=\"sha256=abcdef\"]",
"tests/test_simple.py::TestPEP658Metadata::test_hash[data-dist-info-metadata=\"SHA256=abcdef\"]"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-11-13 22:28:53+00:00 | bsd-3-clause | 1,432 |
|
briggySmalls__pyflipdot-2 | diff --git a/docs/usage.rst b/docs/usage.rst
index 9b0494f..e840f22 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -32,6 +32,6 @@ Once you've confirmed this is working, you'll want to send specific images to a
image[1::2, 1::2] = True
# Write the image
- controller.write(image)
+ controller.draw_image(image)
Refer to the :ref:`api` for full documentation.
diff --git a/pyflipdot/data.py b/pyflipdot/data.py
index df3ccf7..b26b05f 100644
--- a/pyflipdot/data.py
+++ b/pyflipdot/data.py
@@ -148,10 +148,16 @@ class ImagePacket(Packet):
message_image = np.zeros((data_rows, columns), dtype=bool)
message_image[:rows, :columns] = image
- # Flip image (we send column major, starting 'bottom left')
+ # Flip image vertically
+ # Our image is little-endian (0,0 contains least significant bit)
+ # Packbits expects array to be big-endian
message_image = np.flipud(message_image)
- # Flatten 'column major', so a whole column of pixels are sent together
+ # Interpret the boolean array as bits in a byte
# Note: we 'view' as uin8 for numpy versions < 1.10 that don't accept
# boolean arrays to packbits
- return bytes(np.packbits(message_image.flatten('F').view(np.uint8)))
+ byte_values = np.packbits(message_image.view(np.uint8), axis=0)
+
+ # Flip vertically so that we send the least significant byte first
+ # Flatten 'column major', so a whole column of pixels are sent together
+ return bytes(np.flipud(byte_values).flatten("F"))
| briggySmalls/pyflipdot | 9f3b8dafe3961396f548f1172ef5eb73350638ec | diff --git a/tests/test_data.py b/tests/test_data.py
index 791b2a0..011a49f 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -21,14 +21,10 @@ class TestPackets(object):
def test_image(self):
# Send an image as below ('p' indicates byte alignment padding)
- # (0) | 1, 0 | | p, p |
- # (1) | 0, 0 | -> | p, p |
- # (2) | 0, 0 | | p, p |
- # (3) | 0, 0 | | p, p |
- # (4) | p, p |
- # (5) | 0, 0 |
- # (6) | 0, 0 |
- # (7) | 1, 0 |
+ # (0) | 1, 0 |
+ # (1) | 0, 0 | -> [0x01, 0x00]
+ # (2) | 0, 0 |
+ # (3) | 0, 0 |
image = np.full((3, 2), False)
image[0, 0] = True
@@ -38,26 +34,25 @@ class TestPackets(object):
def test_tall_image(self):
# Send an image as below ('p' indicates byte alignment padding)
- # (0) | 1, 0 | | p, p |
- # (1) | 0, 0 | | 0, 0 |
- # (2) | 0, 0 | | 0, 0 |
- # (3) | 0, 0 | | 0, 0 |
- # (4) | 0, 0 | | 0, 0 |
- # (5) | 0, 0 | | 0, 0 |
- # (6) | 0, 0 | | 1, 0 |
- # (7) | 0, 0 | -> | 0, 0 |
- # (8) | 0, 0 | | 0, 0 |
- # (9) | 1, 0 | | 0, 0 |
- # (10) | 0, 0 | | 0, 0 |
- # (11) | 0, 0 | | 0, 0 |
- # (12) | 0, 0 | | 0, 0 |
- # (13) | 0, 0 | | 0, 0 |
- # (14) | 0, 0 | | 0, 0 |
- # (15) | 1, 0 |
+ # (0) | 1, 0 |
+ # (1) | 0, 0 |
+ # (2) | 0, 0 |
+ # (3) | 0, 0 |
+ # (4) | 0, 0 |
+ # (5) | 0, 0 |
+ # (6) | 0, 0 |
+ # (7) | 0, 0 | -> | 0x01, 0x00 | -> [0x01, 0x02, 0x00, 0x00]
+ # (8) | 0, 0 | | 0x02, 0x00 |
+ # (9) | 1, 0 |
+ # (10) | 0, 0 |
+ # (11) | 0, 0 |
+ # (12) | 0, 0 |
+ # (13) | 0, 0 |
+ # (14) | 0, 0 |
image = np.full((15, 2), False)
- image[0, 0] = True # MSbit of MSbyte
- image[9, 0] = True # MSbit for LSbyte
+ image[0, 0] = True
+ image[9, 0] = True
packet = ImagePacket(1, image)
packet_data = packet.get_bytes()
- assert packet_data == b'\x02110402010000\x03B4'
+ assert packet_data == b'\x02110401020000\x03B4'
| On larger signs 0,0 is displayed as 0,9
I have a 96x16 Hanover sign. This code works perfectly except that it starts on the 9th dot from the top.
To replicate, I push a numpy array where only the first pixel is true, such as:
[[ True False False ... False False False]
[False False False ... False False False]
[False False False ... False False False]
...
[False False False ... False False False]
[False False False ... False False False]
[False False False ... False False False]]
The result is that the one dot is rendered mid-sign instead of at 0,0, as I would expect.
Photo:

Here it is using textwriter.py to display the time:

Let me know if you have ideas on how to fix. I don't mind doing the work - but don't know where to start without starting from scratch.
Thanks for the lib. it is much better than the alternatives ;) | 0.0 | 9f3b8dafe3961396f548f1172ef5eb73350638ec | [
"tests/test_data.py::TestPackets::test_tall_image"
]
| [
"tests/test_data.py::TestPackets::test_no_payload",
"tests/test_data.py::TestPackets::test_with_payload",
"tests/test_data.py::TestPackets::test_image"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2019-06-25 07:53:53+00:00 | mit | 1,434 |
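
The pyflipdot change above swaps a column-major flatten-plus-packbits for a per-column `np.packbits(..., axis=0)` followed by a byte-order flip, so each column is sent least-significant byte first. A small sketch, assuming a 16-row panel like the 96x16 sign in the issue, of how the two versions order the bytes for a single lit pixel at (0, 0):

```python
import numpy as np

# One column of a 16-row panel with only the top pixel (row 0) lit.
column = np.zeros((16, 1), dtype=bool)
column[0, 0] = True

flipped = np.flipud(column)  # both versions flip the frame vertically first

# Old packing: flatten the whole column, then pack all 16 bits at once.
old = np.packbits(flipped.flatten("F").view(np.uint8))
# New packing: pack each column into bytes, then reverse the byte order.
new = np.flipud(np.packbits(flipped.view(np.uint8), axis=0)).flatten("F")

print([hex(b) for b in old])  # ['0x0', '0x1']: lit bit lands in the second byte
print([hex(b) for b in new])  # ['0x1', '0x0']: lit bit in the first byte sent
```

If the sign reads bit 0 of the first transmitted byte as the top pixel of the column (which is what the fixed ordering implies), the old byte order pushes the lit pixel a full byte, eight rows, down the panel, i.e. onto the ninth dot counted from the top, matching the symptom described in the issue.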
|
briggySmalls__pyflipdot-5 | diff --git a/pyflipdot/data.py b/pyflipdot/data.py
index b26b05f..2326941 100644
--- a/pyflipdot/data.py
+++ b/pyflipdot/data.py
@@ -16,13 +16,7 @@ _COMMAND_CODES = {
def _to_ascii_hex(value: bytes) -> bytes:
- def _bytes_to_ascii_hex(val: bytes) -> bytes:
- return val.hex().upper().encode('ASCII')
-
- try:
- return _bytes_to_ascii_hex(value)
- except AttributeError:
- return _bytes_to_ascii_hex(bytes([value]))
+ return value.hex().upper().encode('ASCII')
def _bytes_to_int(data: bytes) -> int:
@@ -125,7 +119,11 @@ class ImagePacket(Packet):
image_bytes = self.image_to_bytes(image)
# Start with the resolution (image byte count)
- payload = _to_ascii_hex(len(image_bytes))
+ # Note: we only ever send a single bytes-worth of info, even if the
+ # resolution is an integer bigger than 255
+ resolution_bytes = (len(image_bytes) & 0xFF).to_bytes(
+ 1, byteorder='big')
+ payload = _to_ascii_hex(resolution_bytes)
# Add the image bytes
payload += _to_ascii_hex(image_bytes)
| briggySmalls/pyflipdot | 1de9944550a1d5f49a29b5499ce4a44dba81d2cc | diff --git a/tests/test_data.py b/tests/test_data.py
index 011a49f..710d4ae 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -5,54 +5,68 @@ import numpy as np
from pyflipdot.data import ImagePacket, Packet
-class TestPackets(object):
- def test_no_payload(self):
- packet = Packet(1, 2)
- packet_data = packet.get_bytes()
-
- assert packet_data == b'\x0212\x039A'
-
- def test_with_payload(self):
- payload = b'345'
- packet = Packet(1, 2, payload)
- packet_data = packet.get_bytes()
-
- assert packet_data == b'\x0212345\x03FE'
-
- def test_image(self):
- # Send an image as below ('p' indicates byte alignment padding)
- # (0) | 1, 0 |
- # (1) | 0, 0 | -> [0x01, 0x00]
- # (2) | 0, 0 |
- # (3) | 0, 0 |
- image = np.full((3, 2), False)
- image[0, 0] = True
-
- packet = ImagePacket(1, image)
- packet_data = packet.get_bytes()
- assert packet_data == b'\x0211020100\x0378'
-
- def test_tall_image(self):
- # Send an image as below ('p' indicates byte alignment padding)
- # (0) | 1, 0 |
- # (1) | 0, 0 |
- # (2) | 0, 0 |
- # (3) | 0, 0 |
- # (4) | 0, 0 |
- # (5) | 0, 0 |
- # (6) | 0, 0 |
- # (7) | 0, 0 | -> | 0x01, 0x00 | -> [0x01, 0x02, 0x00, 0x00]
- # (8) | 0, 0 | | 0x02, 0x00 |
- # (9) | 1, 0 |
- # (10) | 0, 0 |
- # (11) | 0, 0 |
- # (12) | 0, 0 |
- # (13) | 0, 0 |
- # (14) | 0, 0 |
- image = np.full((15, 2), False)
- image[0, 0] = True
- image[9, 0] = True
-
- packet = ImagePacket(1, image)
- packet_data = packet.get_bytes()
- assert packet_data == b'\x02110401020000\x03B4'
+def test_no_payload():
+ packet = Packet(1, 2)
+ packet_data = packet.get_bytes()
+
+ assert packet_data == b'\x0212\x039A'
+
+
+def test_with_payload():
+ payload = b'345'
+ packet = Packet(1, 2, payload)
+ packet_data = packet.get_bytes()
+
+ assert packet_data == b'\x0212345\x03FE'
+
+
+def test_simple_image():
+ # Send an image as below ('p' indicates byte alignment padding)
+ # (0) | 1, 0 |
+ # (1) | 0, 0 | -> [0x01, 0x00]
+ # (2) | 0, 0 |
+ # (3) | 0, 0 |
+ image = np.full((3, 2), False)
+ image[0, 0] = True
+
+ packet = ImagePacket(1, image)
+ packet_data = packet.get_bytes()
+ assert packet_data == b'\x0211020100\x0378'
+
+
+def test_tall_image():
+ # Send an image as below ('p' indicates byte alignment padding)
+ # (0) | 1, 0 |
+ # (1) | 0, 0 |
+ # (2) | 0, 0 |
+ # (3) | 0, 0 |
+ # (4) | 0, 0 |
+ # (5) | 0, 0 |
+ # (6) | 0, 0 |
+ # (7) | 0, 0 | -> | 0x01, 0x00 | -> [0x01, 0x02, 0x00, 0x00]
+ # (8) | 0, 0 | | 0x02, 0x00 |
+ # (9) | 1, 0 |
+ # (10) | 0, 0 |
+ # (11) | 0, 0 |
+ # (12) | 0, 0 |
+ # (13) | 0, 0 |
+ # (14) | 0, 0 |
+ image = np.full((15, 2), False)
+ image[0, 0] = True
+ image[9, 0] = True
+
+ packet = ImagePacket(1, image)
+ packet_data = packet.get_bytes()
+ assert packet_data == b'\x02110401020000\x03B4'
+
+
+def test_large_image():
+ # Create an image that is 128x32 pixels
+ image = np.full((16, 128), True)
+
+ packet = ImagePacket(1, image)
+ packet_data = packet.get_bytes()
+ assert packet_data[:5] == b'\x021100'
+ for val in packet_data[7:-3]:
+ assert val.to_bytes(1, byteorder='big') == b'F'
+ assert packet_data[-3:] == b'\x033B'
| The script cannot support boards like 128 * 32
The image will be too large; see the error below:
File "/usr/local/lib/python3.6/dist-packages/pyflipdot/data.py", line 23, in _to_ascii_hex
return _bytes_to_ascii_hex(value)
File "/usr/local/lib/python3.6/dist-packages/pyflipdot/data.py", line 20, in _bytes_to_ascii_hex
return val.hex().upper().encode('ASCII') | 0.0 | 1de9944550a1d5f49a29b5499ce4a44dba81d2cc | [
"tests/test_data.py::test_large_image"
]
| [
"tests/test_data.py::test_no_payload",
"tests/test_data.py::test_with_payload",
"tests/test_data.py::test_simple_image",
"tests/test_data.py::test_tall_image"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2020-01-03 18:47:49+00:00 | mit | 1,435 |
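
The pyflipdot-5 fix above does two things: `_to_ascii_hex` now only accepts `bytes`, and the resolution field is masked to a single byte before encoding. That is why the new 128-column test expects a payload starting with `00`: a 16-row frame packs to 2 bytes per column, so 128 columns give 256 image bytes, and `256 & 0xFF` is `0`. A small sketch of that arithmetic, mirroring the patched helper with made-up data:

```python
def to_ascii_hex(value: bytes) -> bytes:
    return value.hex().upper().encode("ASCII")

image_bytes = bytes(2 * 128)  # 16 rows -> 2 bytes per column, 128 columns -> 256 bytes

# Patched behaviour: keep only the low byte of the length (256 -> 0x00).
resolution = (len(image_bytes) & 0xFF).to_bytes(1, byteorder="big")
print(to_ascii_hex(resolution))  # b'00'

# The removed fallback converted an integer length with bytes([value]),
# which raises ValueError as soon as a frame is larger than 255 bytes.
```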
|
brightway-lca__brightway2-analyzer-19 | diff --git a/bw2analyzer/contribution.py b/bw2analyzer/contribution.py
index 91b2ced..37a1503 100644
--- a/bw2analyzer/contribution.py
+++ b/bw2analyzer/contribution.py
@@ -41,7 +41,7 @@ class ContributionAnalysis:
if limit_type == "percent":
if not 0 < limit <= 1:
raise ValueError("Percentage limits > 0 and <= 1.")
- limit = (data >= (total * limit)).sum()
+ limit = (np.abs(data) >= (total * limit)).sum()
results = np.hstack(
(data.reshape((-1, 1)), np.arange(data.shape[0]).reshape((-1, 1)))
| brightway-lca/brightway2-analyzer | 6829144e8a85b9ca7bbcf79360784562a86ca96c | diff --git a/tests/contribution.py b/tests/contribution.py
index 3b168d4..fca270e 100644
--- a/tests/contribution.py
+++ b/tests/contribution.py
@@ -35,6 +35,21 @@ class ContributionTestCase(unittest.TestCase):
answer, ca.sort_array(test_data, limit=0.3, limit_type="percent")
)
)
+
+ def test_sort_array_percentage_negative(self):
+ test_data = np.array((1.0, 2.0, -4.0, 3.0))
+ answer = np.array(
+ (
+ (-4, 2),
+ (3, 3),
+ )
+ )
+ ca = CA()
+ self.assertTrue(
+ np.allclose(
+ answer, ca.sort_array(test_data, limit=0.3, limit_type="percent")
+ )
+ )
def test_sort_array_errors(self):
ca = CA()
| Invalid sorting for Contribution results when top contributors are negative (Absolute values)
When getting the contribution results, if the top contributions have a negative value, BW analyzer ignores them. The issue is in the limit calculation, as it doesn't take negatives into account.
I have tried the following fix and it works:
[contribution.py > ContributionAnalysis > sort_array() method > line 44](https://github.com/brightway-lca/brightway2-analyzer/blob/master/bw2analyzer/contribution.py#L44)
Currently: `limit = (data >= (total * limit)).sum()`
Fix: `limit = (np.abs(data) >= (total * limit)).sum()`
Does this issue have any horizontal impacts? (Other places where negatives may not have been considered?) | 0.0 | 6829144e8a85b9ca7bbcf79360784562a86ca96c | [
"tests/contribution.py::ContributionTestCase::test_sort_array_percentage_negative"
]
| [
"tests/contribution.py::BW2DataTest::test_setup_clean",
"tests/contribution.py::ContributionTestCase::test_sort_array_errors",
"tests/contribution.py::ContributionTestCase::test_sort_array_number",
"tests/contribution.py::ContributionTestCase::test_sort_array_percentage",
"tests/contribution.py::ContributionTestCase::test_top_matrix_array",
"tests/contribution.py::ContributionTestCase::test_top_matrix_matrix",
"tests/contribution.py::Contribution2TestCase::test_setup_clean"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-02-16 11:19:12+00:00 | bsd-3-clause | 1,436 |
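
The one-line analyzer fix above matters because the percentage cut-off was compared against signed scores, so a large negative contributor could never clear the threshold. With the data from the new test (1, 2, -4, 3) and a 30 % cut-off, and assuming, as the expected counts imply, that `total` is the sum of absolute scores:

```python
import numpy as np

data = np.array((1.0, 2.0, -4.0, 3.0))
total = np.abs(data).sum()        # 10.0 (assumed definition of `total`)
threshold = 0.3 * total           # 3.0

kept_signed = (data >= threshold).sum()            # 1: only +3 passes, -4 is dropped
kept_absolute = (np.abs(data) >= threshold).sum()  # 2: keeps -4 and +3, as the test expects
print(kept_signed, kept_absolute)
```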
|
brightway-lca__brightway2-io-207 | diff --git a/CHANGES.md b/CHANGES.md
index a59041b..f12104b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,4 +1,12 @@
-# io Changelog
+# `bw2io` Changelog
+
+### DEV
+
+* Merged PR #213 Reparametrize lognormals for ecospold2 imports
+
+### 0.9.DEV22 (2023-09-15)
+
+* Pinned dependencies to fix environment problems
### 0.9.DEV21 (2023-08-12)
diff --git a/bw2io/extractors/simapro_lcia_csv.py b/bw2io/extractors/simapro_lcia_csv.py
index 5adff67..f8c1033 100644
--- a/bw2io/extractors/simapro_lcia_csv.py
+++ b/bw2io/extractors/simapro_lcia_csv.py
@@ -5,7 +5,7 @@ from numbers import Number
from bw2data.logs import close_log, get_io_logger
from stats_arrays import *
-INTRODUCTION = u"""Starting SimaPro import:
+INTRODUCTION = """Starting SimaPro import:
\tFilepath: %s
\tDelimiter: %s
"""
@@ -52,10 +52,11 @@ class SimaProLCIACSVExtractor(object):
list
List of datasets extracted from the SimaPro LCIACSV file.
"""
+
@classmethod
def extract(cls, filepath, delimiter=";", encoding="cp1252"):
assert os.path.exists(filepath), "Can't find file %s" % filepath
- log, logfile = get_io_logger(u"SimaPro-LCIA-extractor")
+ log, logfile = get_io_logger("SimaPro-LCIA-extractor")
log.info(
INTRODUCTION
@@ -73,7 +74,7 @@ class SimaProLCIACSVExtractor(object):
]
# Check if valid SimaPro file
- assert u"SimaPro" in lines[0][0], "File is not valid SimaPro export"
+ assert "SimaPro" in lines[0][0], "File is not valid SimaPro export"
datasets = []
@@ -118,7 +119,7 @@ class SimaProLCIACSVExtractor(object):
try:
if data[index] and data[index][0] in SKIPPABLE_SECTIONS:
index = cls.skip_to_section_end(data, index)
- elif data[index] and data[index][0] == u"Method":
+ elif data[index] and data[index][0] == "Method":
return index + 1
except IndexError:
# File ends without extra metadata
@@ -162,11 +163,11 @@ class SimaProLCIACSVExtractor(object):
"""
categories = (line[0], line[1])
return {
- u"amount": float(line[4]),
- u"CAS number": line[3],
- u"categories": categories,
- u"name": line[2],
- u"unit": line[5],
+ "amount": float(line[4].replace(",", ".")),
+ "CAS number": line[3],
+ "categories": categories,
+ "name": line[2],
+ "unit": line[5],
}
@classmethod
@@ -220,7 +221,7 @@ class SimaProLCIACSVExtractor(object):
Raises
------
ValueError
-
+
"""
metadata, index = cls.read_metadata(data, index)
method_root_name = metadata.pop("Name")
@@ -284,7 +285,7 @@ class SimaProLCIACSVExtractor(object):
def get_all_cfs(cls, nw_data, category_data):
"""
Get all CFs from `nw_data` and `category_data`.
-
+
Parameters
----------
nw_data : list
@@ -296,6 +297,7 @@ class SimaProLCIACSVExtractor(object):
list
A list of all CFs.
"""
+
def rescale(cf, scale):
cf["amount"] *= scale
return cf
@@ -318,12 +320,13 @@ class SimaProLCIACSVExtractor(object):
A list of tuples containing the name and scale of the damage
category_data : list of tuples
A list of tuples containing the name, unit, and data of each impact category
-
+
Returns
-------
list of dictionaries
A list of dictionaries with the calculated damage exchanges of each impact category
"""
+
def rescale(cf, scale):
cf["amount"] *= scale
return cf
@@ -403,7 +406,7 @@ class SimaProLCIACSVExtractor(object):
index += 1
while data[index]:
method, scalar = data[index][:2]
- damage_data.append((method, float(scalar)))
+ damage_data.append((method, float(scalar.replace(",", "."))))
index += 1
return (name, unit, damage_data), index
@@ -420,5 +423,5 @@ class SimaProLCIACSVExtractor(object):
index += 1
if weight == "0":
continue
- nw_data.append((cat, float(weight)))
+ nw_data.append((cat, float(weight.replace(",", "."))))
return (name, nw_data), index
diff --git a/bw2io/importers/ecospold2.py b/bw2io/importers/ecospold2.py
index 2273c6b..931c36f 100644
--- a/bw2io/importers/ecospold2.py
+++ b/bw2io/importers/ecospold2.py
@@ -53,6 +53,7 @@ class SingleOutputEcospold2Importer(LCIImporter):
extractor=Ecospold2DataExtractor,
use_mp=True,
signal=None,
+ reparametrize_lognormals=False,
):
"""
@@ -70,6 +71,10 @@ class SingleOutputEcospold2Importer(LCIImporter):
Flag to indicate whether to use multiprocessing, by default True.
signal : object
Object to indicate the status of the import process, by default None.
+ reparametrize_lognormals: bool
+ Flag to indicate if lognormal distributions for exchanges should be reparametrized
+ such that the mean value of the resulting distribution meets the amount
+ defined for the exchange.
"""
self.dirpath = dirpath
@@ -93,13 +98,17 @@ class SingleOutputEcospold2Importer(LCIImporter):
delete_ghost_exchanges,
remove_uncertainty_from_negative_loss_exchanges,
fix_unreasonably_high_lognormal_uncertainties,
- set_lognormal_loc_value,
convert_activity_parameters_to_list,
add_cpc_classification_from_single_reference_product,
delete_none_synonyms,
partial(update_social_flows_in_older_consequential, biosphere_db=Database(config.biosphere)),
]
+ if reparametrize_lognormals:
+ self.strategies.append(reparametrize_lognormal_to_agree_with_static_amount)
+ else:
+ self.strategies.append(set_lognormal_loc_value)
+
start = time()
try:
self.data = extractor.extract(dirpath, db_name, use_mp=use_mp)
diff --git a/bw2io/strategies/__init__.py b/bw2io/strategies/__init__.py
index c1be067..cb19e0b 100644
--- a/bw2io/strategies/__init__.py
+++ b/bw2io/strategies/__init__.py
@@ -28,6 +28,7 @@ __all__ = [
"fix_ecoinvent_flows_pre35",
"fix_localized_water_flows",
"fix_unreasonably_high_lognormal_uncertainties",
+ "reparametrize_lognormal_to_agree_with_static_amount",
"fix_zero_allocation_products",
"json_ld_add_activity_unit",
"json_ld_add_products_as_activities",
diff --git a/bw2io/strategies/ecospold2.py b/bw2io/strategies/ecospold2.py
index bcbea9e..ed3fe75 100644
--- a/bw2io/strategies/ecospold2.py
+++ b/bw2io/strategies/ecospold2.py
@@ -861,6 +861,79 @@ def set_lognormal_loc_value(db):
return db
+def reparametrize_lognormal_to_agree_with_static_amount(db):
+ """
+ For lognormal distributions, choose the mean of the underlying normal distribution
+ (loc) such that the expected value (mean) of the resulting distribution is
+ equal to the (static) amount defined for the exchange.
+
+ Parameters
+ ----------
+ db : list
+ A list of datasets, where each dataset is a dictionary containing an
+ 'exchanges' key with a list of exchange dictionaries. The structure of a
+ dataset is as follows:
+
+ {
+ "exchanges": [
+ {
+ "type": str,
+ "name": str,
+ "amount": float,
+ "uncertainty type": int,
+ "loc": float,
+ "scale": float,
+ },
+ ...
+ ]
+ }
+
+ Returns
+ -------
+ list
+ The updated list of datasets with adjusted lognormal uncertainty
+ distribution loc values.
+
+ Examples
+ --------
+ >>> import math
+ >>> db = [
+ ... {
+ ... "exchanges": [
+ ... {
+ ... "type": "technosphere",
+ ... "name": "input_A",
+ ... "amount": 5,
+ ... "uncertainty type": 2,
+ ... "loc": 1,
+ ... "scale": 0.5,
+ ... },
+ ... ],
+ ... }
+ ... ]
+ >>> reparametrize_lognormals_to_agree_with_static_amount(db)
+ [
+ {
+ "exchanges": [
+ {
+ "type": "technosphere",
+ "name": "input_A",
+ "amount": 5,
+ "uncertainty type": 2,
+ "loc": math.log(5) - 0.5**2 / 2,
+ "scale": 0.5,
+ },
+ ],
+ }
+ ]
+ """
+ for ds in db:
+ for exc in ds.get("exchanges", []):
+ if exc["uncertainty type"] == LognormalUncertainty.id:
+ exc["loc"] = math.log(abs(exc["amount"])) - exc["scale"]**2 / 2
+ return db
+
+
def fix_unreasonably_high_lognormal_uncertainties(db, cutoff=2.5, replacement=0.25):
"""
Replace unreasonably high lognormal uncertainties in the given database
diff --git a/bw2io/version.py b/bw2io/version.py
index 877a372..edb9b77 100644
--- a/bw2io/version.py
+++ b/bw2io/version.py
@@ -1,1 +1,1 @@
-version = (0, 9, "DEV21")
+version = (0, 9, "DEV22")
diff --git a/setup.py b/setup.py
index 5f62474..e4d0be0 100644
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,11 @@
from setuptools import setup
REQUIREMENTS = [
- "bw2calc>=1.7.4",
- "bw2data>=3.5.1",
- "bw2parameters>=0.7.1",
- "bw_migrations",
- "bw_processing",
+ "bw2calc>=2.0.dev14",
+ "bw2data>=4.0.dev24",
+ "bw2parameters>=1.1.0",
+ "bw_migrations>=0.2",
+ "bw_processing>=0.8.5",
"lxml",
"mrio_common_metadata",
"numpy",
@@ -13,7 +13,7 @@ REQUIREMENTS = [
"platformdirs",
"requests",
"scipy",
- "stats_arrays",
+ "stats_arrays>=0.6.5",
"tqdm",
"unidecode",
"voluptuous",
| brightway-lca/brightway2-io | 0c3c7288a897f57511ce17a6be1698e2cb9b08a1 | diff --git a/tests/strategies/ecospold2.py b/tests/strategies/ecospold2.py
index 8c37730..fe3b901 100644
--- a/tests/strategies/ecospold2.py
+++ b/tests/strategies/ecospold2.py
@@ -7,6 +7,7 @@ from bw2io.strategies.ecospold2 import (
fix_unreasonably_high_lognormal_uncertainties,
remove_uncertainty_from_negative_loss_exchanges,
set_lognormal_loc_value,
+ reparametrize_lognormal_to_agree_with_static_amount,
)
@@ -100,6 +101,56 @@ def test_set_lognormal_loc_value():
assert set_lognormal_loc_value(db) == expected
+def test_reparametrize_lognormal_to_agree_with_static_amount():
+ db = [
+ {
+ "exchanges": [
+ {
+ "uncertainty type": LognormalUncertainty.id,
+ "loc": 1000,
+ "scale": 2,
+ "amount": 1,
+ },
+ {
+ "uncertainty type": LognormalUncertainty.id,
+ "loc": 1000,
+ "scale": 2,
+ "amount": -1,
+ },
+ {
+ "uncertainty type": -1,
+ "loc": 1000,
+ "amount": 1,
+ },
+ ]
+ }
+ ]
+ expected = [
+ {
+ "exchanges": [
+ {
+ "uncertainty type": LognormalUncertainty.id,
+ "loc": -2,
+ "scale": 2,
+ "amount": 1,
+ },
+ {
+ "uncertainty type": LognormalUncertainty.id,
+ "loc": -2,
+ "scale": 2,
+ "amount": -1,
+ },
+ {
+ "uncertainty type": -1,
+ "loc": 1000,
+ "amount": 1,
+ },
+ ]
+ }
+ ]
+ assert reparametrize_lognormal_to_agree_with_static_amount(db) == expected
+
+
def test_remove_uncertainty_from_negative_loss_exchanges():
db = [
{
| float parsing error
```
### Importing Environmental Footprint 3.1 (adapted) patch wtu.CSV...
Traceback (most recent call last):
File "/home/jovyan/ecobalyse/data/import_method.py", line 62, in <module>
main()
File "/home/jovyan/ecobalyse/data/import_method.py", line 56, in main
import_method()
File "/home/jovyan/ecobalyse/data/import_method.py", line 33, in import_method
ef = bw2io.importers.SimaProLCIACSVImporter(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/bw2io/importers/simapro_lcia_csv.py", line 61, in __init__
self.data = SimaProLCIACSVExtractor.extract(filepath, delimiter, encoding)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/bw2io/extractors/simapro_lcia_csv.py", line 84, in extract
ds, index = cls.read_method_data_set(lines, index, filepath)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/bw2io/extractors/simapro_lcia_csv.py", line 235, in read_method_data_set
catdata, index = cls.get_category_data(data, index + 1)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/bw2io/extractors/simapro_lcia_csv.py", line 377, in get_category_data
cf_data.append(cls.parse_cf(data[index]))
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/bw2io/extractors/simapro_lcia_csv.py", line 165, in parse_cf
u"amount": float(line[4]),
^^^^^^^^^^^^^^
ValueError: could not convert string to float: '3,02'
``` | 0.0 | 0c3c7288a897f57511ce17a6be1698e2cb9b08a1 | [
"tests/strategies/ecospold2.py::test_fix_unreasonably_high_lognormal_uncertainties",
"tests/strategies/ecospold2.py::test_set_lognormal_loc_value",
"tests/strategies/ecospold2.py::test_reparametrize_lognormal_to_agree_with_static_amount",
"tests/strategies/ecospold2.py::test_remove_uncertainty_from_negative_loss_exchanges",
"tests/strategies/ecospold2.py::test_drop_temporary_outdated_biosphere_flows",
"tests/strategies/ecospold2.py::test_add_cpc_classification_from_single_reference_product",
"tests/strategies/ecospold2.py::test_delete_none_synonyms"
]
| []
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-08-22 22:25:22+00:00 | bsd-3-clause | 1,437 |
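
The new `reparametrize_lognormal_to_agree_with_static_amount` strategy above relies on the standard lognormal identity E[X] = exp(loc + scale**2 / 2): choosing loc = ln(|amount|) - scale**2 / 2 therefore makes the distribution's mean equal the static exchange amount (with loc = ln(|amount|) alone, only the median would match). A quick numerical check of that choice:

```python
import math

import numpy as np

amount, scale = 5.0, 0.5
loc = math.log(abs(amount)) - scale**2 / 2   # the value the strategy writes

print(math.exp(loc + scale**2 / 2))   # 5.0: the analytic mean equals the amount
print(math.exp(loc))                  # ~4.41: the median no longer matches

samples = np.random.default_rng(0).lognormal(mean=loc, sigma=scale, size=1_000_000)
print(samples.mean())                 # ~5.0, up to sampling noise
```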
|
broadinstitute__cromshell-203 | diff --git a/src/cromshell/__main__.py b/src/cromshell/__main__.py
index 8060d9a..e48c4a8 100644
--- a/src/cromshell/__main__.py
+++ b/src/cromshell/__main__.py
@@ -13,6 +13,7 @@ from .slim_metadata import command as slim_metadata
from .status import command as status
from .submit import command as submit
from .timing import command as timing
+from .update_server import command as update_server
# Version number is automatically set via bumpversion.
# DO NOT MODIFY:
@@ -129,6 +130,7 @@ main_entry.add_command(status.main)
main_entry.add_command(submit.main)
main_entry.add_command(slim_metadata.main)
main_entry.add_command(metadata.main)
+main_entry.add_command(update_server.main)
main_entry.add_command(timing.main)
diff --git a/src/cromshell/update_server/__init__.py b/src/cromshell/update_server/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/cromshell/update_server/command.py b/src/cromshell/update_server/command.py
new file mode 100644
index 0000000..ffaa7a2
--- /dev/null
+++ b/src/cromshell/update_server/command.py
@@ -0,0 +1,44 @@
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Dict
+
+import click
+
+LOGGER = logging.getLogger(__name__)
+CROMWELL_SERVER_KEY = "cromwell_server"
+
+
[email protected](name="update-server")
[email protected]("cromwell_server_url", required=True, nargs=1)
[email protected]_obj
+def main(config, cromwell_server_url):
+ """Update the default Cromwell server in the config file"""
+
+ LOGGER.info("update-server")
+
+ # location of the config file
+ cromshell_config_path = os.path.join(
+ config.config_dir, config.CROMSHELL_CONFIG_FILE_NAME
+ )
+ assert os.access(
+ cromshell_config_path, mode=os.W_OK
+ ), f"Cannot write to Cromshell config file {cromshell_config_path}"
+
+ # the contents of the config file as a dict
+ cromshell_config: Dict = config.cromshell_config_options
+
+ # update the cromwell server
+ if CROMWELL_SERVER_KEY not in cromshell_config.keys():
+ cromshell_config.update({CROMWELL_SERVER_KEY: cromwell_server_url})
+ else:
+ cromshell_config[CROMWELL_SERVER_KEY] = cromwell_server_url
+
+ # write the modified config file
+ config_contents = json.dumps(cromshell_config, indent=2)
+ with Path(cromshell_config_path).open("w") as crom_config_file:
+ crom_config_file.write(config_contents)
+
+ LOGGER.info(f"Cromshell config file at {cromshell_config_path}")
+ LOGGER.info(f"Default Cromwell server updated to {cromwell_server_url}")
diff --git a/src/cromshell/utilities/cromshellconfig.py b/src/cromshell/utilities/cromshellconfig.py
index 35d6127..10e9d76 100644
--- a/src/cromshell/utilities/cromshellconfig.py
+++ b/src/cromshell/utilities/cromshellconfig.py
@@ -197,7 +197,7 @@ def __load_cromshell_config_file(
def __get_cromwell_server(config_options: dict):
- """Get Cromshell Server URL from configuration options"""
+ """Get Cromwell Server URL from configuration options"""
if not config_options["cromwell_server"]:
raise FileNotFoundError('Cromshell config file is missing "cromwell_server"')
| broadinstitute/cromshell | e4cd5f565ebde77949673799d0ac6ee1e50e73b0 | diff --git a/tests/integration/test_update_server.py b/tests/integration/test_update_server.py
new file mode 100644
index 0000000..f7874d9
--- /dev/null
+++ b/tests/integration/test_update_server.py
@@ -0,0 +1,66 @@
+import os
+from traceback import print_exception
+
+from click.testing import CliRunner
+
+from cromshell.__main__ import main_entry as cromshell
+from cromshell.utilities import cromshellconfig
+
+
+def get_current_cromwell_server():
+ cromshell_config_dict = cromshellconfig.__load_cromshell_config_file(
+ config_directory=cromshellconfig.config_dir,
+ config_file_name=cromshellconfig.CROMSHELL_CONFIG_FILE_NAME,
+ config_file_template=None,
+ )
+ print(
+ os.path.join(
+ cromshellconfig.config_dir, cromshellconfig.CROMSHELL_CONFIG_FILE_NAME
+ )
+ )
+ print(cromshell_config_dict)
+ return cromshell_config_dict["cromwell_server"]
+
+
+def run_update_server(runner, new_cromwell_server):
+ """Run cromshell update-server in a specific CliRunner context"""
+
+ result = runner.invoke(
+ cromshell,
+ [
+ "update-server",
+ new_cromwell_server,
+ ],
+ )
+
+ # print any exceptions so pytest will show them
+ print_exception(*result.exc_info)
+
+
+def test_update_server():
+ """Run cromshell update-server using CliRunner and ensure server changes"""
+
+ print("Running a test of cromshell update-server...")
+
+ test_server_name = "http://test.server"
+
+ runner = CliRunner(mix_stderr=False)
+ # The test is being run in temp directory created by CliRunner, and
+ # so its modification of the default server only happens in this context
+ with runner.isolated_filesystem():
+
+ # the current default server
+ current_cromwell_server = get_current_cromwell_server()
+ print(f"Initial Cromwell server: {current_cromwell_server}")
+
+ # update the server and check that it worked
+ run_update_server(
+ runner=runner,
+ new_cromwell_server=test_server_name,
+ )
+ tmp_cromwell_server = get_current_cromwell_server()
+ print(
+ f"After attempting to update to '{test_server_name}', "
+ f"the config file has '{tmp_cromwell_server}'"
+ )
+ assert tmp_cromwell_server == test_server_name, "cromshell update-server failed"
| `update-server` command
Port the `update-server` command over from 1.0.
This allows the user to programmatically change the server name in the config file without having to know how to do it manually.
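
As an illustration only (not part of the original issue), here is a minimal sketch of how the ported subcommand could be exercised once the patch above is in place. It mirrors the integration test in the patch; `main_entry` is cromshell's existing Click group to which the patch registers the new subcommand, and the server URL is a made-up example.

```python
from click.testing import CliRunner

from cromshell.__main__ import main_entry


def set_default_server(url: str) -> int:
    """Invoke `cromshell update-server <url>` programmatically."""
    runner = CliRunner()
    # On success (exit code 0) the "cromwell_server" key in the Cromshell
    # config file is rewritten to the given URL.
    result = runner.invoke(main_entry, ["update-server", url])
    return result.exit_code


if __name__ == "__main__":
    # Illustrative URL only.
    print(set_default_server("http://my-cromwell-host:8000"))
```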
"tests/integration/test_update_server.py::test_update_server"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2022-04-29 18:33:04+00:00 | bsd-3-clause | 1,438 |
|
brocksam__pyproprop-49 | diff --git a/.restyled.yaml b/.restyled.yaml
index 877a3c5..3c5a7dc 100644
--- a/.restyled.yaml
+++ b/.restyled.yaml
@@ -1,3 +1,2 @@
restylers:
- - isort
- - reorder-python-imports
\ No newline at end of file
+ - isort
\ No newline at end of file
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 14e1e13..48d6418 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -38,6 +38,14 @@ Unreleased
- None
+[0.4.1] - 2020-09-22
+--------------------
+
+Fixed
+~~~~~
+
+- Fix bug which raises an error when using the `method` kwarg with `processed_property`, as reported in issue #48. Was caused by variables being out of scope in the function `apply_method` in `pyproprop/processed_property.py`.
+
[0.4.0] - 2020-09-22
--------------------
diff --git a/pyproprop/processed_property.py b/pyproprop/processed_property.py
index 641dc4d..108ae46 100644
--- a/pyproprop/processed_property.py
+++ b/pyproprop/processed_property.py
@@ -7,17 +7,13 @@ reuse.
"""
from numbers import Real
-from typing import Any
-from typing import Iterable
-from typing import Tuple
+from typing import Any, Iterable, Tuple
import numpy as np
-from .format_str_case import format_str_case
-from .format_str_case import SUPPORTED_STR_FORMAT_OPTIONS
+from .format_str_case import SUPPORTED_STR_FORMAT_OPTIONS, format_str_case
from .options import Options
-from .utils import format_for_output
-from .utils import generate_name_description_error_message
+from .utils import format_for_output, generate_name_description_error_message
__all__ = ["processed_property"]
@@ -141,7 +137,8 @@ def processed_property(name, **kwargs):
args = (name_str, )
setter_dispatcher.update({process_optimisable: (args, {})})
if post_method is not None:
- setter_dispatcher.update({apply_method: no_args_kwargs})
+ args = (optional, post_method)
+ setter_dispatcher.update({apply_method: (args, {})})
return setter_dispatcher
storage_name = "_" + name
@@ -521,7 +518,7 @@ def check_len(value, len_sequence, name_str):
return value
-def apply_method(value):
+def apply_method(value, optional, post_method):
"""Applies a specified method at the end of the property setter.
Parameters
diff --git a/setup.py b/setup.py
index caceec9..3dc14a5 100644
--- a/setup.py
+++ b/setup.py
@@ -117,7 +117,7 @@ def get_contents_from_file(filepath, by_line=False, strip=""):
PACKAGE_NAME = "pyproprop"
-VERSION = "0.4.0"
+VERSION = "0.4.1"
AUTHOR = "Sam Brockie"
AUTHOR_EMAIL = "[email protected]"
DESCRIPTION = ("Package for aiding writing classes with lots of similar "
| brocksam/pyproprop | 693950be09186100bc029535ddaef0e9f61eb4f9 | diff --git a/tests/unit/processed_property/test_post_method.py b/tests/unit/processed_property/test_post_method.py
index e69de29..2ad6b79 100644
--- a/tests/unit/processed_property/test_post_method.py
+++ b/tests/unit/processed_property/test_post_method.py
@@ -0,0 +1,46 @@
+"""Test processed properties with post-methods applied."""
+
+
+import hypothesis.strategies as st
+import numpy as np
+from hypothesis import given
+
+from pyproprop import processed_property
+
+
+def square(x):
+ """Dummy function for squaring two numbers."""
+ xx = x * x
+ return xx
+
+
+class ClassWithPostMethodProperty:
+ """Dummy class for testing processed properties with post methods."""
+
+ method_prop = processed_property("method_prop", type=int, method=square)
+ np_method_prop = processed_property("np_method_prop", type=float,
+ cast=True, iterable_allowed=True,
+ method=np.cos)
+
+
+@given(st.integers())
+def test_post_method(test_value):
+ """Applied python function correctly."""
+ test_fixture = ClassWithPostMethodProperty()
+ test_fixture.method_prop = test_value
+ assert test_fixture.method_prop == square(test_value)
+
+
+@given(st.one_of(st.integers(min_value=-9223372036854775808,
+ max_value=9223372036854775807),
+ st.floats(allow_infinity=False, allow_nan=False),
+ st.lists(st.integers(min_value=-9223372036854775808,
+ max_value=9223372036854775807)),
+ )
+ )
+def test_post_method_numpy(test_value):
+ """Applies numpy function correctly."""
+ test_fixture = ClassWithPostMethodProperty()
+ test_fixture.np_method_prop = test_value
+ expected_result = np.cos(np.array([test_value]))
+ assert (test_fixture.np_method_prop == expected_result).all()
| Error in post method
When calling a processed property with a post method, for example:
```
squares = processed_property(
"squares",
type=int,
iterable_allowed=True,
method=square()
)
```
The following is raised: `NameError: name 'optional' is not defined`.
This is coming from `processed_property`, line 537:
https://github.com/brocksam/pyproprop/blob/693950be09186100bc029535ddaef0e9f61eb4f9/pyproprop/processed_property.py#L524-L539
The name `optional` is not in the scope of `apply_method`.
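
To make the failure mode concrete, below is a simplified sketch of the dispatcher pattern involved. The function bodies and the dispatch loop are illustrative assumptions, not the actual pyproprop internals, but they show why passing `optional` and `post_method` explicitly (as the patch above does) removes the `NameError`.

```python
def square(x):
    return x * x


# Before the fix: the post-method hook was registered with no extra arguments,
# yet its body referred to names that only existed in the enclosing factory.
def apply_method_broken(value):
    if optional and value is None:  # NameError: name 'optional' is not defined
        return None
    return post_method(value)


# After the fix: everything the hook needs is handed to it explicitly.
def apply_method_fixed(value, optional, post_method):
    if optional and value is None:
        return None
    return post_method(value)


# Toy stand-in for the setter dispatcher: each entry maps a processing
# function to the (args, kwargs) it should receive after the value itself.
setter_dispatcher = {apply_method_fixed: ((False, square), {})}

value = 3
for func, (args, kwargs) in setter_dispatcher.items():
    value = func(value, *args, **kwargs)

print(value)  # 9
```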
"tests/unit/processed_property/test_post_method.py::test_post_method",
"tests/unit/processed_property/test_post_method.py::test_post_method_numpy"
]
| []
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-09-22 14:25:00+00:00 | mit | 1,439 |
|
bryanyang0528__ksql-python-73 | diff --git a/ksql/api.py b/ksql/api.py
index f4a6bbb..b5f1f8f 100644
--- a/ksql/api.py
+++ b/ksql/api.py
@@ -101,7 +101,7 @@ class BaseAPI(object):
headers = {"Accept": "application/json", "Content-Type": "application/json"}
if self.api_key and self.secret:
- base64string = base64.b64encode("{}:{}".format(self.api_key, self.secret))
+ base64string = base64.b64encode(bytes("{}:{}".format(self.api_key, self.secret), "utf-8"))
headers["Authorization"] = "Basic {}" % base64string
req = urllib.request.Request(url=url, data=data, headers=headers, method=method.upper())
| bryanyang0528/ksql-python | dbd864e2f424805a7c3170dbdfe3723fe7aea403 | diff --git a/tests/test_client.py b/tests/test_client.py
index 42a89db..6e9075f 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -53,6 +53,13 @@ class TestKSQLAPI(unittest.TestCase):
property = [i for i in properties if i["name"] == "ksql.schema.registry.url"][0]
self.assertEqual(property.get("value"), "http://schema-registry:8081")
+ @vcr.use_cassette("tests/vcr_cassettes/ksql_show_table_with_api_key.yml")
+ def test_ksql_show_tables_with_api_key(self):
+ api_client = KSQLAPI(url=self.url, check_version=False, api_key='foo', secret='bar')
+ ksql_string = "show tables;"
+ r = api_client.ksql(ksql_string)
+ self.assertEqual(r, [{"@type": "tables", "statementText": "show tables;", "tables": [], "warnings": []}])
+
@vcr.use_cassette("tests/vcr_cassettes/ksql_show_table.yml")
def test_ksql_show_tables(self):
""" Test GET requests """
diff --git a/tests/unit-tests/test_api.py b/tests/unit-tests/test_api.py
index 0bce5d4..98a2c60 100644
--- a/tests/unit-tests/test_api.py
+++ b/tests/unit-tests/test_api.py
@@ -1,5 +1,6 @@
import unittest
import responses
+import urllib
from ksql.api import BaseAPI
diff --git a/tests/vcr_cassettes/ksql_show_table_with_api_key.yml b/tests/vcr_cassettes/ksql_show_table_with_api_key.yml
new file mode 100644
index 0000000..df994fc
--- /dev/null
+++ b/tests/vcr_cassettes/ksql_show_table_with_api_key.yml
@@ -0,0 +1,34 @@
+interactions:
+- request:
+ body: '{"ksql": "show tables;"}'
+ headers:
+ Accept:
+ - application/json
+ Authorization:
+ - Basic {}
+ Connection:
+ - close
+ Content-Length:
+ - '24'
+ Content-Type:
+ - application/json
+ Host:
+ - localhost:8088
+ User-Agent:
+ - Python-urllib/3.8
+ method: POST
+ uri: http://localhost:8088/ksql
+ response:
+ body:
+ string: '[{"@type":"tables","statementText":"show tables;","tables":[],"warnings":[]}]'
+ headers:
+ connection:
+ - close
+ content-length:
+ - '77'
+ content-type:
+ - application/json
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/vcr_cassettes_backup/bad_requests.yml b/tests/vcr_cassettes_backup/bad_requests.yml
deleted file mode 100644
index 024ac7a..0000000
--- a/tests/vcr_cassettes_backup/bad_requests.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "noi;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['16']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"generic_error","error_code":40000,"message":"line 1:1:
- mismatched input ''noi'' expecting {<EOF>, ''('', ''SELECT'', ''VALUES'',
- ''CREATE'', ''REGISTER'', ''TABLE'', ''INSERT'', ''DESCRIBE'', ''PRINT'',
- ''EXPLAIN'', ''SHOW'', ''LIST'', ''TERMINATE'', ''LOAD'', ''DROP'', ''SET'',
- ''EXPORT'', ''UNSET'', ''RUN''}\nCaused by: org.antlr.v4.runtime.InputMismatchException","stack_trace":["io.confluent.ksql.parser.KsqlParser.buildAst(KsqlParser.java:66)","io.confluent.ksql.KsqlEngine.getStatements(KsqlEngine.java:497)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:171)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 21:19:08 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/get_ksql_server.yml b/tests/vcr_cassettes_backup/get_ksql_server.yml
deleted file mode 100644
index e0a3c83..0000000
--- a/tests/vcr_cassettes_backup/get_ksql_server.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-interactions:
-- request:
- body: null
- headers:
- Accept: ['*/*']
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- User-Agent: [python-requests/2.19.1]
- method: GET
- uri: http://localhost:8088/info
- response:
- body: {string: '{"KsqlServerInfo":{"version":"5.0.0-SNAPSHOT","kafkaClusterId":"9HvFRIoMSyy1YUxjpOt-gg","ksqlServiceId":"default_"}}'}
- headers:
- Content-Type: [application/vnd.ksql.v1+json]
- Date: ['Fri, 20 Jul 2018 20:08:04 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/get_properties.yml b/tests/vcr_cassettes_backup/get_properties.yml
deleted file mode 100644
index 0ffd904..0000000
--- a/tests/vcr_cassettes_backup/get_properties.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "show properties;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['28']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"properties","statementText":"show properties;","properties":{"ksql.extension.dir":"ext","ksql.streams.cache.max.bytes.buffering":"10000000","ksql.transient.prefix":"transient_","ksql.schema.registry.url":"http://localhost:8081","ssl.secure.random.implementation":null,"ksql.streams.default.deserialization.exception.handler":"io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler","ksql.output.topic.name.prefix":"","ksql.streams.auto.offset.reset":"latest","ksql.sink.partitions":"4","ssl.keystore.type":"JKS","ssl.trustmanager.algorithm":"PKIX","ksql.statestore.suffix":"_ksql_statestore","ssl.key.password":null,"ksql.service.id":"default_","ssl.truststore.password":null,"ssl.endpoint.identification.algorithm":"https","ksql.streams.bootstrap.servers":"localhost:29092","ssl.protocol":"TLS","ksql.streams.commit.interval.ms":"2000","ksql.sink.replicas":"1","ssl.provider":null,"ssl.enabled.protocols":"TLSv1.2,TLSv1.1,TLSv1","ssl.keystore.location":null,"ksql.streams.num.stream.threads":"4","ssl.cipher.suites":null,"ssl.truststore.type":"JKS","ksql.udfs.enabled":"true","ssl.truststore.location":null,"ksql.udf.enable.security.manager":"true","ssl.keystore.password":null,"ssl.keymanager.algorithm":"SunX509","ksql.streams.application.id":"KSQL_REST_SERVER_DEFAULT_APP_ID","ksql.sink.window.change.log.additional.retention":"1000000","ksql.udf.collect.metrics":"false","ksql.persistent.prefix":"query_"},"overwrittenProperties":[]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:06 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/healthcheck.yml b/tests/vcr_cassettes_backup/healthcheck.yml
deleted file mode 100644
index 95b7873..0000000
--- a/tests/vcr_cassettes_backup/healthcheck.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-interactions:
-- request:
- body: null
- headers:
- Accept: ['*/*']
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- User-Agent: [python-requests/2.19.1]
- method: GET
- uri: http://localhost:8088/status
- response:
- body: {string: '{"commandStatuses":{"stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/create":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/create":"SUCCESS","stream/TEST_TABLE/drop":"SUCCESS","stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/drop":"SUCCESS","stream/CREATE_STREAM_AS_WITHOUT_CONDITIONS/create":"SUCCESS","stream/PAGEVIEWS_ORIGINAL/drop":"SUCCESS","stream/KSQL_PYTHON_TESTTEST_KSQL_CREATE_STREAM/create":"SUCCESS","stream/TEST_TABLE/create":"SUCCESS","stream/PREBID_TRAFFIC_LOG_VALID_STREAM/drop":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH/create":"SUCCESS","stream/PAGEVIEWS_ORIGINAL/create":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND/create":"SUCCESS","stream/PREBID_TRAFFIC_LOG_VALID_STREAM/create":"ERROR","stream/KSQL_PYTHON_TESTTEST_KSQL_CREATE_STREAM/drop":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/drop":"QUEUED"}}'}
- headers:
- Content-Type: [application/vnd.ksql.v1+json]
- Date: ['Fri, 20 Jul 2018 20:10:10 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream.yml b/tests/vcr_cassettes_backup/ksql_create_stream.yml
deleted file mode 100644
index e40a8cc..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_testtest_ksql_create_stream (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['198']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_testtest_ksql_create_stream
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TESTTEST_KSQL_CREATE_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:09 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith.yml
deleted file mode 100644
index 642cc7b..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith.yml
+++ /dev/null
@@ -1,132 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED pageviews_original;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['49']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''PAGEVIEWS_ORIGINAL'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED pageviews_original;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:41 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n pageviews_original;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['63']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n pageviews_original;","commandId":"stream/PAGEVIEWS_ORIGINAL/drop","commandStatus":{"status":"SUCCESS","message":"Source
- PAGEVIEWS_ORIGINAL does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:41 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED create_stream_as_with_conditions_with_startwith;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH''
- in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED create_stream_as_with_conditions_with_startwith;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:42 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n create_stream_as_with_conditions_with_startwith;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['92']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n create_stream_as_with_conditions_with_startwith;","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/drop","commandStatus":{"status":"SUCCESS","message":"Source
- CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:42 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream pageviews_original
- (name string, age bigint, userid string, pageid bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/PAGEVIEWS_ORIGINAL/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:42 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_with_conditions_with_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original
- where userid = ''foo_%'';"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['269']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_with_conditions_with_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original
- where userid = ''foo_%'';","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:43 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith_with_and.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith_with_and.yml
deleted file mode 100644
index 62d4a1d..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith_with_and.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream pageviews_original (name string, age bigint, userid string, pageid
- bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:50 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_with_conditions_with_startwith_with_and
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith_with_and'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original where userid = ''foo_%'' and age > 10;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['300']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_with_conditions_with_startwith_with_and
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith_with_and'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original where userid = ''foo_%'' and age > 10;","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:50 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_without_startwith.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_without_startwith.yml
deleted file mode 100644
index 96aa9fb..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_without_startwith.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream pageviews_original (name string, age bigint, userid string, pageid
- bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:56 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_with_conditions_without_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_without_startwith'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original
- where userid = ''foo'';"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['273']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_with_conditions_without_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_without_startwith'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original where userid = ''foo'';","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:56 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_wrong_timestamp.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_wrong_timestamp.yml
deleted file mode 100644
index c18ad24..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_wrong_timestamp.yml
+++ /dev/null
@@ -1,145 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED prebid_traffic_log_total_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['62']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''PREBID_TRAFFIC_LOG_TOTAL_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED prebid_traffic_log_total_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:10 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n prebid_traffic_log_total_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['76']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n prebid_traffic_log_total_stream;","commandId":"stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- PREBID_TRAFFIC_LOG_TOTAL_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED prebid_traffic_log_valid_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['62']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''PREBID_TRAFFIC_LOG_VALID_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED prebid_traffic_log_valid_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n prebid_traffic_log_valid_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['76']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n prebid_traffic_log_valid_stream;","commandId":"stream/PREBID_TRAFFIC_LOG_VALID_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- PREBID_TRAFFIC_LOG_VALID_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream prebid_traffic_log_total_stream (name string, age
- bigint, userid string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['173']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream prebid_traffic_log_total_stream
- (name string, age bigint, userid string, pageid bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream prebid_traffic_log_valid_stream WITH (kafka_topic=''prebid_traffic_log_valid_topic'',
- value_format=''DELIMITED'', timestamp=''foo'') AS SELECT * FROM prebid_traffic_log_total_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['202']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream prebid_traffic_log_valid_stream
- WITH (kafka_topic=''prebid_traffic_log_valid_topic'', value_format=''DELIMITED'',
- timestamp=''foo'') AS SELECT * FROM prebid_traffic_log_total_stream;","commandId":"stream/PREBID_TRAFFIC_LOG_VALID_STREAM/create","commandStatus":{"status":"ERROR","message":"io.confluent.ksql.util.KsqlException:
- No column with the provided timestamp column name in the WITH clause, FOO,
- exists in the defined schema.\n\tat io.confluent.ksql.util.timestamp.TimestampExtractionPolicyFactory.lambda$create$0(TimestampExtractionPolicyFactory.java:41)\n\tat
- java.util.Optional.orElseThrow(Optional.java:290)\n\tat io.confluent.ksql.util.timestamp.TimestampExtractionPolicyFactory.create(TimestampExtractionPolicyFactory.java:41)\n\tat
- io.confluent.ksql.planner.LogicalPlanner.getTimestampExtractionPolicy(LogicalPlanner.java:126)\n\tat
- io.confluent.ksql.planner.LogicalPlanner.buildOutputNode(LogicalPlanner.java:93)\n\tat
- io.confluent.ksql.planner.LogicalPlanner.buildPlan(LogicalPlanner.java:83)\n\tat
- io.confluent.ksql.QueryEngine.buildQueryLogicalPlan(QueryEngine.java:118)\n\tat
- io.confluent.ksql.QueryEngine.buildLogicalPlans(QueryEngine.java:90)\n\tat
- io.confluent.ksql.KsqlEngine.planQueries(KsqlEngine.java:221)\n\tat io.confluent.ksql.KsqlEngine.buildMultipleQueries(KsqlEngine.java:211)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.startQuery(StatementExecutor.java:372)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.handleCreateAsSelect(StatementExecutor.java:317)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.executeStatement(StatementExecutor.java:234)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.handleStatementWithTerminatedQueries(StatementExecutor.java:206)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.handleStatement(StatementExecutor.java:112)\n\tat
- io.confluent.ksql.rest.server.computation.CommandRunner.executeStatement(CommandRunner.java:105)\n\tat
- io.confluent.ksql.rest.server.computation.CommandRunner.fetchAndRunCommands(CommandRunner.java:88)\n\tat
- io.confluent.ksql.rest.server.computation.CommandRunner.run(CommandRunner.java:63)\n\tat
- java.lang.Thread.run(Thread.java:748)\n"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:12 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_without_conditions.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_without_conditions.yml
deleted file mode 100644
index 1fb4cde..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_without_conditions.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream pageviews_original (name string, age bigint, userid string, pageid
- bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:00 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_without_conditions WITH (kafka_topic=''create_stream_as_without_conditions'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['222']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_without_conditions
- WITH (kafka_topic=''create_stream_as_without_conditions'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original;","commandId":"stream/CREATE_STREAM_AS_WITHOUT_CONDITIONS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:01 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder.yml b/tests/vcr_cassettes_backup/ksql_create_stream_by_builder.yml
deleted file mode 100644
index d78aae9..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['41']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''TEST_TABLE'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED test_table;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['55']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n test_table;","commandId":"stream/TEST_TABLE/drop","commandStatus":{"status":"SUCCESS","message":"Source
- TEST_TABLE does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream test_table (viewtime bigint, userid varchar, pageid
- varchar) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['146']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream test_table
- (viewtime bigint, userid varchar, pageid varchar) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/TEST_TABLE/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder_api.yml b/tests/vcr_cassettes_backup/ksql_create_stream_by_builder_api.yml
deleted file mode 100644
index 647f201..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder_api.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['41']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- test_table;","sourceDescription":{"name":"TEST_TABLE","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:16 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['55']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n test_table;","commandId":"stream/TEST_TABLE/drop","commandStatus":{"status":"SUCCESS","message":"Source
- TEST_TABLE was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:16 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream test_table (viewtime bigint, userid varchar, pageid
- varchar) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['146']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream test_table
- (viewtime bigint, userid varchar, pageid varchar) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/TEST_TABLE/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:16 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_show_table.yml b/tests/vcr_cassettes_backup/ksql_show_table.yml
deleted file mode 100644
index 0d2d6fb..0000000
--- a/tests/vcr_cassettes_backup/ksql_show_table.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "show tables;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['24']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"tables","statementText":"show tables;","tables":[]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_topic_already_registered.yml b/tests/vcr_cassettes_backup/ksql_topic_already_registered.yml
deleted file mode 100644
index 6ecb9fe..0000000
--- a/tests/vcr_cassettes_backup/ksql_topic_already_registered.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED foo_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['40']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''FOO_TABLE'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED foo_table;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:14 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n foo_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['54']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n foo_table;","commandId":"stream/FOO_TABLE/drop","commandStatus":{"status":"SUCCESS","message":"Source
- FOO_TABLE does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:14 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream foo_table (name string, age bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['121']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream foo_table
- (name string, age bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","commandId":"stream/FOO_TABLE/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:14 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream foo_table (name string, age bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['121']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream foo_table (name string, age bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/raise_create_error_no_topic.yml b/tests/vcr_cassettes_backup/raise_create_error_no_topic.yml
deleted file mode 100644
index 3cbc375..0000000
--- a/tests/vcr_cassettes_backup/raise_create_error_no_topic.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream foo_table (name string, age bigint) WITH (kafka_topic=''this_topic_is_not_exist'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['133']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Kafka
- topic does not exist: this_topic_is_not_exist","stack_trace":["io.confluent.ksql.ddl.commands.AbstractCreateStreamCommand.registerTopicFirst(AbstractCreateStreamCommand.java:184)","io.confluent.ksql.ddl.commands.AbstractCreateStreamCommand.<init>(AbstractCreateStreamCommand.java:81)","io.confluent.ksql.ddl.commands.CreateStreamCommand.<init>(CreateStreamCommand.java:34)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:713)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream foo_table (name string, age bigint) WITH (kafka_topic=''this_topic_is_not_exist'',
- value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:13 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_drop_all_streams.yml b/tests/vcr_cassettes_backup/utils_test_drop_all_streams.yml
deleted file mode 100644
index e3dd6b7..0000000
--- a/tests/vcr_cassettes_backup/utils_test_drop_all_streams.yml
+++ /dev/null
@@ -1,144 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['69']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_all_streams;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:17 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['83']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_all_streams;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:17 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_all_streams (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['215']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_all_streams
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:17 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "SHOW STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['25']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"streams","statementText":"SHOW STREAMS;","streams":[{"type":"STREAM","name":"TEST_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH","topic":"create_stream_as_with_conditions_without_startwith","format":"DELIMITED"},{"type":"STREAM","name":"PREBID_TRAFFIC_LOG_TOTAL_STREAM","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"PAGEVIEWS_ORIGINAL","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITHOUT_CONDITIONS","topic":"create_stream_as_without_conditions","format":"DELIMITED"},{"type":"STREAM","name":"KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS","topic":"ksql_python_test_exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"FOO_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND","topic":"create_stream_as_with_conditions_with_startwith_with_and","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH","topic":"create_stream_as_with_conditions_with_startwith","format":"DELIMITED"}]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['69']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['83']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['69']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_all_streams;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_drop_stream.yml b/tests/vcr_cassettes_backup/utils_test_drop_stream.yml
deleted file mode 100644
index b329d22..0000000
--- a/tests/vcr_cassettes_backup/utils_test_drop_stream.yml
+++ /dev/null
@@ -1,145 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_stream (viewtime bigint,
- userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['209']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_stream
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_drop_stream_create_as_stream.yml b/tests/vcr_cassettes_backup/utils_test_drop_stream_create_as_stream.yml
deleted file mode 100644
index b8c8de2..0000000
--- a/tests/vcr_cassettes_backup/utils_test_drop_stream_create_as_stream.yml
+++ /dev/null
@@ -1,187 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_stream (viewtime bigint,
- userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['209']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_stream
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_stream_as as select
- * from ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['114']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_stream_as
- as select * from ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['67']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream_as;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","readQueries":[],"writeQueries":[{"sinks":["KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS"],"id":"CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4","queryString":"CREATE
- STREAM ksql_python_test_test_drop_stream_as as select * from ksql_python_test_test_drop_stream;"}],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","partitions":4,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:23 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['67']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream_as;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","readQueries":[],"writeQueries":[{"sinks":["KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS"],"id":"CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4","queryString":"CREATE
- STREAM ksql_python_test_test_drop_stream_as as select * from ksql_python_test_test_drop_stream;"}],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","partitions":4,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:24 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "TERMINATE CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['66']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"TERMINATE CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4;","commandId":"terminate/CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4/execute","commandStatus":{"status":"QUEUED","message":"Statement
- written to command topic"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:24 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['81']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream_as;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:29 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['67']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream_as;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:30 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_get_all_streams.yml b/tests/vcr_cassettes_backup/utils_test_get_all_streams.yml
deleted file mode 100644
index 3dfbca3..0000000
--- a/tests/vcr_cassettes_backup/utils_test_get_all_streams.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['68']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_get_all_streams;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:31 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_get_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['82']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_get_all_streams;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:31 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_all_streams (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['214']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_all_streams
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:31 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "SHOW STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['25']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"streams","statementText":"SHOW STREAMS;","streams":[{"type":"STREAM","name":"TEST_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH","topic":"create_stream_as_with_conditions_without_startwith","format":"DELIMITED"},{"type":"STREAM","name":"PREBID_TRAFFIC_LOG_TOTAL_STREAM","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS","topic":"ksql_python_test_exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"PAGEVIEWS_ORIGINAL","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITHOUT_CONDITIONS","topic":"create_stream_as_without_conditions","format":"DELIMITED"},{"type":"STREAM","name":"FOO_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND","topic":"create_stream_as_with_conditions_with_startwith_with_and","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH","topic":"create_stream_as_with_conditions_with_startwith","format":"DELIMITED"}]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:32 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_get_dependent_queries.yml b/tests/vcr_cassettes_backup/utils_test_get_dependent_queries.yml
deleted file mode 100644
index 46a1d57..0000000
--- a/tests/vcr_cassettes_backup/utils_test_get_dependent_queries.yml
+++ /dev/null
@@ -1,108 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_dependent_queries;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['74']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES'' in the
- Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_get_dependent_queries;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_get_dependent_queries;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['88']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_get_dependent_queries;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_dependent_queries (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['219']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_dependent_queries
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_dependent_queries_as
- as select * from ksql_python_test_test_get_dependent_queries;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['134']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_dependent_queries_as
- as select * from ksql_python_test_test_get_dependent_queries;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_dependent_queries_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['77']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_get_dependent_queries_as;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS","readQueries":[],"writeQueries":[{"sinks":["KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS"],"id":"CSAS_KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS_5","queryString":"CREATE
- STREAM ksql_python_test_test_get_dependent_queries_as as select * from ksql_python_test_test_get_dependent_queries;"}],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS","partitions":4,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:35 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_get_stream_info.yml b/tests/vcr_cassettes_backup/utils_test_get_stream_info.yml
deleted file mode 100644
index a6378fb..0000000
--- a/tests/vcr_cassettes_backup/utils_test_get_stream_info.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_stream_info;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['68']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_get_stream_info;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:44 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_get_stream_info;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['82']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_get_stream_info;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:45 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_stream_info (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['214']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_stream_info
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:45 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_stream_info;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['68']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_get_stream_info;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:45 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
| client.ksql('show tables') returns error 'not all arguments converted during string formatting'
```
from ksql import KSQLAPI
api_key = 'ZD74E3GRK4QXWO6W'
api_secret = 'RByQinKf4ZYodiBLuCKybx92SSPrQwEwnA8DOaVfJEhAVf3LQ096yFteZkep4XKx'
ksql_endpoint = 'https://pksqlc-42o7q.us-east-1.aws.confluent.cloud:443'
client = KSQLAPI(ksql_endpoint, api_key=api_key, secret=api_secret)
client.ksql('show tables')
```
This code returns:
`not all arguments converted during string formatting`
The offending code is in line 108 of api.py
```
base64string = base64.b64encode('{}:{}' % (self.api_key, self.secret))
```
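A minimal sketch of why this call fails and of how such a credentials string is usually assembled (the variable names and the correction shown are assumptions for illustration, not the library's actual code):
```
import base64

api_key, secret = "key", "secret"  # placeholder values

# '{}:{}' has no %-style conversion specifiers, so applying '%' to it raises
# TypeError: not all arguments converted during string formatting
# bad = '{}:{}' % (api_key, secret)

# Building the value with str.format instead (illustrative only):
credentials = "{}:{}".format(api_key, secret)
base64string = base64.b64encode(credentials.encode()).decode()
print(base64string)
```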
Other calls to client return the same error, such as
`client.query('select userid from users')` | 0.0 | dbd864e2f424805a7c3170dbdfe3723fe7aea403 | [
"tests/test_client.py::TestKSQLAPI::test_ksql_show_tables_with_api_key"
]
| [
"tests/test_client.py::TestKSQLAPI::test_bad_requests",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_with_conditions_with_startwith",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_with_conditions_with_startwith_with_and",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_with_conditions_without_startwith",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_without_conditions",
"tests/test_client.py::TestKSQLAPI::test_get_ksql_version_success",
"tests/test_client.py::TestKSQLAPI::test_get_properties",
"tests/test_client.py::TestKSQLAPI::test_get_url",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream_as_with_wrong_timestamp",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream_by_builder",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream_by_builder_api",
"tests/test_client.py::TestKSQLAPI::test_ksql_server_healthcheck",
"tests/test_client.py::TestKSQLAPI::test_ksql_show_tables",
"tests/test_client.py::TestKSQLAPI::test_ksql_show_tables_with_no_semicolon",
"tests/test_client.py::TestKSQLAPI::test_raise_create_error_no_topic",
"tests/test_client.py::TestKSQLAPI::test_raise_create_error_topic_already_registered",
"tests/test_client.py::TestKSQLAPI::test_with_timeout"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-08-18 17:34:25+00:00 | mit | 1,440 |
|
bskinn__sphobjinv-160 | diff --git a/doc/source/cli/convert.rst b/doc/source/cli/convert.rst
index 5d501ad..ac49179 100644
--- a/doc/source/cli/convert.rst
+++ b/doc/source/cli/convert.rst
@@ -158,7 +158,7 @@ If processing of JSON files by API URL is desirable, please
Path (or URL, if :option:`--url` is specified) to file to be converted.
If passed as ``-``, |soi| will attempt import of a plaintext or JSON
- inventory from ``stdin``.
+ inventory from ``stdin`` (incompatible with :option:`--url`).
.. option:: outfile
@@ -191,7 +191,8 @@ If processing of JSON files by API URL is desirable, please
.. option:: -u, --url
- Treat :option:`infile` as a URL for download.
+ Treat :option:`infile` as a URL for download. Cannot be used when
+ :option:`infile` is passed as ``-``.
.. option:: -e, --expand
diff --git a/doc/source/cli/suggest.rst b/doc/source/cli/suggest.rst
index 6ffccb8..67de7e2 100644
--- a/doc/source/cli/suggest.rst
+++ b/doc/source/cli/suggest.rst
@@ -67,7 +67,7 @@ If download of JSON files by URL is desirable, please
Path (or URL, if :option:`--url` is specified) to file to be searched.
If passed as ``-``, |soi| will attempt import of a plaintext or JSON
- inventory from ``stdin``.
+ inventory from ``stdin`` (incompatible with :option:`--url`).
.. option:: search
@@ -103,7 +103,8 @@ If download of JSON files by URL is desirable, please
.. option:: -u, --url
- Treat :option:`infile` as a URL for download.
+ Treat :option:`infile` as a URL for download. Cannot be used when
+ :option:`infile` is passed as ``-``.
diff --git a/src/sphobjinv/cli/core.py b/src/sphobjinv/cli/core.py
index b74fd66..9e06b0b 100644
--- a/src/sphobjinv/cli/core.py
+++ b/src/sphobjinv/cli/core.py
@@ -203,6 +203,8 @@ def main():
# These inventory-load functions should call
# sys.exit(n) internally in error-exit situations
if params[PrsConst.URL]:
+ if params[PrsConst.INFILE] == "-":
+ prs.error("argument -u/--url not allowed with '-' as infile")
inv, in_path = inv_url(params)
elif params[PrsConst.INFILE] == "-":
inv = inv_stdin(params)
diff --git a/src/sphobjinv/cli/parser.py b/src/sphobjinv/cli/parser.py
index ae3a47a..b034ba6 100644
--- a/src/sphobjinv/cli/parser.py
+++ b/src/sphobjinv/cli/parser.py
@@ -299,9 +299,8 @@ def getparser():
spr_convert.add_argument(
"-" + PrsConst.QUIET[0],
"--" + PrsConst.QUIET,
- help="Suppress printing of status messages "
- "and overwrite output files "
- "without prompting",
+ help="Suppress printing of status messages and "
+ "overwrite output files without prompting",
action="store_true",
)
@@ -309,7 +308,9 @@ def getparser():
spr_convert.add_argument(
"-" + PrsConst.URL[0],
"--" + PrsConst.URL,
- help="Treat 'infile' as a URL for download",
+ help="Treat 'infile' as a URL for download. Cannot be used with --{}.".format(
+ PrsConst.URL
+ ),
action="store_true",
)
@@ -358,7 +359,9 @@ def getparser():
spr_suggest.add_argument(
"-" + PrsConst.URL[0],
"--" + PrsConst.URL,
- help="Treat 'infile' as a URL for download",
+ help="Treat 'infile' as a URL for download. Cannot be used with --{}.".format(
+ PrsConst.URL
+ ),
action="store_true",
)
| bskinn/sphobjinv | c3b3b42779015826bcb5f4f1d44c025d2d9da30d | diff --git a/tests/test_cli.py b/tests/test_cli.py
index a122112..6c0e450 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -460,6 +460,12 @@ class TestFail:
file_url = "file:///" + str(in_path.resolve())
run_cmdline_test(["convert", "plain", "-u", file_url], expect=1)
+ def test_clifail_no_url_with_stdin(self, run_cmdline_test):
+ """Confirm parser exit when -u passed with "-" infile."""
+ with stdio_mgr() as (in_, out_, err_):
+ run_cmdline_test(["convert", "plain", "-u", "-"], expect=2)
+ assert "--url not allowed" in err_.getvalue()
+
class TestStdio:
"""Tests for the stdin/stdout functionality."""
| Revise CLI to make read-from-stdin incompatible with '-u'
Ambiguous/inconsistent set of inputs, otherwise.
- [x] Implementation
- [x] Tests
- [x] Docs | 0.0 | c3b3b42779015826bcb5f4f1d44c025d2d9da30d | [
"tests/test_cli.py::TestFail::test_clifail_no_url_with_stdin"
]
| [
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[src_path-no_dst_path-no_dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_default_outname[inv--json]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[src_path-dst_path-no_dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_stdin_clobber",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[no_src_path-dst_path-no_dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_default_outname[txt--zlib]",
"tests/test_cli.py::TestConvertGood::test_cli_json_no_metadata_url",
"tests/test_cli.py::TestConvertGood::test_cli_convert_default_outname[json--zlib]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_expandcontract",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[src_path-dst_path-dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_default_outname[txt--json]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_cycle_formats[testall_inv_path21]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_default_outname[inv--plain]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[no_src_path-no_dst_path-dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[no_src_path-dst_path-dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_json_export_import",
"tests/test_cli.py::TestConvertGood::test_cli_convert_default_outname[json--plain]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[src_path-no_dst_path-dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_convert_various_pathargs[no_src_path-no_dst_path-no_dst_name]",
"tests/test_cli.py::TestConvertGood::test_cli_overwrite_prompt_and_behavior",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_withscore",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_noresults",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_withindex",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_long_list[y\\n--t-57]",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_long_list[--at-56]",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_long_list[n\\n--t-1]",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_nameonly",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_withscoreandindex",
"tests/test_cli.py::TestSuggestGood::test_cli_suggest_many_results_stdin",
"tests/test_cli.py::TestStdio::test_cli_stdio_input[SourceTypes.BytesPlaintext]",
"tests/test_cli.py::TestStdio::test_cli_stdio_output[plain]",
"tests/test_cli.py::TestStdio::test_cli_stdio_input[SourceTypes.DictJSON]",
"tests/test_cli.py::TestStdio::test_cli_stdio_output[json]",
"tests/test_cli.py::TestStdio::test_cli_stdio_zlib_input_fails",
"tests/test_cli.py::TestStdio::test_cli_stdio_zlib_output_fails",
"tests/test_cli.py::TestFail::test_clifail_convert_localfile_as_url",
"tests/test_cli.py::TestFail::test_clifail_convert_badoutputdir",
"tests/test_cli.py::TestFail::test_clifail_convert_wrongfiletype",
"tests/test_cli.py::TestFail::test_clifail_convert_nosrc[False]",
"tests/test_cli.py::TestFail::test_clifail_convert_missingfile",
"tests/test_cli.py::TestFail::test_clifail_convert_pathonlysrc",
"tests/test_cli.py::TestFail::test_clifail_convert_nosrc[True]",
"tests/test_cli.py::TestMisc::test_cli_invocations[python",
"tests/test_cli.py::TestMisc::test_cli_invocations[sphobjinv]",
"tests/test_cli.py::TestMisc::test_cli_noargs_shows_help",
"tests/test_cli.py::TestMisc::test_cli_version_exits_ok"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-11-25 13:37:29+00:00 | mit | 1,441 |
|
bskinn__sphobjinv-226 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index c092f6e..f4349e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project strives to adhere to
[Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+ * UnicodeDecodeErrors are ignored when converting inventories to ASCII.
### [2.2.1] - 2022-02-05
diff --git a/src/sphobjinv/_vendored/fuzzywuzzy/utils.py b/src/sphobjinv/_vendored/fuzzywuzzy/utils.py
index ab756b4..28ce02f 100644
--- a/src/sphobjinv/_vendored/fuzzywuzzy/utils.py
+++ b/src/sphobjinv/_vendored/fuzzywuzzy/utils.py
@@ -9,7 +9,7 @@ trans_table=bytes.maketrans(table_from.encode(), table_to.encode()) # B Skinn 2
def asciionly(s):
- return s.encode().translate(None, bad_chars).decode() # B Skinn 2021-12-11
+ return s.encode().translate(None, bad_chars).decode(errors='replace') # B Skinn 2021-12-11
# remove non-ASCII characters from strings
def asciidammit(s):
@@ -32,8 +32,4 @@ def validate_string(s):
def full_process(s):
s = asciidammit(s)
# B Skinn 2021-12-11
- return s.encode().translate(trans_table, bad_chars).decode().strip()
-
-
-
-
+ return s.encode().translate(trans_table, bad_chars).decode(errors='replace').strip()
| bskinn/sphobjinv | 2123ff8826228e411f87ea704e0cb2643048b2f0 | diff --git a/tests/test_api_good.py b/tests/test_api_good.py
index 8bb8910..c05e3b1 100644
--- a/tests/test_api_good.py
+++ b/tests/test_api_good.py
@@ -474,14 +474,6 @@ class TestInventory:
"""Confirm that a suggest operation works on all smoke-test inventories."""
inv = soi.Inventory(testall_inv_path)
- if "fonttools" in inv.project.lower():
- try:
- inv.suggest("class")
- except UnicodeDecodeError:
- pytest.xfail("Known unhandled bad character in decode operation")
- else: # pragma: no cover
- pytest.fail("'fonttools' was expected to fail, but didn't")
-
inv.suggest("class")
@pytest.mark.testall
| UnicodeDecodeError
**Brief description**
<!-- Clear and concise description of the bug. -->
Not clear if this is a bug/limitation in sphobjinv or if the objects.inv in question is messed up, but I get a UnicodeDecodeError for
```
sphobjinv suggest -u https://fonttools.readthedocs.io/en/latest/objects.inv TTFont
```
(and related versions of the call)
**Expected behavior**
<!-- Description of what you expected to happen. -->
A suggestion.
**Actual behavior**
<!-- Description of what actually happened. -->
```
No inventory at provided URL.
Attempting "https://fonttools.readthedocs.io/en/latest/objects.inv" ...
Remote inventory found.
Traceback (most recent call last):
File "/local/data1/miniconda3/bin/sphobjinv", line 8, in <module>
sys.exit(main())
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/cli/core.py", line 214, in main
do_suggest(inv, params)
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/cli/core.py", line 109, in do_suggest
results = inv.suggest(
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/inventory.py", line 530, in suggest
for match, score in fwp.extract(name, srch_list, limit=None)
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/_vendored/fuzzywuzzy/process.py", line 60, in extract
processed = processor(choice)
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/_vendored/fuzzywuzzy/process.py", line 51, in <lambda>
processor = lambda x: utils.asciidammit(x)
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/_vendored/fuzzywuzzy/utils.py", line 17, in asciidammit
return asciionly(s)
File "/local/data1/miniconda3/lib/python3.8/site-packages/sphobjinv/_vendored/fuzzywuzzy/utils.py", line 12, in asciionly
return s.encode().translate(None, bad_chars).decode() # B Skinn 2021-12-11
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe2 in position 21: invalid continuation byte
```
**To reproduce**
<!--
Steps to reproduce the behavior:
1. Open '...'
2. Type '...'
3. See error
-->
```
sphobjinv suggest -u https://fonttools.readthedocs.io/en/latest/objects.inv TTFont
```
**Attachments**
<!--
If possible, please paste in an inventory file that
demonstrates the bug, or provide a link to a relevant
inventory on web.
If relevant, paste in screenshot(s) demonstrating
the bug.
-->
**System information**
- Device:
- OS: CentOS 7
**Python environment**
*Python*
<!--
Please paste the output of `$ {python} --version --version`
executed in the relevant environment.
-->
Python 3.8.12 | packaged by conda-forge | (default, Jan 30 2022, 23:42:07)
[GCC 9.4.0]
*Libraries*
<!--
Please paste the output of `$ pip list` or `$ pip freeze`
executed in the relevant environment.
-->
Skipping this, let me know if required.
**Additional information**
<!-- Add any other context about the problem here. -->
| 0.0 | 2123ff8826228e411f87ea704e0cb2643048b2f0 | [
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[fonttools]"
]
| [
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[no_op--fname_zlib]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[psutil]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_jsonvalidate[expand]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[opencv]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[no_op--zlib]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pymongo]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[sqlalchemy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_default_none_instantiation",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[cookiecutter]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[str--zlib]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_jsonvalidate[contract]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[nltk]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[numpy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[str--plaintext]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pyserial]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[sklearn]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[h5py]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_reimportwithmetadata[int]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[matplotlib]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[noinfo]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pyexcel]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[mypy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[beaker]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pandas]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[scrapy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[yt]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pelican]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[eyeD3]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[cclib]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[requests]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[plone]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[jinja2]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_reimport",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[faker]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[mistune]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[tinydb]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[coverage]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[attrs]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[str--fname_zlib]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[str--fname_plain]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[django_channels]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[gspread]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[hypothesis]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_jsonvalidate[none]",
"tests/test_api_good.py::TestInventory::test_api_inventory_namesuggest",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[click]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[scapy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[attrs_20_3_0]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[NAPALM]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[sarge]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pytest]",
"tests/test_api_good.py::TestInventory::test_api_inventory_datafile_gen_and_reimport[attrs]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[no_op--plaintext]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[python]",
"tests/test_api_good.py::TestInventory::test_api_inventory_toosmallflatdict_importbutignore",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[jsonschema]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pdfminer]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[tox]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[mkdoc_zlib0]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[celery]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[noproject]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[bokeh]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[flask]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[bootstrap_datepicker]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[twython]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_reimportwithmetadata[str]",
"tests/test_api_good.py::TestInventory::test_api_inventory_equality",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pyqt]",
"tests/test_api_good.py::TestInventory::test_api_inventory_one_object_flatdict",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[scipy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_bytes_fname_instantiation[no_op--fname_plain]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pygame]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[flake8]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[pingo]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[sympy]",
"tests/test_api_good.py::TestInventory::test_api_inventory_flatdict_reimportwithmetadata[dict]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[rocket]",
"tests/test_api_good.py::TestInventory::test_api_inventory_suggest_operation[sphinx]",
"tests/test_api_good.py::TestInventory::test_api_inventory_matches_sphinx_ifile[attrs]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_equality",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[False-True---byte_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobjstr_flatdictfxn",
"tests/test_api_good.py::TestDataObj::test_api_dataobjbytes_init",
"tests/test_api_good.py::TestDataObj::test_api_dataobjstr_init",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[True-True---str_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_evolvename[use_bytes_True]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[False-False---byte_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[True-True---byte_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[True-False---byte_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobjbytes_flatdictfxn",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[False-True---str_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_evolvename[use_bytes_False]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[True-False---str_lines]",
"tests/test_api_good.py::TestDataObj::test_api_dataobj_datalinefxn[False-False---str_lines]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[url-url]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[dict_json-dict_json]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[manual-manual]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[bytes_zlib-bytes_zlib]",
"tests/test_api_good.py::TestCore::test_api_compress[no_op]",
"tests/test_api_good.py::TestCore::test_api_compress[str]",
"tests/test_api_good.py::TestCore::test_flatdict_schema_valid",
"tests/test_api_good.py::TestCore::test_api_data_regex[-3-datadict1]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[bytes_plain-bytes_plain]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[fname_plain-fname_plain]",
"tests/test_api_good.py::TestCore::test_source_types_iteration[fname_zlib-fname_zlib]",
"tests/test_api_good.py::TestCore::test_api_data_regex[0-datadict0]",
"tests/test_api_good.py::TestCore::test_api_decompress[no_op]",
"tests/test_api_good.py::TestCore::test_api_compress_win_eols",
"tests/test_api_good.py::TestCore::test_api_decompress[str]"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2022-03-15 15:43:41+00:00 | mit | 1,442 |
|
bsquizz__ocdeployer-38 | diff --git a/ocdeployer/env.py b/ocdeployer/env.py
index 7ef3e7a..c644e9a 100644
--- a/ocdeployer/env.py
+++ b/ocdeployer/env.py
@@ -277,15 +277,12 @@ class LegacyEnvConfigHandler(EnvConfigHandler):
def _get_env_name(file_path):
return os.path.splitext(os.path.basename(file_path))[0]
- def __init__(self, env_files):
+ def __init__(self, env_files, env_dir_name="env"):
self.env_files = env_files
- self._last_service_set = None
_env_names = [self._get_env_name(fp) for fp in self.env_files]
- self.env_names = _dedupe_preserve_order(_env_names)
- if len(_env_names) != len(self.env_names):
- log.warning("Duplicate env names provided: %s", _env_names)
+ super().__init__(_env_names, env_dir_name)
- def _load_vars_per_env(self):
+ def _load_vars_per_env(self, path=None):
data = {}
for file_path in self.env_files:
diff --git a/ocdeployer/templates.py b/ocdeployer/templates.py
index 005fea1..4560693 100644
--- a/ocdeployer/templates.py
+++ b/ocdeployer/templates.py
@@ -160,6 +160,17 @@ class Template(object):
with open(self.path, "r") as f:
return self._jinja_safe(f.read())
+ @staticmethod
+ def _format_oc_parameter(param_value):
+ """
+ Hack around yaml dump behaviour for different datatypes
+ Examples:
+ yaml.dump(True) -> 'true\n...\n'
+ yaml.dump('True') -> "'True'\n"
+ yaml.dump('123') -> "'123'\n"
+ """
+ return yaml.dump(param_value).replace("\n...\n", "").strip()
+
def _process_via_oc(self, content, parameters=None, label=None):
"""
Run 'oc process' on the template and update content with the processed output
@@ -176,10 +187,11 @@ class Template(object):
if not parameters:
parameters = {}
- # Create set of param strings to pass into 'oc process'
params_and_vals = {}
for param_name, param_value in parameters.items():
- params_and_vals[param_name] = "{}={}".format(param_name, param_value)
+ params_and_vals[param_name] = "{}={}".format(
+ param_name, self._format_oc_parameter(param_value)
+ )
extra_args = []
# Only insert the parameter if it was defined in the template
diff --git a/requirements.txt b/requirements.txt
index 9630002..8547cb2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-PyYAML
+PyYAML==5.3
sh
prompter
click
| bsquizz/ocdeployer | c121a6ac9286389fb33a67190e7ecfbb9544e5ce | diff --git a/tests/test_deploy.py b/tests/test_deploy.py
index ae53a1e..fcff4f4 100644
--- a/tests/test_deploy.py
+++ b/tests/test_deploy.py
@@ -10,7 +10,7 @@ def patched_runner(env_values, mock_load_vars_per_env, legacy=False):
if not env_values:
handler = None
elif legacy:
- handler = LegacyEnvConfigHandler(env_files=env_values)
+ handler = LegacyEnvConfigHandler(env_files=env_values, env_dir_name="envTEST")
handler.env_names = env_values
else:
handler = EnvConfigHandler(env_names=env_values, env_dir_name="envTEST")
diff --git a/tests/test_templates.py b/tests/test_templates.py
new file mode 100644
index 0000000..d55d6fe
--- /dev/null
+++ b/tests/test_templates.py
@@ -0,0 +1,19 @@
+import pytest
+
+from ocdeployer.templates import Template
+
+
[email protected](
+ 'value,expected',
+ (
+ (True, 'true'),
+ ('True', "'True'"),
+ ('true', "'true'"),
+ ('123', "'123'"),
+ (123, '123'),
+ ('123:123:123', '123:123:123'),
+ ('some text', 'some text')
+ )
+)
+def test_template_oc_param_format(value, expected):
+ assert Template._format_oc_parameter(value) == expected
| AttributeError: 'LegacyEnvConfigHandler' object has no attribute 'env_dir_name'
ocdeployer deploy -f --sets vmaas --template-dir buildfactory -e builder-env.yml vmaas-qe --secrets-local-dir secrets/sanitized
```
INFO:ocdeployer.deploy:Handling config for service set 'vmaas'
Traceback (most recent call last):
File "/iqe_venv/bin/ocdeployer", line 8, in <module>
sys.exit(main())
File "/iqe_venv/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/iqe_venv/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/iqe_venv/lib64/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/iqe_venv/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/iqe_venv/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/iqe_venv/lib64/python3.6/site-packages/ocdeployer/__main__.py", line 375, in deploy_to_project
dry_run=False,
File "/iqe_venv/lib64/python3.6/site-packages/ocdeployer/deploy.py", line 523, in run
all_processed_templates[service_set] = self._deploy_service_set(service_set)
File "/iqe_venv/lib64/python3.6/site-packages/ocdeployer/deploy.py", line 430, in _deploy_service_set
set_cfg = self._get_service_set_cfg(service_set, dir_path)
File "/iqe_venv/lib64/python3.6/site-packages/ocdeployer/deploy.py", line 421, in _get_service_set_cfg
set_env_cfg = self.env_config_handler.get_service_set_env_cfg(dir_path, service_set)
File "/iqe_venv/lib64/python3.6/site-packages/ocdeployer/env.py", line 190, in get_service_set_env_cfg
self._get_service_set_vars(service_set_dir, service_set), service_set=service_set
File "/iqe_venv/lib64/python3.6/site-packages/ocdeployer/env.py", line 139, in _get_service_set_vars
path = os.path.join(service_set_dir, self.env_dir_name)
AttributeError: 'LegacyEnvConfigHandler' object has no attribute 'env_dir_name'
``` | 0.0 | c121a6ac9286389fb33a67190e7ecfbb9544e5ce | [
"tests/test_deploy.py::test__get_variables_sanity[legacy=true]",
"tests/test_deploy.py::test__get_variables_merge_from_global[legacy=true]",
"tests/test_deploy.py::test__get_variables_service_overwrite_parameter[legacy=true]",
"tests/test_deploy.py::test__get_variables_service_overwrite_variable[legacy=true]",
"tests/test_deploy.py::test__get_variables_component_overwrite_parameter[legacy=true]",
"tests/test_deploy.py::test__get_variables_component_overwrite_variable[legacy=true]",
"tests/test_deploy.py::test__get_variables_multiple_envs_legacy",
"tests/test_templates.py::test_template_oc_param_format[True-true]",
"tests/test_templates.py::test_template_oc_param_format[True-'True']",
"tests/test_templates.py::test_template_oc_param_format[true-'true']",
"tests/test_templates.py::test_template_oc_param_format[123-'123']",
"tests/test_templates.py::test_template_oc_param_format[123-123]",
"tests/test_templates.py::test_template_oc_param_format[123:123:123-123:123:123]",
"tests/test_templates.py::test_template_oc_param_format[some"
]
| [
"tests/test_deploy.py::test_cfg_no_env_given",
"tests/test_deploy.py::test_cfg_no_env_cfg",
"tests/test_deploy.py::test_cfg_base_env_cfg",
"tests/test_deploy.py::test_cfg_set_env_cfg",
"tests/test_deploy.py::test__no_env_given",
"tests/test_deploy.py::test__get_variables_sanity[legacy=false]",
"tests/test_deploy.py::test__get_variables_merge_from_global[legacy=false]",
"tests/test_deploy.py::test__get_variables_service_overwrite_parameter[legacy=false]",
"tests/test_deploy.py::test__get_variables_service_overwrite_variable[legacy=false]",
"tests/test_deploy.py::test__get_variables_component_overwrite_parameter[legacy=false]",
"tests/test_deploy.py::test__get_variables_component_overwrite_variable[legacy=false]",
"tests/test_deploy.py::test__get_variables_base_and_service_set",
"tests/test_deploy.py::test__get_variables_service_set_only",
"tests/test_deploy.py::test__get_variables_service_set_overrides",
"tests/test_deploy.py::test__get_variables_multiple_envs",
"tests/test_deploy.py::test__get_variables_multiple_envs_precedence",
"tests/test_deploy.py::test__get_variables_multiple_envs_precedence_reversed"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-02-18 13:59:57+00:00 | mit | 1,443 |
|
bst-mug__acres-101 | diff --git a/acres/fastngram/fastngram.py b/acres/fastngram/fastngram.py
new file mode 100644
index 0000000..276b87d
--- /dev/null
+++ b/acres/fastngram/fastngram.py
@@ -0,0 +1,109 @@
+"""
+A faster version of n-gram matching that uses dictionaries for speed-up.
+"""
+
+from collections import OrderedDict
+from typing import List, Dict, Set, Tuple, Iterator
+
+from acres.preprocess import resource_factory
+
+
+class ContextMap:
+ """
+ A map of contexts to center words.
+ """
+
+ def __init__(self) -> None:
+ self.map = {} # type: Dict[Tuple[str, str], Set[str]]
+
+ def add_context(self, center: str, left_context: str, right_context: str) -> None:
+ """
+ Add a center n-gram with a context.
+
+ :param center:
+ :param left_context:
+ :param right_context:
+ :return:
+ """
+ context = (left_context, right_context)
+ self.map.setdefault(context, set())
+ self.map[context].add(center)
+
+ def centers(self, left_context: str, right_context: str) -> Set[str]:
+ """
+ Find center n-grams that happen on a given context.
+
+ :param left_context:
+ :param right_context:
+ :return:
+ """
+ context = (left_context, right_context)
+ if context not in self.map:
+ return set()
+ return self.map[context]
+
+
+def expandn(acronym: str, left_context: str = "", right_context: str = "") -> Iterator[str]:
+ """
+ Find an unlimited set of expansion candidates for an acronym given its left and right context. \
+ Note that no filtering is done here.
+
+ :param acronym: Not used.
+ :param left_context:
+ :param right_context:
+ :return:
+ """
+ model = resource_factory.get_fastngram()
+
+ # TODO support for n-grams (n > 1). May need an OrderedDict.
+ count_map = model[1]
+ for freq, context_map in count_map.items():
+ # TODO require a min_freq?
+ center_ngrams = context_map.centers(left_context, right_context)
+ for ngram in center_ngrams:
+ yield ngram
+
+
+def expand(acronym: str, left_context: str = "", right_context: str = "") -> List[str]:
+ """
+ Find a limited set of expansion candidates for an acronym given its left and right context.
+
+ :param acronym:
+ :param left_context:
+ :param right_context:
+ :return:
+ """
+ # Limit expansions while we don't use generators downstream
+ # TODO 1k may not be enough if we're not doing ANY filtering here (e.g. initial).
+ # https://github.com/bst-mug/acres/issues/28
+ limit = 1000
+ i = 0
+ ret = [] # type: List[str]
+ for ngram in expandn(acronym, left_context, right_context):
+ ret.append(ngram)
+ i += 1
+ if i > limit:
+ break
+ return ret
+
+
+def optimizer(ngrams: Dict[str, int]) -> 'Dict[int, OrderedDict[int, ContextMap]]':
+ """
+ Create a search-optimized represenation of an ngram-list.
+
+ :param ngrams:
+ :return:
+ """
+ model = {} # type: Dict[int, OrderedDict[int, ContextMap]]
+
+ # Ensure ngrams are ordered by decreasing frequency.
+ sorted_ngrams = sorted(ngrams.items(), key=lambda x: x[1], reverse=True)
+
+ for ngram, freq in sorted_ngrams:
+ # size = len(ngram.split(" "))
+ context = ContextMap()
+ context.add_context(ngram, "", "")
+ model.setdefault(1, OrderedDict())
+ model[1][freq] = context
+
+ return model
diff --git a/acres/preprocess/resource_factory.py b/acres/preprocess/resource_factory.py
index 9ca2824..6f1d3fe 100644
--- a/acres/preprocess/resource_factory.py
+++ b/acres/preprocess/resource_factory.py
@@ -6,10 +6,12 @@ This module provides methods for lazily loading resources.
import logging
import os.path
import pickle
+from collections import OrderedDict
from typing import Dict, Set, List, Tuple, Any
from gensim.models import Word2Vec
+from acres.fastngram import fastngram
from acres.nn import train
from acres.preprocess import dumps
from acres.stats import dictionary
@@ -37,6 +39,7 @@ NGRAMSTAT = {} # type: Dict[int, Tuple[int,str]]
CHARACTER_NGRAMS = {} # type: Dict[str, int]
WORD_NGRAMS = {} # type: Dict[str, int]
DICTIONARY = {} # type: Dict[str, List[str]]
+FAST_NGRAM = {} # type: Dict[int, OrderedDict[int, Dict[str, Set[str]]]]
def get_log_corpus_filename() -> str:
@@ -303,6 +306,22 @@ def get_dictionary() -> Dict[str, List[str]]:
return DICTIONARY
+def get_fastngram() -> 'Dict[int, OrderedDict[int, fastngram.ContextMap]]':
+ """
+ Lazy load the fast n-gram model.
+
+ :return:
+ """
+ global FAST_NGRAM
+
+ if not FAST_NGRAM:
+ word_ngrams = get_word_ngrams()
+ logger.info("Optimizing ngrams...")
+ FAST_NGRAM = fastngram.optimizer(word_ngrams)
+
+ return FAST_NGRAM
+
+
def reset() -> None:
"""
Resets global variables to force model recreation.
diff --git a/acres/resolution/resolver.py b/acres/resolution/resolver.py
index e9c7470..162cafc 100644
--- a/acres/resolution/resolver.py
+++ b/acres/resolution/resolver.py
@@ -1,6 +1,7 @@
from enum import Enum
from typing import List, Dict, Tuple
+from acres.fastngram import fastngram
from acres.ngram import finder
from acres.nn import test
from acres.rater import rater
@@ -15,11 +16,13 @@ class Strategy(Enum):
NGRAM = 1
WORD2VEC = 2
DICTIONARY = 3
+ FASTNGRAM = 4
NGRAM_CACHE = {} # type: Dict[Tuple, List[str]]
WORD2VEC_CACHE = {} # type: Dict[Tuple, List[str]]
DICTIONARY_CACHE = {} # type: Dict[Tuple, List[str]]
+FASTNGRAM_CACHE = {} # type: Dict[Tuple, List[str]]
def cached_resolve(acronym: str, left_context: str, right_context: str,
@@ -39,7 +42,8 @@ def cached_resolve(acronym: str, left_context: str, right_context: str,
switcher = {
Strategy.NGRAM: NGRAM_CACHE,
Strategy.WORD2VEC: WORD2VEC_CACHE,
- Strategy.DICTIONARY: DICTIONARY_CACHE
+ Strategy.DICTIONARY: DICTIONARY_CACHE,
+ Strategy.FASTNGRAM: FASTNGRAM_CACHE
}
cache = switcher.get(strategy)
@@ -90,7 +94,8 @@ def resolve(acronym: str, left_context: str, right_context: str, strategy: Strat
switcher = {
Strategy.NGRAM: finder.robust_find_embeddings,
Strategy.WORD2VEC: test.find_candidates,
- Strategy.DICTIONARY: dictionary.expand
+ Strategy.DICTIONARY: dictionary.expand,
+ Strategy.FASTNGRAM: fastngram.expand,
}
func = switcher.get(strategy)
| bst-mug/acres | c102aa63d432cc2cddffb29311344e55c6175045 | diff --git a/tests/conftest.py b/tests/conftest.py
index 9d700a5..7cce7bd 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,5 @@
-import pytest, os
+import os
+import pytest
from acres.preprocess import resource_factory
@@ -49,6 +50,23 @@ def ngramstat():
resource_factory.NGRAMSTAT = old
[email protected](scope="module")
+def word_ngrams():
+ # Setup: save current one and assign a fake one
+ old = resource_factory.WORD_NGRAMS
+ resource_factory.WORD_NGRAMS = {"EKG": 500,
+ "Elektrokardiogramm": 200,
+ "performed EKG yesterday": 50,
+ "performed Elektrokardiogramm yesterday": 20,
+ "performed Elektro kardiogramm yesterday": 10, # sic
+ "performed Effusion yesterday": 5
+ }
+ yield resource_factory.WORD_NGRAMS
+
+ # Teardown: revert back to old
+ resource_factory.WORD_NGRAMS = old
+
+
@pytest.fixture(scope="module")
def index():
# Setup: save current one and assign a fake one
diff --git a/tests/fastngram/test_fastngram.py b/tests/fastngram/test_fastngram.py
new file mode 100644
index 0000000..36d7033
--- /dev/null
+++ b/tests/fastngram/test_fastngram.py
@@ -0,0 +1,6 @@
+from acres.fastngram.fastngram import expand
+
+
+def test_expand(word_ngrams):
+ expansions = expand("EKG")
+ assert "Elektrokardiogramm" in expansions
| Baseline: most frequent n-gram that passes filtering
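A minimal sketch of that baseline idea (hypothetical helper names, not acres' actual API):

```python
def baseline_expansion(acronym, candidates, frequencies, passes_filter):
    """Return the most frequent candidate expansion that passes filtering.

    `frequencies` maps each candidate n-gram to its corpus count and
    `passes_filter` is whatever acceptance check is applied (e.g. initial letters).
    """
    accepted = [c for c in candidates if passes_filter(acronym, c)]
    return max(accepted, key=lambda c: frequencies.get(c, 0), default=None)
```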
Should also support multi-word acronyms. | 0.0 | c102aa63d432cc2cddffb29311344e55c6175045 | [
"[100%]",
"tests/fastngram/test_fastngram.py::test_expand"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-08-30 17:04:45+00:00 | apache-2.0 | 1,444 |
|
btel__svg_utils-104 | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 15831e0..73321ce 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/psf/black
- rev: 20.8b1
+ rev: 22.8.0
hooks:
- id: black
language_version: python3
diff --git a/src/svgutils/common.py b/src/svgutils/common.py
new file mode 100644
index 0000000..9928d72
--- /dev/null
+++ b/src/svgutils/common.py
@@ -0,0 +1,59 @@
+import re
+
+
+class Unit:
+ """Implementation of SVG units and conversions between them.
+
+ Parameters
+ ----------
+ measure : str
+ value with unit (for example, '2cm')
+ """
+
+ per_inch = {"px": 90, "cm": 2.54, "mm": 25.4, "pt": 72.0}
+
+ def __init__(self, measure):
+ try:
+ self.value = float(measure)
+ self.unit = "px"
+ except ValueError:
+ m = re.match("([0-9]+\.?[0-9]*)([a-z]+)", measure)
+ value, unit = m.groups()
+ self.value = float(value)
+ self.unit = unit
+
+ def to(self, unit):
+ """Convert to a given unit.
+
+ Parameters
+ ----------
+ unit : str
+ Name of the unit to convert to.
+
+ Returns
+ -------
+ u : Unit
+ new Unit object with the requested unit and computed value.
+ """
+ u = Unit("0cm")
+ u.value = self.value / self.per_inch[self.unit] * self.per_inch[unit]
+ u.unit = unit
+ return u
+
+ def __str__(self):
+ return "{}{}".format(self.value, self.unit)
+
+ def __repr__(self):
+ return "Unit({})".format(str(self))
+
+ def __mul__(self, number):
+ u = Unit("0cm")
+ u.value = self.value * number
+ u.unit = self.unit
+ return u
+
+ def __truediv__(self, number):
+ return self * (1.0 / number)
+
+ def __div__(self, number):
+ return self * (1.0 / number)
diff --git a/src/svgutils/compose.py b/src/svgutils/compose.py
index 8a30a3b..0932fe5 100644
--- a/src/svgutils/compose.py
+++ b/src/svgutils/compose.py
@@ -15,9 +15,9 @@ Features:
"""
import os
-import re
from svgutils import transform as _transform
+from svgutils.common import Unit
CONFIG = {
"svg.file_path": ".",
@@ -358,61 +358,3 @@ class Figure(Panel):
if iy > nrows:
break
return self
-
-
-class Unit:
- """Implementation of SVG units and conversions between them.
-
- Parameters
- ----------
- measure : str
- value with unit (for example, '2cm')
- """
-
- per_inch = {"px": 90, "cm": 2.54, "mm": 25.4, "pt": 72.0}
-
- def __init__(self, measure):
- try:
- self.value = float(measure)
- self.unit = "px"
- except ValueError:
- m = re.match("([0-9]+\.?[0-9]*)([a-z]+)", measure)
- value, unit = m.groups()
- self.value = float(value)
- self.unit = unit
-
- def to(self, unit):
- """Convert to a given unit.
-
- Parameters
- ----------
- unit : str
- Name of the unit to convert to.
-
- Returns
- -------
- u : Unit
- new Unit object with the requested unit and computed value.
- """
- u = Unit("0cm")
- u.value = self.value / self.per_inch[self.unit] * self.per_inch[unit]
- u.unit = unit
- return u
-
- def __str__(self):
- return "{}{}".format(self.value, self.unit)
-
- def __repr__(self):
- return "Unit({})".format(str(self))
-
- def __mul__(self, number):
- u = Unit("0cm")
- u.value = self.value * number
- u.unit = self.unit
- return u
-
- def __truediv__(self, number):
- return self * (1.0 / number)
-
- def __div__(self, number):
- return self * (1.0 / number)
diff --git a/src/svgutils/transform.py b/src/svgutils/transform.py
index ef15f9e..5cddfa3 100644
--- a/src/svgutils/transform.py
+++ b/src/svgutils/transform.py
@@ -7,6 +7,8 @@ try:
except ImportError:
from io import StringIO
+from svgutils.common import Unit
+
SVG_NAMESPACE = "http://www.w3.org/2000/svg"
XLINK_NAMESPACE = "http://www.w3.org/1999/xlink"
SVG = "{%s}" % SVG_NAMESPACE
@@ -239,17 +241,10 @@ class SVGFigure(object):
self._height = 0
if width:
- try:
- self.width = width # this goes to @width.setter a few lines down
- except AttributeError:
- # int or str
- self._width = width
+ self.width = width # this goes to @width.setter a few lines down
if height:
- try:
- self.height = height # this goes to @height.setter a few lines down
- except AttributeError:
- self._height = height
+ self.height = height # this goes to @height.setter a few lines down
@property
def width(self):
@@ -258,6 +253,8 @@ class SVGFigure(object):
@width.setter
def width(self, value):
+ if not isinstance(value, Unit):
+ value = Unit(value)
self._width = value.value
self.root.set("width", str(value))
self.root.set("viewBox", "0 0 %s %s" % (self._width, self._height))
@@ -269,6 +266,8 @@ class SVGFigure(object):
@height.setter
def height(self, value):
+ if not isinstance(value, Unit):
+ value = Unit(value)
self._height = value.value
self.root.set("height", str(value))
self.root.set("viewBox", "0 0 %s %s" % (self._width, self._height))
| btel/svg_utils | 4abf7fb18cea0da04b6b2a0bcacbec5daded1662 | diff --git a/tests/test_transform.py b/tests/test_transform.py
index 7074ea6..ca449da 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -75,3 +75,10 @@ def test_svg_figure_writes_width_height_and_view_box():
assert 'width="400.0mm"' in written_content
assert 'height="300.0mm"' in written_content
assert 'viewBox="0 0 400.0 300.0"' in written_content
+
+
+def test_svg_figure__width_height_tostr():
+
+ svg_fig = transform.SVGFigure("400px", "300px")
+ assert b'height="300.0px"' in svg_fig.to_str()
+ assert b'width="400.0px"' in svg_fig.to_str()
| SVGFigure does not set width and height element if created directly
Width and height aren't correctly set in the XML if `transform.SVGFigure` is created directly:
```python
import svgutils
svgutils.transform.SVGFigure("10cm", "16cm").to_str()
```
prints
```python
b'<?xml version=\'1.0\' encoding=\'ASCII\' standalone=\'yes\'?>\n<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1"/>\n'
```
This code is used in the tutorial:
https://github.com/btel/svg_utils/blob/4abf7fb18cea0da04b6b2a0bcacbec5daded1662/docs/source/tutorials/scripts/fig_final.py#L5
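With the width and height applied on construction, the same call would be expected to produce something along these lines (illustrative sketch, not verified output):

```python
import svgutils

fig = svgutils.transform.SVGFigure("10cm", "16cm")
print(fig.to_str())
# Expected: the <svg> root carries width="10.0cm", height="16.0cm" and
# viewBox="0 0 10.0 16.0" instead of being an empty, attribute-less tag.
```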
Therefore I expect that this is not an intended behavior. | 0.0 | 4abf7fb18cea0da04b6b2a0bcacbec5daded1662 | [
"tests/test_transform.py::test_svg_figure__width_height_tostr"
]
| [
"tests/test_transform.py::test_get_size",
"tests/test_transform.py::test_group_class",
"tests/test_transform.py::test_skew",
"tests/test_transform.py::test_scale_xy",
"tests/test_transform.py::test_create_svg_figure",
"tests/test_transform.py::test_svg_figure_writes_width_height_and_view_box"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-09-27 20:43:50+00:00 | mit | 1,445 |
|
bttner__kanelbulle-5 | diff --git a/kanelbulle/config/__init__.py b/kanelbulle/config/__init__.py
new file mode 100644
index 0000000..c3a0993
--- /dev/null
+++ b/kanelbulle/config/__init__.py
@@ -0,0 +1,1 @@
+"""Modules related to config"""
diff --git a/kanelbulle/config/config.py b/kanelbulle/config/config.py
new file mode 100644
index 0000000..863bed8
--- /dev/null
+++ b/kanelbulle/config/config.py
@@ -0,0 +1,32 @@
+"""Configuration storage."""
+
+import json
+
+
+class Container:
+ """Load and store configurations.
+
+ Attributes:
+ data: configuration data (dict).
+ error: error message (str).
+ """
+
+ def __init__(self):
+ self.data = None
+ self.error = "No error"
+ self.load()
+
+ def load(self):
+ """Load configurations from file."""
+ try:
+ with open("arms/config/config.json", 'r') as stream:
+ self.data = json.load(stream)
+ except OSError as e:
+ self.data = None
+ self.error = "OSError - " + str(e)
+ except ValueError as e:
+ self.data = None
+ self.error = "ValueError - " + str(e)
+
+
+var = Container()
| bttner/kanelbulle | 6588a2b8f09fe8c96088be6073cc47f268e152e7 | diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
new file mode 100644
index 0000000..ddc30fd
--- /dev/null
+++ b/tests/unit/test_config.py
@@ -0,0 +1,35 @@
+"""Tests for kanelbulle.config.config."""
+
+from unittest import mock
+from kanelbulle.config import config
+
+
+def test_var_is_container():
+ """The variable var shall be an instance of the class Container."""
+ assert isinstance(config.var, config.Container) == True
+
+
[email protected](config, 'open')
+def test_load_os_error(mock_open):
+ """IF a system-related error occurs, WHEN the configuration file is
+ loaded, THEN the respective error message shall be stored and the data
+ field shall be empty.
+ """
+ mock_open.side_effect = OSError
+ config.var.data = "OSError"
+ config.var.load()
+ assert (config.var.data is None) == True
+ assert ("OSError" in config.var.error) == True
+
+
[email protected](config, 'open')
+def test_load_value_error(mock_open):
+ """IF a value error occurs, WHEN the configuration file is
+ loaded, THEN the respective error message shall be stored and the data
+ field shall be empty.
+ """
+ mock_open.side_effect = ValueError
+ config.var.data = "ValueError"
+ config.var.load()
+ assert (config.var.data is None) == True
+ assert ("ValueError" in config.var.error) == True
| Implement configuration
Requirements:
- Implement the ability to load and store configuration data.
- Respective unit tests for the introduced method/class should be provided as well. | 0.0 | 6588a2b8f09fe8c96088be6073cc47f268e152e7 | [
"tests/unit/test_config.py::test_var_is_container",
"tests/unit/test_config.py::test_load_os_error",
"tests/unit/test_config.py::test_load_value_error"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | 2018-10-25 16:38:14+00:00 | apache-2.0 | 1,446 |
|
bttner__kanelbulle-6 | diff --git a/kanelbulle/utils/log.py b/kanelbulle/utils/log.py
new file mode 100644
index 0000000..47e1c69
--- /dev/null
+++ b/kanelbulle/utils/log.py
@@ -0,0 +1,75 @@
+"""Utilities related to logging."""
+
+import logging
+from kanelbulle.config import config
+
+# Log format to use.
+FORMATTER = logging.Formatter('%(asctime)s | %(name)-10s | '
+ '%(levelname)-8s | %(message)s', '%Y-%m-%d %H:%M')
+
+# Log levels to use.
+LOG_LEVELS = {
+ 'CRITICAL': logging.CRITICAL,
+ 'ERROR': logging.ERROR,
+ 'WARNING': logging.WARNING,
+ 'INFO': logging.INFO,
+ 'DEBUG': logging.DEBUG,
+}
+
+# Load configurations from file. If it fails, take predefined values.
+ERROR = False
+FILE_LEVEL = logging.DEBUG
+CONSOLE_LEVEL = logging.WARNING
+
+try:
+ CONFIG = config.var.data['logger']
+ FILE_LEVEL = LOG_LEVELS[CONFIG['file']['level'].upper()]
+ CONSOLE_LEVEL = LOG_LEVELS[CONFIG['console']['level'].upper()]
+except (TypeError, KeyError):
+ ERROR = True
+
+# Initialize handlers.
+FILE = logging.FileHandler('kanelbulle/utils/log.log')
+FILE.setFormatter(FORMATTER)
+FILE.setLevel(FILE_LEVEL)
+
+CONSOLE = logging.StreamHandler()
+CONSOLE.setFormatter(FORMATTER)
+CONSOLE.setLevel(CONSOLE_LEVEL)
+
+
+def __init__():
+ """Reset log file or create a new one in case the respective file does not
+ exist.
+ """
+ wfile = logging.FileHandler('kanelbulle/utils/log.log', mode='w')
+ wfile.setLevel(logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(wfile)
+ logger.info('Log file')
+ logger.info(90*'-')
+
+
+def get_logger(name):
+ """Set up a new logger.
+
+ Args:
+ name: name of the new logger (string).
+
+ Return:
+ Logger with specified name.
+ """
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(FILE)
+ logger.addHandler(CONSOLE)
+
+ return logger
+
+
+# The different loggers used (alphabetical order).
+app = get_logger('app')
+config = get_logger('config')
+log = get_logger('log')
| bttner/kanelbulle | 87c1c28af495abe5f3db4bce9d1c9762b5accc4b | diff --git a/tests/unit/test_log.py b/tests/unit/test_log.py
new file mode 100644
index 0000000..0217c0d
--- /dev/null
+++ b/tests/unit/test_log.py
@@ -0,0 +1,45 @@
+"""Tests for kanelbulle.utils.log."""
+
+import importlib
+import pytest
+from unittest import mock
+from kanelbulle.utils import log
+from kanelbulle.config import config
+
+config_none = None
+config_empty = {}
+config_no_logger = {"color": "blue"}
+config_no_file = {"logger": {"color": {"level": "wrong"}, "console": {
+ "level": "warning"}}}
+config_no_level = {"logger": {"file": {"color": "blue"}, "console": {
+ "color": "red"}}}
+config_wrong_logger = {"logger": "wrong"}
+config_wrong_file = {"logger": {"file": "wrong", "console": {
+ "level": "warning"}}}
+config_wrong_level = {"logger": {"file": {"level": "wrong"}, "console": {
+ "level": "warning"}}}
[email protected]('config_data', [config_none, config_empty,
+ config_no_logger, config_no_file,
+ config_no_level, config_wrong_logger,
+ config_wrong_file, config_wrong_level])
+def test_config_error(config_data):
+ """IF the configuration file does not include an entry for
+ logging or is incomplete, THEN a corresponding error value should be set to
+ True and predefined values should be used instead.
+ """
+ with mock.patch.object(config.var, 'data', config_data):
+ pre_file_level = log.FILE_LEVEL
+ pre_console_level = log.CONSOLE_LEVEL
+ importlib.reload(log)
+ assert log.ERROR == True
+ assert log.FILE_LEVEL == pre_file_level
+ assert log.CONSOLE_LEVEL == pre_console_level
+
+
[email protected](log, 'logging')
+def test_init_(mock_logging):
+ """The initialization of the log module shall overwrite the existing log
+ file or shall create a new one if a respective file does not exist.
+ """
+ log.__init__()
+ mock_logging.FileHandler.assert_called_with(mock.ANY, mode='w')
| Implement a simple logger
To track events that happen when the software runs, a simple logger should be implemented. Requirements:
- Can load settings via a _json_ file.
- Saves the output in a _log_ file.
- Can handle different loggers (one logger per file).
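Based on the accompanying patch, the settings live in a JSON file with per-handler levels and each module requests its own named logger — roughly (sketch, illustrative values):

```python
# Settings JSON consumed at import time, e.g.:
#   {"logger": {"file": {"level": "debug"}, "console": {"level": "warning"}}}

from kanelbulle.utils import log

logger = log.get_logger("app")   # one named logger per file/module
logger.warning("shown on the console and written to the log file")
logger.debug("written to the log file only, given the levels above")
```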
Sufficient tests should be included as well. | 0.0 | 87c1c28af495abe5f3db4bce9d1c9762b5accc4b | [
"tests/unit/test_log.py::test_config_error[None]",
"tests/unit/test_log.py::test_config_error[config_data1]",
"tests/unit/test_log.py::test_config_error[config_data2]",
"tests/unit/test_log.py::test_config_error[config_data3]",
"tests/unit/test_log.py::test_config_error[config_data4]",
"tests/unit/test_log.py::test_config_error[config_data5]",
"tests/unit/test_log.py::test_config_error[config_data6]",
"tests/unit/test_log.py::test_config_error[config_data7]",
"tests/unit/test_log.py::test_init_"
]
| []
| {
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | 2018-10-25 16:53:43+00:00 | apache-2.0 | 1,447 |
|
btubbs__sseclient-39 | diff --git a/sseclient.py b/sseclient.py
index ee7e16d..26e1b83 100644
--- a/sseclient.py
+++ b/sseclient.py
@@ -55,6 +55,8 @@ class SSEClient(object):
requester = self.session or requests
self.resp = requester.get(self.url, stream=True, **self.requests_kwargs)
self.resp_iterator = self.iter_content()
+ self.decoder = codecs.getincrementaldecoder(
+ self.resp.encoding)(errors='replace')
# TODO: Ensure we're handling redirects. Might also stick the 'origin'
# attribute on Events like the Javascript spec requires.
@@ -85,14 +87,12 @@ class SSEClient(object):
return self
def __next__(self):
- decoder = codecs.getincrementaldecoder(
- self.resp.encoding)(errors='replace')
while not self._event_complete():
try:
next_chunk = next(self.resp_iterator)
if not next_chunk:
raise EOFError()
- self.buf += decoder.decode(next_chunk)
+ self.buf += self.decoder.decode(next_chunk)
except (StopIteration, requests.RequestException, EOFError, six.moves.http_client.IncompleteRead) as e:
print(e)
| btubbs/sseclient | 7b81911f3a70a1b4cd65d2694f805aa4f5cc3b34 | diff --git a/test_sseclient.py b/test_sseclient.py
index bb0d767..73cf7a3 100644
--- a/test_sseclient.py
+++ b/test_sseclient.py
@@ -213,10 +213,35 @@ def test_client_sends_cookies():
s.cookies = RequestsCookieJar()
s.cookies['foo'] = 'bar'
with mock.patch('sseclient.requests.Session.send') as m:
+ m.return_value.encoding = "utf-8"
sseclient.SSEClient('http://blah.com', session=s)
prepared_request = m.call_args[0][0]
assert prepared_request.headers['Cookie'] == 'foo=bar'
[email protected]
+def unicode_multibyte_responses(monkeypatch):
+ content = join_events(
+ E(data='ööööööööööööööööööööööööööööööööööööööööööööööööööööööööö', id='first', retry='2000', event='blah'),
+ E(data='äääääääääääääääääääääääääääääääääääääääääääääääääääääääää', id='second', retry='4000', event='blerg'),
+ E(data='üüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüü', id='third'),
+ )
+ fake_get = mock.Mock(return_value=FakeResponse(200, content))
+ monkeypatch.setattr(requests, 'get', fake_get)
+
+ yield
+
+ fake_get.assert_called_once_with(
+ 'http://blah.com',
+ headers={'Accept': 'text/event-stream', 'Cache-Control': 'no-cache'},
+ stream=True)
+
[email protected]("unicode_multibyte_responses")
+def test_multiple_messages():
+ c = sseclient.SSEClient('http://blah.com',chunk_size=51)
+ assert next(c).data == 'ööööööööööööööööööööööööööööööööööööööööööööööööööööööööö'
+ assert next(c).data == 'äääääääääääääääääääääääääääääääääääääääääääääääääääääääää'
+ assert next(c).data == 'üüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüüü'
+
def test_event_stream():
"""Check whether event.data can be loaded."""
limit = 50
| Spurious replacement characters (U+FFFD) in events
The unicode decoder state is not saved across SSEClient.__next__() calls. Thus if the buffer ends in the middle of a multi-byte encoding the decoder fails to decode the char on the next call. | 0.0 | 7b81911f3a70a1b4cd65d2694f805aa4f5cc3b34 | [
"test_sseclient.py::test_multiple_messages"
]
| [
"test_sseclient.py::test_round_trip_parse",
"test_sseclient.py::test_no_colon",
"test_sseclient.py::test_no_space",
"test_sseclient.py::test_comment",
"test_sseclient.py::test_retry_is_integer",
"test_sseclient.py::test_default_event",
"test_sseclient.py::test_eols",
"test_sseclient.py::test_last_id_remembered",
"test_sseclient.py::test_retry_remembered",
"test_sseclient.py::test_extra_newlines_after_event",
"test_sseclient.py::test_simple_iteration",
"test_sseclient.py::test_client_sends_cookies",
"test_sseclient.py::test_event_stream"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2019-12-15 11:57:23+00:00 | mit | 1,448 |
|
btubbs__sseclient-50 | diff --git a/README.rst b/README.rst
index 04e66ab..d424ec5 100644
--- a/README.rst
+++ b/README.rst
@@ -7,6 +7,11 @@ streams (also known as EventSource, after the name of the Javascript interface
inside browsers). The SSEClient class accepts a url on init, and is then an
iterator over messages coming from the server.
+Maintenance
+-----------
+
+I (btubbs) haven't been using this library in my own work for years, so I put limited time into maintaining it. I will check in on pull requests and issues once per month. If you are interested in providing more active support for the library, please reach out.
+
Installation
------------
diff --git a/setup.py b/setup.py
index 217f8e2..eb5d8d4 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@ pytest_runner = ['pytest_runner>=2.1'] if needs_pytest else []
setup(
name='sseclient',
- version='0.0.26',
+ version='0.0.27',
author='Brent Tubbs',
author_email='[email protected]',
py_modules=['sseclient'],
diff --git a/sseclient.py b/sseclient.py
index d2193e2..3bacccf 100644
--- a/sseclient.py
+++ b/sseclient.py
@@ -14,7 +14,7 @@ import six
import requests
-__version__ = '0.0.26'
+__version__ = '0.0.27'
# Technically, we should support streams that mix line endings. This regex,
# however, assumes that a system will provide consistent line endings.
@@ -55,8 +55,8 @@ class SSEClient(object):
requester = self.session or requests
self.resp = requester.get(self.url, stream=True, **self.requests_kwargs)
self.resp_iterator = self.iter_content()
- self.decoder = codecs.getincrementaldecoder(
- self.resp.encoding)(errors='replace')
+ encoding = self.resp.encoding or self.resp.apparent_encoding
+ self.decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
# TODO: Ensure we're handling redirects. Might also stick the 'origin'
# attribute on Events like the Javascript spec requires.
@@ -67,12 +67,13 @@ class SSEClient(object):
while True:
if hasattr(self.resp.raw, '_fp') and \
hasattr(self.resp.raw._fp, 'fp') and \
- hasattr(self.resp.raw._fp.fp, 'read1'):
+ hasattr(self.resp.raw._fp.fp, 'read1') and \
+ not self.resp.raw.chunked:
chunk = self.resp.raw._fp.fp.read1(self.chunk_size)
else:
- # _fp is not available, this means that we cannot use short
- # reads and this will block until the full chunk size is
- # actually read
+ # _fp is not available or we are using chunked encoding
+ # this means that we cannot use short reads and this will
+ # block until the full chunk size is actually read
chunk = self.resp.raw.read(self.chunk_size)
if not chunk:
break
@@ -87,40 +88,47 @@ class SSEClient(object):
return self
def __next__(self):
- while not self._event_complete():
- try:
- next_chunk = next(self.resp_iterator)
- if not next_chunk:
- raise EOFError()
- self.buf += self.decoder.decode(next_chunk)
-
- except (StopIteration, requests.RequestException, EOFError, six.moves.http_client.IncompleteRead) as e:
- print(e)
- time.sleep(self.retry / 1000.0)
- self._connect()
-
- # The SSE spec only supports resuming from a whole message, so
- # if we have half a message we should throw it out.
- head, sep, tail = self.buf.rpartition('\n')
- self.buf = head + sep
- continue
-
- # Split the complete event (up to the end_of_field) into event_string,
- # and retain anything after the current complete event in self.buf
- # for next time.
- (event_string, self.buf) = re.split(end_of_field, self.buf, maxsplit=1)
- msg = Event.parse(event_string)
-
- # If the server requests a specific retry delay, we need to honor it.
- if msg.retry:
- self.retry = msg.retry
-
- # last_id should only be set if included in the message. It's not
- # forgotten if a message omits it.
- if msg.id:
- self.last_id = msg.id
-
- return msg
+ while True: #loop until event emitted
+ while not self._event_complete():
+ try:
+ next_chunk = next(self.resp_iterator)
+ if not next_chunk:
+ raise EOFError()
+ self.buf += self.decoder.decode(next_chunk)
+
+ except (StopIteration, requests.RequestException, EOFError, six.moves.http_client.IncompleteRead) as e:
+ print(e)
+ time.sleep(self.retry / 1000.0)
+ self._connect()
+
+ # The SSE spec only supports resuming from a whole message, so
+ # if we have half a message we should throw it out.
+ head, sep, tail = self.buf.rpartition('\n')
+ self.buf = head + sep
+ continue
+
+ # Split the complete event (up to the end_of_field) into event_string,
+ # and retain anything after the current complete event in self.buf
+ # for next time.
+ (event_string, self.buf) = re.split(end_of_field, self.buf, maxsplit=1)
+ msg = Event.parse(event_string)
+
+ # If the server requests a specific retry delay, we need to honor it.
+ if msg.retry:
+ self.retry = msg.retry
+
+ # last_id should only be set if included in the message. It's not
+ # forgotten if a message omits it.
+ if msg.id:
+ self.last_id = msg.id
+
+ #Set the last event ID string of the event source to the value of the last event ID buffer.
+ msg.lastEventId =self.last_id
+
+ # if data in event, emit and return
+ if msg.data !='':
+ return msg
+
if six.PY2:
next = __next__
| btubbs/sseclient | 1f9a3a2d4221fb6f886d09c23e26f8589fc9d1a4 | diff --git a/test_sseclient.py b/test_sseclient.py
index 73cf7a3..8ecef81 100644
--- a/test_sseclient.py
+++ b/test_sseclient.py
@@ -48,12 +48,23 @@ def test_no_space():
m = E.parse('data:hi')
assert m.data == 'hi'
+def test_with_space():
+ m = E.parse('data: hi')
+ assert m.data == 'hi'
+
+def test_with_leading_space():
+ m = E.parse('data: hi')
+ assert m.data == ' hi'
def test_comment():
raw = ":this is a comment\ndata: this is some data"
m = E.parse(raw)
assert m.data == 'this is some data'
+def test_comment_only():
+ raw = ":this is a comment"
+ m = E.parse(raw)
+ assert m.data == ''
def test_retry_is_integer():
m = E.parse('data: hi\nretry: 4000')
@@ -73,9 +84,10 @@ def test_eols():
class FakeResponse(object):
- def __init__(self, status_code, content, headers=None):
+ def __init__(self, status_code, content, headers=None, encoding="utf-8"):
self.status_code = status_code
- self.encoding = "utf-8"
+ self.encoding = encoding
+ self.apparent_encoding = "utf-8"
if not isinstance(content, six.text_type):
content = content.decode("utf-8")
self.stream = content
@@ -95,9 +107,10 @@ def join_events(*events):
# Tests of parsing a multi event stream
-def test_last_id_remembered(monkeypatch):
[email protected]("encoding", ["utf-8", None])
+def test_last_id_remembered(monkeypatch, encoding):
content = 'data: message 1\nid: abcdef\n\ndata: message 2\n\n'
- fake_get = mock.Mock(return_value=FakeResponse(200, content))
+ fake_get = mock.Mock(return_value=FakeResponse(200, content, encoding=encoding))
monkeypatch.setattr(requests, 'get', fake_get)
c = sseclient.SSEClient('http://blah.com')
@@ -121,6 +134,26 @@ def test_retry_remembered(monkeypatch):
assert m2.retry is None
assert c.retry == 5000
+def test_commentonly_ignored(monkeypatch):
+ content = ':comment\n\ndata: message after comment\n\n'
+ fake_get = mock.Mock(return_value=FakeResponse(200, content))
+ monkeypatch.setattr(requests, 'get', fake_get)
+
+ c = sseclient.SSEClient('http://blah.com')
+ #the comment only event should be ignored entirely and not emitted
+ m1 = next(c)
+ assert m1.data == 'message after comment'
+
+def test_retryonly_ignored(monkeypatch):
+ content = 'retry: 10000\n\ndata: will be emitted\n\n'
+ fake_get = mock.Mock(return_value=FakeResponse(200, content))
+ monkeypatch.setattr(requests, 'get', fake_get)
+
+ c = sseclient.SSEClient('http://blah.com')
+ #the retry only event should be processed but not emitted
+
+ m1 = next(c)
+ assert m1.data == ' will be emitted'
def test_extra_newlines_after_event(monkeypatch):
"""
@@ -149,8 +182,8 @@ data: hello3
assert m1.event == 'hello'
assert m1.data == 'hello1'
- assert m2.data == 'hello2'
assert m2.event == 'hello'
+ assert m2.data == 'hello2'
assert m3.data == 'hello3'
assert m3.event == 'hello'
| The first event retrieved by sseclient is always empty
```python
>>> from sseclient import SSEClient
>>> client = SSEClient(url='https://stream.wikimedia.org/v2/stream/recentchange')
>>> data = next(client)
>>> data.__dict__
{'data': '', 'event': 'message', 'id': None, 'retry': None}
>>> data = next(client)
>>> data.__dict__
{'data': '{"$schema":"/mediawiki/recentchange/1.0.0","meta":{"uri":"https://commons.wikimedia.org/wiki/File:Geschichte_des_Schultei%C3%9Fenamts_und_der_Stadt_Neumarkt_014.jpg","request_id":"Xj6I2ApAED4AADHcxFEAAABL","id":"7573678b-87b2-4325-81f8-e338613e537b","dt":"2020-02-08T10:09:30Z","domain":"commons.wikimedia.org","stream":"mediawiki.recentchange","topic":"eqiad.mediawiki.recentchange","partition":0,"offset":2157854683},"id":1327149461,"type":"log","namespace":6,"title":"File:Geschichte des Schulteißenamts und der Stadt Neumarkt 014.jpg","comment":"User created page with UploadWizard","timestamp":1581156570,"user":"DALIBRI","bot":false,"log_id":290782528,"log_type":"upload","log_action":"upload","log_params":{"img_sha1":"5oe8esm1e76smmt6zbv811c0m7ay1jo","img_timestamp":"20200208100930"},"log_action_comment":"uploaded "[[File:Geschichte des Schulteißenamts und der Stadt Neumarkt 014.jpg]]": User created page with UploadWizard","server_url":"https://commons.wikimedia.org","server_name":"commons.wikimedia.org","server_script_path":"/w","wiki":"commonswiki","parsedcomment":"User created page with UploadWizard"}', 'event': 'message', 'id': '[{"topic":"eqiad.mediawiki.recentchange","partition":0,"timestamp":1581156570001},{"topic":"codfw.mediawiki.recentchange","partition":0,"offset":-1}]', 'retry': None}
>>>
``` | 0.0 | 1f9a3a2d4221fb6f886d09c23e26f8589fc9d1a4 | [
"test_sseclient.py::test_last_id_remembered[None]",
"test_sseclient.py::test_commentonly_ignored",
"test_sseclient.py::test_retryonly_ignored"
]
| [
"test_sseclient.py::test_round_trip_parse",
"test_sseclient.py::test_no_colon",
"test_sseclient.py::test_no_space",
"test_sseclient.py::test_with_space",
"test_sseclient.py::test_with_leading_space",
"test_sseclient.py::test_comment",
"test_sseclient.py::test_comment_only",
"test_sseclient.py::test_retry_is_integer",
"test_sseclient.py::test_default_event",
"test_sseclient.py::test_eols",
"test_sseclient.py::test_last_id_remembered[utf-8]",
"test_sseclient.py::test_retry_remembered",
"test_sseclient.py::test_extra_newlines_after_event",
"test_sseclient.py::test_multiple_messages",
"test_sseclient.py::test_simple_iteration",
"test_sseclient.py::test_client_sends_cookies",
"test_sseclient.py::test_event_stream"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-09-11 01:50:58+00:00 | mit | 1,449 |
|
bugsnag__bugsnag-python-188 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index a658f72..64f8a09 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,13 @@
Changelog
=========
+## TBD
+
+### Fixes
+
+* WSGI middleware will no longer raise when given a badly encoded URL
+ [#188](https://github.com/bugsnag/bugsnag-python/pull/188)
+
## 3.6.0 (2019-06-25)
### Enhancements
diff --git a/bugsnag/wsgi/middleware.py b/bugsnag/wsgi/middleware.py
index b4b0aad..62adddf 100644
--- a/bugsnag/wsgi/middleware.py
+++ b/bugsnag/wsgi/middleware.py
@@ -21,11 +21,12 @@ def add_wsgi_request_data_to_notification(notification):
environ = notification.request_config.wsgi_environ
request = Request(environ)
+ path = request_path(environ)
- notification.context = "%s %s" % (request.method, request_path(environ))
+ notification.context = "%s %s" % (request.method, path)
notification.set_user(id=request.remote_addr)
notification.add_tab("request", {
- "url": request.path_url,
+ "url": "%s%s" % (request.application_url, path),
"headers": dict(request.headers),
"params": dict(request.params),
})
| bugsnag/bugsnag-python | 22bd9e187c7347b80169f8507ba4da44663adfc8 | diff --git a/tests/test_path_encoding.py b/tests/test_path_encoding.py
new file mode 100644
index 0000000..47a10e7
--- /dev/null
+++ b/tests/test_path_encoding.py
@@ -0,0 +1,121 @@
+# coding=utf-8
+
+import unittest
+from six.moves import urllib
+
+from bugsnag.notification import Notification
+from bugsnag.configuration import (Configuration, RequestConfiguration)
+
+
+class PathEncodingTest(unittest.TestCase):
+ environ = {
+ 'SCRIPT_NAME': '',
+ 'SERVER_NAME': 'localhost',
+ 'SERVER_PORT': '80',
+ 'wsgi.url_scheme': 'http',
+ }
+
+ def test_path_supports_ascii_characters(self):
+ import bugsnag.wsgi.middleware
+
+ environ = self.environ.copy()
+ environ['PATH_INFO'] = '/hello/world'
+
+ bugsnag.configure_request(wsgi_environ=environ)
+
+ config = Configuration()
+ notification = Notification(
+ Exception("oops"),
+ config,
+ RequestConfiguration.get_instance()
+ )
+
+ bugsnag.wsgi.middleware.add_wsgi_request_data_to_notification(
+ notification
+ )
+
+ self.assertEqual(
+ 'http://localhost/hello/world',
+ notification.meta_data['request']['url']
+ )
+
+ def test_wrongly_encoded_url_should_not_raise(self):
+ import bugsnag.wsgi.middleware
+
+ environ = self.environ.copy()
+ environ['PATH_INFO'] = '/%83'
+
+ bugsnag.configure_request(wsgi_environ=environ)
+
+ config = Configuration()
+ notification = Notification(
+ Exception("oops"),
+ config,
+ RequestConfiguration.get_instance()
+ )
+
+ bugsnag.wsgi.middleware.add_wsgi_request_data_to_notification(
+ notification
+ )
+
+ # We have to use "urllib.parse.quote" here because the exact output
+ # differs on different Python versions because of how they handle
+ # invalid encoding sequences
+ self.assertEqual(
+ 'http://localhost/%s' % urllib.parse.quote('%83'),
+ notification.meta_data['request']['url']
+ )
+
+ def test_path_supports_emoji(self):
+ import bugsnag.wsgi.middleware
+
+ environ = self.environ.copy()
+ environ['PATH_INFO'] = '/😇'
+
+ config = Configuration()
+ notification = Notification(
+ Exception("oops"),
+ config,
+ RequestConfiguration.get_instance()
+ )
+
+ bugsnag.configure_request(wsgi_environ=environ)
+
+ bugsnag.wsgi.middleware.add_wsgi_request_data_to_notification(
+ notification
+ )
+
+ # You can validate this by using "encodeURIComponent" in a browser.
+ self.assertEqual(
+ 'http://localhost/%F0%9F%98%87',
+ notification.meta_data['request']['url']
+ )
+
+ def test_path_supports_non_ascii_characters(self):
+ import bugsnag.wsgi.middleware
+
+ environ = self.environ.copy()
+ environ['PATH_INFO'] = '/ôßłガ'
+
+ config = Configuration()
+ notification = Notification(
+ Exception("oops"),
+ config,
+ RequestConfiguration.get_instance()
+ )
+
+ bugsnag.configure_request(wsgi_environ=environ)
+
+ bugsnag.wsgi.middleware.add_wsgi_request_data_to_notification(
+ notification
+ )
+
+ # You can validate this by using "encodeURIComponent" in a browser.
+ self.assertEqual(
+ 'http://localhost/%C3%B4%C3%9F%C5%82%E3%82%AC',
+ notification.meta_data['request']['url']
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
| WSGI middleware breaks when handling badly encoded URLs
### Description
In public-facing applications, we cannot control what kinds of URLs will be requested by clients -- which means we may have to deal with wrong or broken encoding.
### Issue
`bugsnag.wsgi.middleware.add_wsgi_request_data_to_notification` uses `webob.Request.path_url`, which raises an exception if the URL path is encoded improperly. It would not break if it instead e.g. used the already existing helper function `bugsnag.wsgi.request_path`.
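A sketch of that direction (the wrapper name `build_request_url` is hypothetical):

```python
from webob import Request
from bugsnag.wsgi import request_path

def build_request_url(environ):
    # Build the URL from the WSGI path helper rather than webob's
    # Request.path_url, which raises UnicodeDecodeError on badly encoded paths.
    request = Request(environ)
    return "%s%s" % (request.application_url, request_path(environ))
```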
### Environment
Library versions:
- python version: both 2.7 and 3.7
- bugsnag-python version: 3.6.0
### Example code snippet
```python
import unittest
import unittest.mock
import urllib.parse
class PathEncodingTest(unittest.TestCase):
environ = {
# https://github.com/python/cpython/blob/master/Lib/wsgiref/simple_server.py#L85
'PATH_INFO': urllib.parse.unquote('/%83', 'latin-1'),
'SCRIPT_NAME': '',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '80',
'wsgi.url_scheme': 'http',
}
def test_wrongly_encoded_url_should_not_raise(self):
import bugsnag.wsgi.middleware
notification = unittest.mock.Mock()
notification.request_config.wsgi_environ = self.environ
# This currently raises UnicodeDecodeError
bugsnag.wsgi.middleware.add_wsgi_request_data_to_notification(
notification)
# but I'd expect something like this:
self.assertEqual(
'http://localhost/%83',
notification.add_tab.call_args_list[0][0][1]['url'])
def test_using_the_already_existing_helper_function_works_somewhat(self):
import bugsnag.wsgi
import webob
r = webob.Request(self.environ)
self.assertEqual(
# Not sure how correct this is, but at least it doesn't break
'http://localhost/%C2%83',
r.application_url + bugsnag.wsgi.request_path(self.environ))
def test_werkzeug_instead_of_webob_also_handles_it_without_error(self):
import werkzeug
r = werkzeug.Request(self.environ)
self.assertEqual('http://localhost/%83', r.base_url)
if __name__ == '__main__':
unittest.main()
```
<details><summary>Error messages:</summary>
```
..E
======================================================================
ERROR: test_wrongly_encoded_url_should_not_raise (__main__.EncodingTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "../foo.py", line 21, in test_wrongly_encoded_url_should_not_raise
notification)
File "/Users/schnerring/Downloads/venv/lib/python3.7/site-packages/bugsnag/wsgi/middleware.py", line 28, in add_wsgi_request_data_to_notification
"url": request.path_url,
File "/Users/schnerring/Downloads/venv/lib/python3.7/site-packages/webob/request.py", line 467, in path_url
bpath_info = bytes_(self.path_info, self.url_encoding)
File "/Users/schnerring/Downloads/venv/lib/python3.7/site-packages/webob/descriptors.py", line 70, in fget
return req.encget(key, encattr=encattr)
File "/Users/schnerring/Downloads/venv/lib/python3.7/site-packages/webob/request.py", line 165, in encget
return bytes_(val, 'latin-1').decode(encoding)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x83 in position 1: invalid start byte
----------------------------------------------------------------------
Ran 3 tests in 0.099s
FAILED (errors=1)
```
</details>
| 0.0 | 22bd9e187c7347b80169f8507ba4da44663adfc8 | [
"tests/test_path_encoding.py::PathEncodingTest::test_path_supports_emoji",
"tests/test_path_encoding.py::PathEncodingTest::test_path_supports_non_ascii_characters"
]
| [
"tests/test_path_encoding.py::PathEncodingTest::test_path_supports_ascii_characters",
"tests/test_path_encoding.py::PathEncodingTest::test_wrongly_encoded_url_should_not_raise"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-05-04 16:31:34+00:00 | mit | 1,450 |
|
bugsnag__bugsnag-python-212 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 23433c2..48c4bf0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,9 @@ Changelog
* Support forcing an individual event to be sent synchronously to Bugsnag.
Given a configuration where asynchronous=True (the default setting), use
`notify(ex, asynchronous=False)` to block until the event is delivered.
+* Support configuring app type, which is a searchable field on the Bugsnag
+ dashboard. Set `Configuration.app_type` to add a `type` property to the app
+ metadata of an event.
### Fixes
diff --git a/bugsnag/configuration.py b/bugsnag/configuration.py
index 7084011..4bc2d78 100644
--- a/bugsnag/configuration.py
+++ b/bugsnag/configuration.py
@@ -68,6 +68,7 @@ class Configuration(_BaseConfiguration):
self.delivery = create_default_delivery()
self.lib_root = get_python_lib()
self.project_root = os.getcwd()
+ self.app_type = None
self.app_version = None
self.params_filters = ["password", "password_confirmation", "cookie",
"authorization"]
@@ -111,7 +112,7 @@ class Configuration(_BaseConfiguration):
'ignore_classes', 'lib_root', 'notify_release_stages',
'params_filters', 'project_root', 'proxy_host', 'release_stage',
'send_code', 'session_endpoint', 'traceback_exclude_modules',
- 'use_ssl',
+ 'use_ssl', 'app_type',
]
for option_name in options.keys():
@@ -134,6 +135,18 @@ class Configuration(_BaseConfiguration):
def api_key(self, value):
self._api_key = value
+ @property
+ def app_type(self):
+ """
+ Category for the current application or task
+ """
+ return self._app_type
+
+ @app_type.setter # type: ignore
+ @validate_str_setter
+ def app_type(self, value):
+ self._app_type = value
+
@property
def app_version(self):
"""
diff --git a/bugsnag/notification.py b/bugsnag/notification.py
index aaab7c5..8a18c82 100644
--- a/bugsnag/notification.py
+++ b/bugsnag/notification.py
@@ -50,6 +50,7 @@ class Notification(object):
self.release_stage = get_config("release_stage")
self.app_version = get_config("app_version")
+ self.app_type = get_config("app_type")
self.hostname = get_config("hostname")
self.runtime_versions = get_config("runtime_versions")
self.send_code = get_config("send_code")
@@ -232,7 +233,10 @@ class Notification(object):
"severityReason": self.severity_reason,
"unhandled": self.unhandled,
"releaseStage": self.release_stage,
- "appVersion": self.app_version,
+ "app": {
+ "version": self.app_version,
+ "type": self.app_type,
+ },
"context": self.context,
"groupingHash": self.grouping_hash,
"exceptions": [{
| bugsnag/bugsnag-python | def523131531c808312d09c6c60767aa9f9602a4 | diff --git a/tests/integrations/conftest.py b/tests/integrations/conftest.py
index 7edb31b..9425c21 100644
--- a/tests/integrations/conftest.py
+++ b/tests/integrations/conftest.py
@@ -11,4 +11,5 @@ def bugsnag_server():
yield server
+ bugsnag.configure(app_type=None)
server.shutdown()
diff --git a/tests/test_configuration.py b/tests/test_configuration.py
index 47d0603..be9b29c 100644
--- a/tests/test_configuration.py
+++ b/tests/test_configuration.py
@@ -138,6 +138,21 @@ class TestConfiguration(unittest.TestCase):
assert len(record) == 1
assert c.endpoint == 'https://notify.example.com'
+ def test_validate_app_type(self):
+ c = Configuration()
+ assert c.app_type is None
+ with pytest.warns(RuntimeWarning) as record:
+ c.configure(app_type=[])
+
+ assert len(record) == 1
+ assert str(record[0].message) == 'app_type should be str, got list'
+ assert c.app_type is None
+
+ c.configure(app_type='rq')
+
+ assert len(record) == 1
+ assert c.app_type == 'rq'
+
def test_validate_app_version(self):
c = Configuration()
with pytest.warns(RuntimeWarning) as record:
diff --git a/tests/test_notification.py b/tests/test_notification.py
index 20b10c6..3997dee 100644
--- a/tests/test_notification.py
+++ b/tests/test_notification.py
@@ -159,3 +159,26 @@ class TestNotification(unittest.TestCase):
device = payload['events'][0]['device']
self.assertEqual('test_host_name', device['hostname'])
self.assertEqual('9.9.9', device['runtimeVersions']['python'])
+
+ def test_default_app_type(self):
+ """
+ app_type is None by default
+ """
+ config = Configuration()
+ notification = Notification(Exception("oops"), config, {})
+ payload = json.loads(notification._payload())
+ app = payload['events'][0]['app']
+
+ assert app['type'] is None
+
+ def test_configured_app_type(self):
+ """
+ It should include app type if specified
+ """
+ config = Configuration()
+ config.configure(app_type='rq')
+ notification = Notification(Exception("oops"), config, {})
+ payload = json.loads(notification._payload())
+ app = payload['events'][0]['app']
+
+ assert app['type'] == 'rq'
diff --git a/tests/test_notify.py b/tests/test_notify.py
index 185b0a8..9b90d9c 100644
--- a/tests/test_notify.py
+++ b/tests/test_notify.py
@@ -80,7 +80,7 @@ class TestBugsnag(IntegrationTest):
bugsnag.notify(ScaryException('unexpected failover'))
json_body = self.server.received[0]['json_body']
event = json_body['events'][0]
- self.assertEqual('343.2.10', event['appVersion'])
+ self.assertEqual('343.2.10', event['app']['version'])
def test_notify_override_context(self):
bugsnag.notify(ScaryException('unexpected failover'),
@@ -189,6 +189,31 @@ class TestBugsnag(IntegrationTest):
bugsnag.notify(ScaryException('unexpected failover'))
self.assertEqual(0, len(self.server.received))
+ def test_notify_custom_app_type(self):
+ bugsnag.notify(ScaryException('unexpected failover'), app_type='work')
+ json_body = self.server.received[0]['json_body']
+ event = json_body['events'][0]
+ self.assertEqual('work', event['app']['type'])
+
+ def test_notify_callback_app_type(self):
+
+ def callback(report):
+ report.app_type = 'whopper'
+
+ bugsnag.configure(app_type='rq')
+ bugsnag.before_notify(callback)
+ bugsnag.notify(ScaryException('unexpected failover'))
+ json_body = self.server.received[0]['json_body']
+ event = json_body['events'][0]
+ self.assertEqual('whopper', event['app']['type'])
+
+ def test_notify_configured_app_type(self):
+ bugsnag.configure(app_type='rq')
+ bugsnag.notify(ScaryException('unexpected failover'))
+ json_body = self.server.received[0]['json_body']
+ event = json_body['events'][0]
+ self.assertEqual('rq', event['app']['type'])
+
def test_notify_sends_when_before_notify_throws(self):
def callback(report):
| No way to specify the app_type information
### Description
The API accepts an `app.type` field in the `events` array, but the Python lib doesn't provide a way to specify it.
### Issue
One should be able to specify this field using the official Python client.
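For illustration, the kind of usage this would enable (sketch; the API key is a placeholder):

```python
import bugsnag

# Set a searchable app type for every event reported by this process...
bugsnag.configure(api_key="your-api-key-here", app_type="rq")

# ...or override it per event when notifying.
bugsnag.notify(Exception("job failed"), app_type="worker")
```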
| 0.0 | def523131531c808312d09c6c60767aa9f9602a4 | [
"tests/test_configuration.py::TestConfiguration::test_validate_app_type",
"tests/test_notification.py::TestNotification::test_configured_app_type",
"tests/test_notification.py::TestNotification::test_default_app_type",
"tests/test_notify.py::TestBugsnag::test_notify_callback_app_type",
"tests/test_notify.py::TestBugsnag::test_notify_configured_app_type",
"tests/test_notify.py::TestBugsnag::test_notify_configured_app_version",
"tests/test_notify.py::TestBugsnag::test_notify_custom_app_type"
]
| [
"tests/test_configuration.py::TestConfiguration::test_custom_get_endpoint_default_ssl",
"tests/test_configuration.py::TestConfiguration::test_custom_get_endpoint_no_use_ssl",
"tests/test_configuration.py::TestConfiguration::test_custom_get_endpoint_use_ssl",
"tests/test_configuration.py::TestConfiguration::test_default_middleware_location",
"tests/test_configuration.py::TestConfiguration::test_full_custom_get_endpoint",
"tests/test_configuration.py::TestConfiguration::test_full_custom_get_endpoint_no_use_ssl",
"tests/test_configuration.py::TestConfiguration::test_full_custom_get_endpoint_use_ssl",
"tests/test_configuration.py::TestConfiguration::test_get_endpoint_no_use_ssl",
"tests/test_configuration.py::TestConfiguration::test_get_endpoint_use_ssl",
"tests/test_configuration.py::TestConfiguration::test_hostname",
"tests/test_configuration.py::TestConfiguration::test_ignore_classes",
"tests/test_configuration.py::TestConfiguration::test_reads_api_key_from_environ",
"tests/test_configuration.py::TestConfiguration::test_session_tracking_defaults",
"tests/test_configuration.py::TestConfiguration::test_should_notify",
"tests/test_configuration.py::TestConfiguration::test_validate_api_key",
"tests/test_configuration.py::TestConfiguration::test_validate_app_version",
"tests/test_configuration.py::TestConfiguration::test_validate_asynchronous",
"tests/test_configuration.py::TestConfiguration::test_validate_auto_capture_sessions",
"tests/test_configuration.py::TestConfiguration::test_validate_auto_notify",
"tests/test_configuration.py::TestConfiguration::test_validate_delivery",
"tests/test_configuration.py::TestConfiguration::test_validate_endpoint",
"tests/test_configuration.py::TestConfiguration::test_validate_hostname",
"tests/test_configuration.py::TestConfiguration::test_validate_ignore_classes",
"tests/test_configuration.py::TestConfiguration::test_validate_lib_root",
"tests/test_configuration.py::TestConfiguration::test_validate_notify_release_stages",
"tests/test_configuration.py::TestConfiguration::test_validate_params_filters",
"tests/test_configuration.py::TestConfiguration::test_validate_project_root",
"tests/test_configuration.py::TestConfiguration::test_validate_proxy_host",
"tests/test_configuration.py::TestConfiguration::test_validate_release_stage",
"tests/test_configuration.py::TestConfiguration::test_validate_send_code",
"tests/test_configuration.py::TestConfiguration::test_validate_session_endpoint",
"tests/test_configuration.py::TestConfiguration::test_validate_traceback_exclude_modules",
"tests/test_configuration.py::TestConfiguration::test_validate_unknown_config_option",
"tests/test_notification.py::TestNotification::test_code",
"tests/test_notification.py::TestNotification::test_code_at_end_of_file",
"tests/test_notification.py::TestNotification::test_code_at_start_of_file",
"tests/test_notification.py::TestNotification::test_code_turned_off",
"tests/test_notification.py::TestNotification::test_device_data",
"tests/test_notification.py::TestNotification::test_no_traceback_exclude_modules",
"tests/test_notification.py::TestNotification::test_sanitize",
"tests/test_notification.py::TestNotification::test_traceback_exclude_modules",
"tests/test_notify.py::TestBugsnag::test_asynchronous_notify",
"tests/test_notify.py::TestBugsnag::test_auto_notify_defaults",
"tests/test_notify.py::TestBugsnag::test_auto_notify_overrides",
"tests/test_notify.py::TestBugsnag::test_external_middleware_can_change_severity",
"tests/test_notify.py::TestBugsnag::test_external_middleware_cannot_change_severity_reason",
"tests/test_notify.py::TestBugsnag::test_internal_middleware_can_change_severity_reason",
"tests/test_notify.py::TestBugsnag::test_internal_middleware_changes_severity",
"tests/test_notify.py::TestBugsnag::test_middleware_stack_order",
"tests/test_notify.py::TestBugsnag::test_middleware_stack_order_legacy",
"tests/test_notify.py::TestBugsnag::test_notify_bad_encoding_exception_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_bad_encoding_metadata",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_add_custom_data",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_modifying_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_modifying_metadata",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_remove_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_configured_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_configured_hostname",
"tests/test_notify.py::TestBugsnag::test_notify_configured_invalid_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_configured_lib_root",
"tests/test_notify.py::TestBugsnag::test_notify_configured_metadata_sections",
"tests/test_notify.py::TestBugsnag::test_notify_configured_project_root",
"tests/test_notify.py::TestBugsnag::test_notify_configured_release_stage",
"tests/test_notify.py::TestBugsnag::test_notify_default_severity",
"tests/test_notify.py::TestBugsnag::test_notify_device_filter",
"tests/test_notify.py::TestBugsnag::test_notify_error_class",
"tests/test_notify.py::TestBugsnag::test_notify_error_message",
"tests/test_notify.py::TestBugsnag::test_notify_exception_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_exception_tuple_with_traceback",
"tests/test_notify.py::TestBugsnag::test_notify_exception_with_traceback_option",
"tests/test_notify.py::TestBugsnag::test_notify_ignore_class",
"tests/test_notify.py::TestBugsnag::test_notify_invalid_severity",
"tests/test_notify.py::TestBugsnag::test_notify_invalid_values_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_bool_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_complex_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_filter",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_integer_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_set_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_tuple_value",
"tests/test_notify.py::TestBugsnag::test_notify_method",
"tests/test_notify.py::TestBugsnag::test_notify_non_exception",
"tests/test_notify.py::TestBugsnag::test_notify_override_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_override_context",
"tests/test_notify.py::TestBugsnag::test_notify_override_deprecated_user_id",
"tests/test_notify.py::TestBugsnag::test_notify_override_grouping_hash",
"tests/test_notify.py::TestBugsnag::test_notify_override_metadata_sections",
"tests/test_notify.py::TestBugsnag::test_notify_override_severity",
"tests/test_notify.py::TestBugsnag::test_notify_override_user",
"tests/test_notify.py::TestBugsnag::test_notify_payload_matching_filter",
"tests/test_notify.py::TestBugsnag::test_notify_payload_version",
"tests/test_notify.py::TestBugsnag::test_notify_proxy",
"tests/test_notify.py::TestBugsnag::test_notify_recursive_metadata_array",
"tests/test_notify.py::TestBugsnag::test_notify_recursive_metadata_dict",
"tests/test_notify.py::TestBugsnag::test_notify_request_count",
"tests/test_notify.py::TestBugsnag::test_notify_sends_when_before_notify_throws",
"tests/test_notify.py::TestBugsnag::test_notify_severity_overridden",
"tests/test_notify.py::TestBugsnag::test_notify_single_value_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_stacktrace",
"tests/test_notify.py::TestBugsnag::test_notify_unconfigured_release_stage",
"tests/test_notify.py::TestBugsnag::test_notify_unhandled_defaults",
"tests/test_notify.py::TestBugsnag::test_notify_unhandled_severity_callback",
"tests/test_notify.py::TestBugsnag::test_notify_unicode_metadata",
"tests/test_notify.py::TestBugsnag::test_notify_user_filter",
"tests/test_notify.py::TestBugsnag::test_synchronous_individual_notify"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-08-13 16:18:22+00:00 | mit | 1,451 |
|
bugsnag__bugsnag-python-333 | diff --git a/bugsnag/event.py b/bugsnag/event.py
index a06cae3..1b18a9d 100644
--- a/bugsnag/event.py
+++ b/bugsnag/event.py
@@ -113,7 +113,7 @@ class Event:
self.metadata = {} # type: Dict[str, Dict[str, Any]]
if 'meta_data' in options:
- warnings.warn('The Event "metadata" argument has been replaced ' +
+ warnings.warn('The Event "meta_data" argument has been replaced ' +
'with "metadata"', DeprecationWarning)
for name, tab in options.pop("meta_data").items():
self.add_tab(name, tab)
@@ -126,8 +126,8 @@ class Event:
@property
def meta_data(self) -> Dict[str, Dict[str, Any]]:
- warnings.warn('The Event "metadata" property has been replaced ' +
- 'with "meta_data".', DeprecationWarning)
+ warnings.warn('The Event "meta_data" property has been replaced ' +
+ 'with "metadata".', DeprecationWarning)
return self.metadata
@property
| bugsnag/bugsnag-python | cc6aa2027b3c152b35b984638f9c749f25e7d39f | diff --git a/tests/test_event.py b/tests/test_event.py
index a5d91b3..7eda818 100644
--- a/tests/test_event.py
+++ b/tests/test_event.py
@@ -358,7 +358,7 @@ class TestEvent(unittest.TestCase):
assert len(records) > 0
i = len(records) - 1
- assert str(records[i].message) == ('The Event "metadata" ' +
+ assert str(records[i].message) == ('The Event "meta_data" ' +
'argument has been replaced ' +
'with "metadata"')
assert event.metadata['nuts']['almonds']
diff --git a/tests/test_notify.py b/tests/test_notify.py
index fc445b0..00c918e 100644
--- a/tests/test_notify.py
+++ b/tests/test_notify.py
@@ -843,7 +843,7 @@ class TestBugsnag(IntegrationTest):
meta_data={'fruit': {'apples': 2}})
assert len(records) == 1
- assert str(records[0].message) == ('The Event "metadata" ' +
+ assert str(records[0].message) == ('The Event "meta_data" ' +
'argument has been replaced ' +
'with "metadata"')
| DeprecationWarning: The Event "metadata" argument has been replaced with "metadata" - Typo
### Describe the bug
There is a typo in the deprecation warning.
It should read `DeprecationWarning: The Event "meta_data" argument has been replaced with "metadata"`.
### Steps to reproduce

### Environment
* Bugsnag version: 4.2.1
* Python version: 3.9.13
* Integration framework version:
  * ASGI: Daphne
  * Celery: 5.2
  * Django: 4.0
| 0.0 | cc6aa2027b3c152b35b984638f9c749f25e7d39f | [
"tests/test_event.py::TestEvent::test_meta_data_warning",
"tests/test_notify.py::TestBugsnag::test_meta_data_warning"
]
| [
"tests/test_event.py::TestEvent::test_a_source_function_can_be_provided",
"tests/test_event.py::TestEvent::test_a_traceback_can_be_provided",
"tests/test_event.py::TestEvent::test_adding_new_breadcrumbs_does_not_change_past_events",
"tests/test_event.py::TestEvent::test_breadcrumb_array_is_always_in_payload",
"tests/test_event.py::TestEvent::test_breadcrumbs_are_included_in_payload",
"tests/test_event.py::TestEvent::test_breadcrumbs_are_read_from_configuration",
"tests/test_event.py::TestEvent::test_code",
"tests/test_event.py::TestEvent::test_code_at_end_of_file",
"tests/test_event.py::TestEvent::test_code_at_start_of_file",
"tests/test_event.py::TestEvent::test_code_turned_off",
"tests/test_event.py::TestEvent::test_configured_app_type",
"tests/test_event.py::TestEvent::test_default_app_type",
"tests/test_event.py::TestEvent::test_default_request",
"tests/test_event.py::TestEvent::test_device_data",
"tests/test_event.py::TestEvent::test_mutating_breadcrumb_list_does_not_mutate_event",
"tests/test_event.py::TestEvent::test_no_traceback_exclude_modules",
"tests/test_event.py::TestEvent::test_original_exception_can_be_reassigned",
"tests/test_event.py::TestEvent::test_sanitize",
"tests/test_event.py::TestEvent::test_source_function_is_ignored_when_invalid",
"tests/test_event.py::TestEvent::test_stacktrace_can_be_mutated",
"tests/test_event.py::TestEvent::test_stacktrace_can_be_reassigned",
"tests/test_event.py::TestEvent::test_traceback_exclude_modules",
"tests/test_notify.py::TestBugsnag::test_asynchronous_notify",
"tests/test_notify.py::TestBugsnag::test_auto_notify_defaults",
"tests/test_notify.py::TestBugsnag::test_auto_notify_exc_info",
"tests/test_notify.py::TestBugsnag::test_auto_notify_exc_info_overrides",
"tests/test_notify.py::TestBugsnag::test_auto_notify_ignored_exc_info",
"tests/test_notify.py::TestBugsnag::test_auto_notify_overrides",
"tests/test_notify.py::TestBugsnag::test_external_middleware_can_change_severity",
"tests/test_notify.py::TestBugsnag::test_external_middleware_cannot_change_severity_reason",
"tests/test_notify.py::TestBugsnag::test_ignore_classes_checks_exception_chain_with_explicit_cause",
"tests/test_notify.py::TestBugsnag::test_ignore_classes_checks_exception_chain_with_implicit_cause",
"tests/test_notify.py::TestBugsnag::test_ignore_classes_has_no_exception_chain_with_no_cause",
"tests/test_notify.py::TestBugsnag::test_internal_middleware_can_change_severity_reason",
"tests/test_notify.py::TestBugsnag::test_internal_middleware_changes_severity",
"tests/test_notify.py::TestBugsnag::test_middleware_stack_order",
"tests/test_notify.py::TestBugsnag::test_middleware_stack_order_legacy",
"tests/test_notify.py::TestBugsnag::test_notify_bad_encoding_exception_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_bad_encoding_metadata",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_add_custom_data",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_modifying_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_modifying_metadata",
"tests/test_notify.py::TestBugsnag::test_notify_before_notify_remove_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_callback_app_type",
"tests/test_notify.py::TestBugsnag::test_notify_configured_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_configured_app_type",
"tests/test_notify.py::TestBugsnag::test_notify_configured_app_version",
"tests/test_notify.py::TestBugsnag::test_notify_configured_hostname",
"tests/test_notify.py::TestBugsnag::test_notify_configured_invalid_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_configured_lib_root",
"tests/test_notify.py::TestBugsnag::test_notify_configured_metadata_sections",
"tests/test_notify.py::TestBugsnag::test_notify_configured_project_root",
"tests/test_notify.py::TestBugsnag::test_notify_configured_release_stage",
"tests/test_notify.py::TestBugsnag::test_notify_custom_app_type",
"tests/test_notify.py::TestBugsnag::test_notify_default_severity",
"tests/test_notify.py::TestBugsnag::test_notify_device_filter",
"tests/test_notify.py::TestBugsnag::test_notify_error_class",
"tests/test_notify.py::TestBugsnag::test_notify_error_message",
"tests/test_notify.py::TestBugsnag::test_notify_exception_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_exception_tuple_with_traceback",
"tests/test_notify.py::TestBugsnag::test_notify_exception_with_traceback_option",
"tests/test_notify.py::TestBugsnag::test_notify_ignore_class",
"tests/test_notify.py::TestBugsnag::test_notify_invalid_severity",
"tests/test_notify.py::TestBugsnag::test_notify_invalid_values_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_bool_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_complex_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_filter",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_integer_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_set_value",
"tests/test_notify.py::TestBugsnag::test_notify_metadata_tuple_value",
"tests/test_notify.py::TestBugsnag::test_notify_method",
"tests/test_notify.py::TestBugsnag::test_notify_non_exception",
"tests/test_notify.py::TestBugsnag::test_notify_override_api_key",
"tests/test_notify.py::TestBugsnag::test_notify_override_context",
"tests/test_notify.py::TestBugsnag::test_notify_override_deprecated_user_id",
"tests/test_notify.py::TestBugsnag::test_notify_override_grouping_hash",
"tests/test_notify.py::TestBugsnag::test_notify_override_metadata_sections",
"tests/test_notify.py::TestBugsnag::test_notify_override_severity",
"tests/test_notify.py::TestBugsnag::test_notify_override_user",
"tests/test_notify.py::TestBugsnag::test_notify_payload_matching_filter",
"tests/test_notify.py::TestBugsnag::test_notify_payload_version",
"tests/test_notify.py::TestBugsnag::test_notify_proxy",
"tests/test_notify.py::TestBugsnag::test_notify_recursive_metadata_array",
"tests/test_notify.py::TestBugsnag::test_notify_recursive_metadata_dict",
"tests/test_notify.py::TestBugsnag::test_notify_request_count",
"tests/test_notify.py::TestBugsnag::test_notify_sends_when_before_notify_throws",
"tests/test_notify.py::TestBugsnag::test_notify_severity_overridden",
"tests/test_notify.py::TestBugsnag::test_notify_single_value_tuple",
"tests/test_notify.py::TestBugsnag::test_notify_stacktrace",
"tests/test_notify.py::TestBugsnag::test_notify_unconfigured_release_stage",
"tests/test_notify.py::TestBugsnag::test_notify_unhandled_defaults",
"tests/test_notify.py::TestBugsnag::test_notify_unhandled_severity_callback",
"tests/test_notify.py::TestBugsnag::test_notify_unicode_metadata",
"tests/test_notify.py::TestBugsnag::test_notify_user_filter",
"tests/test_notify.py::TestBugsnag::test_synchronous_individual_notify"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-06 17:39:27+00:00 | mit | 1,452 |
|
bugsnag__bugsnag-python-334 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2fc7866..517c2f9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,11 @@ Changelog
## TBD
+### Enhancements
+
+* Allows changing the grouping hash when using `BugsnagHandler` via the logger methods' `extra` keyword argument
+ [#334](https://github.com/bugsnag/bugsnag-python/pull/334)
+
### Bug fixes
* Fixes one of the fields being mistakenly replaced with `[RECURSIVE]` when encoding a list or dictionary with identical siblings but no recursion.
@@ -11,6 +16,7 @@ Changelog
* Fix the ignore class list not accounting for nested classes
[#342](https://github.com/bugsnag/bugsnag-python/pull/342)
+
## v4.4.0 (2023-02-21)
### Enhancements
diff --git a/bugsnag/handlers.py b/bugsnag/handlers.py
index e48b91a..ded43a1 100644
--- a/bugsnag/handlers.py
+++ b/bugsnag/handlers.py
@@ -18,7 +18,8 @@ class BugsnagHandler(logging.Handler, object):
self.custom_metadata_fields = extra_fields
self.callbacks = [self.extract_default_metadata,
self.extract_custom_metadata,
- self.extract_severity]
+ self.extract_severity,
+ self.extract_grouping_hash]
def emit(self, record: LogRecord):
"""
@@ -113,6 +114,13 @@ class BugsnagHandler(logging.Handler, object):
else:
options['severity'] = 'info'
+ def extract_grouping_hash(self, record: LogRecord, options: Dict):
+ """
+ Add the grouping_hash from a log record to the options
+ """
+ if 'groupingHash' in record.__dict__:
+ options['grouping_hash'] = record.__dict__['groupingHash']
+
def extract_custom_metadata(self, record: LogRecord, options: Dict):
"""
Append the contents of selected fields of a record to the metadata
| bugsnag/bugsnag-python | 21ddc08b3c6602324fd18f2915f1a4f439c75138 | diff --git a/tests/test_handlers.py b/tests/test_handlers.py
index e7e0d30..8aa4ebe 100644
--- a/tests/test_handlers.py
+++ b/tests/test_handlers.py
@@ -457,6 +457,18 @@ class HandlersTest(IntegrationTest):
self.assertEqual(event['metaData']['custom'],
{'exception': 'metadata'})
+ @use_client_logger
+ def test_logging_grouping_hash(self, handler, logger):
+ logger.info("This happened", extra={'groupingHash': '<hash value>'})
+
+ self.assertSentReportCount(1)
+ json_body = self.server.received[0]['json_body']
+ event = json_body['events'][0]
+ exception = event['exceptions'][0]
+
+ self.assertEqual(exception['message'], 'This happened')
+ self.assertEqual(event['groupingHash'], '<hash value>')
+
@use_client_logger
def test_log_filter_leaves_breadcrumbs_for_logs_below_report_level(
self,
| Unable to set grouping_hash using `logger.<level>()` methods
### Description
There is no way to change the grouping hash value when using logger methods and the BugsnagHandler to send events.
More generally, this module's API does not make the best use of the standard logging API, which makes it very hard to integrate with existing programs and packages.
**Describe the solution you'd like**
```python
try:
    # Raising code
except:
    logger.exception("This happened", extra={'groupingHash': '<hash value>', ...})
    # Handle the exception.
```
Or to make it even less vendor specific and provide the ability to adopt Bugsnag without modifying a single line of code, while still grouping as one needs, e.g. if the grouping key to use is already available in the log record extras:
```python
import logging
root_logger = logging.getLogger()
# Note that no one should have to write such code by hand (see alternatives section below)
root_logger.addHandler(BugsnagHandler(grouping_hash='<extra-key-name>', ...))
# And later on in the code
try:
    # Raising code
except:
    logger.exception('This happened', extra={'<extra-key-name>': '<hash value>', ...})
    # Handle the exception
```
**Describe alternatives you've considered**
One way to override the `grouping_hash` so far is to use that keyword parameter of the `bugsnag.notify()` function.
This API is not ideal, as it forces a provider-specific call into your code when there is no need for one.
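For illustration, a rough sketch of that workaround (the failing function is made up; only the `grouping_hash` keyword comes from the notify API, and Bugsnag is assumed to be configured already):
```python
import bugsnag

def charge_card():
    # Hypothetical application code that fails.
    raise RuntimeError("card declined")

try:
    charge_card()
except Exception as exc:
    # The grouping hash goes through Bugsnag's own notify() call rather than
    # through the standard logging machinery - hence "provider specific".
    bugsnag.notify(exc, grouping_hash="card-charge-failures")
```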
Another way would be, I believe, to use a [callback](https://github.com/bugsnag/bugsnag-python/blob/master/bugsnag/handlers.py#L80). The problem with this approach is that, as currently implemented, it is not possible to pass callbacks if one wants to configure logging using a standard [configuration file](https://docs.python.org/3/library/logging.config.html). One has to make explicit calls to the `BugsnagHandler.add_callback()` method somewhere in one's code. Why write code to do something that should be done through configuration?
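For reference, a rough sketch of that callback-based alternative (the helper name and the `groupingHash` attribute are illustrative; Bugsnag is assumed to be configured elsewhere, and callbacks receive the log record plus the dict of notify options):
```python
import logging

from bugsnag.handlers import BugsnagHandler

def attach_grouping_hash(record, options):
    # Copy a custom attribute supplied via `extra=` into the notify() options.
    if hasattr(record, "groupingHash"):
        options["grouping_hash"] = record.groupingHash

handler = BugsnagHandler()
handler.add_callback(attach_grouping_hash)  # the explicit call this report objects to
logging.getLogger().addHandler(handler)

logging.getLogger(__name__).error("This happened", extra={"groupingHash": "<hash value>"})
```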
**Additional context**
<!--Add any other context about the feature request here.--> | 0.0 | 21ddc08b3c6602324fd18f2915f1a4f439c75138 | [
"tests/test_handlers.py::HandlersTest::test_logging_grouping_hash"
]
| [
"tests/test_handlers.py::HandlersTest::test_client_add_callback",
"tests/test_handlers.py::HandlersTest::test_client_callback_exception",
"tests/test_handlers.py::HandlersTest::test_client_callback_exception_metadata",
"tests/test_handlers.py::HandlersTest::test_client_clear_callbacks",
"tests/test_handlers.py::HandlersTest::test_client_crashing_callback",
"tests/test_handlers.py::HandlersTest::test_client_message",
"tests/test_handlers.py::HandlersTest::test_client_metadata_fields",
"tests/test_handlers.py::HandlersTest::test_client_remove_callback",
"tests/test_handlers.py::HandlersTest::test_client_severity_critical",
"tests/test_handlers.py::HandlersTest::test_client_severity_error",
"tests/test_handlers.py::HandlersTest::test_client_severity_info",
"tests/test_handlers.py::HandlersTest::test_client_severity_warning",
"tests/test_handlers.py::HandlersTest::test_custom_level",
"tests/test_handlers.py::HandlersTest::test_custom_levelname",
"tests/test_handlers.py::HandlersTest::test_exc_info",
"tests/test_handlers.py::HandlersTest::test_extra_fields",
"tests/test_handlers.py::HandlersTest::test_levelname_message",
"tests/test_handlers.py::HandlersTest::test_log_filter_does_not_leave_breadcrumbs_for_logs_below_its_level",
"tests/test_handlers.py::HandlersTest::test_log_filter_does_not_leave_breadcrumbs_when_bugsnag_create_breadcrumb_is_false",
"tests/test_handlers.py::HandlersTest::test_log_filter_does_not_leave_breadcrumbs_when_log_breadcrumbs_are_disabled",
"tests/test_handlers.py::HandlersTest::test_log_filter_leaves_breadcrumbs_for_logs_below_report_level",
"tests/test_handlers.py::HandlersTest::test_log_filter_leaves_breadcrumbs_when_handler_has_no_level",
"tests/test_handlers.py::HandlersTest::test_log_filter_leaves_breadcrumbs_when_manually_constructed",
"tests/test_handlers.py::HandlersTest::test_message",
"tests/test_handlers.py::HandlersTest::test_severity_critical",
"tests/test_handlers.py::HandlersTest::test_severity_error",
"tests/test_handlers.py::HandlersTest::test_severity_info",
"tests/test_handlers.py::HandlersTest::test_severity_warning"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-06 21:41:49+00:00 | mit | 1,453 |
|
bwhmather__python-validation-90 | diff --git a/validation/__init__.py b/validation/__init__.py
index 5ca0ab8..0f70a6a 100644
--- a/validation/__init__.py
+++ b/validation/__init__.py
@@ -21,6 +21,8 @@ from .datastructure import (
validate_tuple,
)
+from .uuid import validate_uuid
+
from .email import validate_email_address
__all__ = [
@@ -29,6 +31,6 @@ __all__ = [
'validate_date', 'validate_datetime',
'validate_list', 'validate_set',
'validate_mapping', 'validate_structure',
- 'validate_tuple',
+ 'validate_tuple', 'validate_uuid',
'validate_email_address',
]
diff --git a/validation/uuid.py b/validation/uuid.py
new file mode 100644
index 0000000..b5fcfca
--- /dev/null
+++ b/validation/uuid.py
@@ -0,0 +1,166 @@
+from __future__ import absolute_import
+
+import uuid
+
+from .core import _validate_bool
+from .number import _validate_int
+from .common import make_optional_argument_default
+
+
+_undefined = make_optional_argument_default()
+
+
+def _variant_to_string(variant):
+ return {
+ uuid.RESERVED_NCS: "RESERVED_NCS",
+ uuid.RFC_4122: "RFC_4122",
+ uuid.RESERVED_MICROSOFT: "RESERVED_MICROSOFT",
+ uuid.RESERVED_FUTURE: "RESERVED_FUTURE",
+ }.get(variant, "unknown")
+
+
+def _validate_uuid(
+ value,
+ variant,
+ version,
+ required
+):
+ if value is None:
+ if required:
+ raise TypeError("required value is None")
+ return
+
+ if not isinstance(value, uuid.UUID):
+ raise TypeError((
+ "expected uuid, but value is of type {cls!r}"
+ ).format(cls=value.__class__.__name__))
+
+ if variant is not None and value.variant != variant:
+ raise ValueError((
+ "expected {expected} variant, but uuid variant is {actual}"
+ ).format(
+ expected=_variant_to_string(variant),
+ actual=_variant_to_string(value.variant)
+ ))
+
+ if version is not None and value.version != version:
+ raise ValueError((
+ "expected UUID{expected}, but received UUID{actual}"
+ ).format(expected=version, actual=version))
+
+
+class _uuid_validator(object):
+ def __init__(
+ self,
+ variant,
+ version,
+ required
+ ):
+ if variant is not None and variant not in (
+ uuid.RESERVED_NCS,
+ uuid.RFC_4122,
+ uuid.RESERVED_MICROSOFT,
+ uuid.RESERVED_FUTURE,
+ ):
+ raise ValueError("unknown variant")
+ self.__variant = variant
+
+ _validate_int(version, required=False)
+ if version is not None:
+ if version not in (1, 3, 4, 5):
+ raise ValueError(
+ "unknown UUID version: {version}".format(version=version)
+ )
+
+ if variant is None:
+ variant = uuid.RFC_4122
+
+ if variant != uuid.RFC_4122:
+ raise ValueError((
+ "version is specified, but variant is {variant}"
+ ).format(variant=_variant_to_string(variant)))
+ self.__version = version
+
+ _validate_bool(required)
+ self.__required = required
+
+ def __call__(self, value):
+ _validate_uuid(
+ value,
+ variant=self.__variant,
+ version=self.__version,
+ required=self.__required
+ )
+
+ def __repr__(self):
+ args = []
+ if self.__variant is not None:
+ args.append('variant=uuid.{variant}'.format(
+ variant=_variant_to_string(self.__variant),
+ ))
+
+ if self.__version is not None:
+ args.append('version={version!r}'.format(
+ version=self.__version,
+ ))
+
+ if not self.__required:
+ args.append('required={required!r}'.format(
+ required=self.__required,
+ ))
+
+ return 'validate_uuid({args})'.format(args=', '.join(args))
+
+
+def validate_uuid(
+ value=_undefined,
+ variant=None,
+ version=None,
+ required=True,
+):
+ """
+ Checks that the target value is a valid UUID.
+
+ Parameters can be used to narrow down exactly what sort of UUID is
+ expected.
+
+ .. code:: python
+
+ def do_the_thing(identifier):
+ validate_uuid(
+ identifier,
+ variant=uuid.RFC_4122,
+ version=3,
+ )
+
+ # Do something
+ ...
+
+ :param unicode value:
+ The uuid to be validated.
+ :param int variant:
+ The UUID variant determines the internal layout of the UUID. This must
+ be one of `RESERVED_NCS`, `RFC_4122`, `RESERVED_MICROSOFT`, or
+ `RESERVED_FUTURE` from the `uuid` module.
+ :param int version:
+ Can be 1, 3, 4, or 5.
+ :param bool required:
+ Whether the value can be `None`. Defaults to `True`.
+
+ :raises TypeError:
+ If the value is not a unicode string , or if it was marked as
+ `required` but `None` was passed in.
+ :raises ValueError:
+ If the value was longer or shorter than expected, or did not match
+ the pattern.
+ """
+ validate = _uuid_validator(
+ variant=variant,
+ version=version,
+ required=required,
+ )
+
+ if value is not _undefined:
+ validate(value)
+ else:
+ return validate
diff --git a/validation/uuid.pyi b/validation/uuid.pyi
new file mode 100644
index 0000000..d2d54aa
--- /dev/null
+++ b/validation/uuid.pyi
@@ -0,0 +1,44 @@
+from typing import Union, overload, Callable, Pattern, Optional
+from uuid import UUID
+import six
+
+
+@overload
+def validate_uuid(
+ value: UUID,
+ *,
+ variant: Optional[str] = None,
+ version: Optional[int] = None,
+) -> None:
+ ...
+
+
+@overload
+def validate_uuid(
+ value: Optional[UUID],
+ *,
+ variant: Optional[str] = None,
+ version: Optional[int] = None,
+ required: bool,
+) -> None:
+ ...
+
+
+@overload
+def validate_uuid(
+ *,
+ variant: Optional[str] = None,
+ version: Optional[int] = None,
+) -> Callable[[UUID], None]:
+ ...
+
+
+@overload
+def validate_uuid(
+ *,
+ variant: Optional[str] = None,
+ version: Optional[int] = None,
+ required: bool,
+) -> Callable[[Optional[UUID]], None]:
+ ...
+
| bwhmather/python-validation | 20c71a5378b7960a2e4c3f859247a2b0504c2626 | diff --git a/validation/tests/__init__.py b/validation/tests/__init__.py
index 9bfb5d4..192cc58 100644
--- a/validation/tests/__init__.py
+++ b/validation/tests/__init__.py
@@ -15,6 +15,7 @@ from . import (
test_tuple,
test_optional_argument,
test_email,
+ test_uuid,
) # noqa:
@@ -34,4 +35,5 @@ suite = unittest.TestSuite((
loader.loadTestsFromModule(test_tuple), # type: ignore
loader.loadTestsFromModule(test_optional_argument), # type: ignore
loader.loadTestsFromModule(test_email), # type: ignore
+ loader.loadTestsFromModule(test_uuid), # type: ignore
))
diff --git a/validation/tests/test_uuid.py b/validation/tests/test_uuid.py
new file mode 100644
index 0000000..1ffc04f
--- /dev/null
+++ b/validation/tests/test_uuid.py
@@ -0,0 +1,64 @@
+import unittest
+import uuid
+
+from validation import validate_uuid
+
+
+class ValidateUUIDTestCase(unittest.TestCase):
+ def test_uuid1_valid(self):
+ validate_uuid(uuid.uuid1())
+
+ def test_uuid1_expected_valid(self):
+ validate_uuid(uuid.uuid1(), version=1)
+
+ def test_uuid1_expected_invalid(self):
+ with self.assertRaises(ValueError):
+ validate_uuid(uuid.uuid4(), version=1)
+
+ def test_uuid3_valid(self):
+ validate_uuid(uuid.uuid3(uuid.uuid4(), "name"))
+
+ def test_uuid3_expected_valid(self):
+ validate_uuid(uuid.uuid3(uuid.uuid4(), "name"), version=3)
+
+ def test_uuid3_expected_invalid(self):
+ with self.assertRaises(ValueError):
+ validate_uuid(uuid.uuid4(), version=3)
+
+ def test_uuid4_valid(self):
+ validate_uuid(uuid.uuid4())
+
+ def test_uuid5_valid(self):
+ validate_uuid(uuid.uuid5(uuid.uuid4(), "name"))
+
+ def test_rfc4122_valid(self):
+ validate_uuid(uuid.uuid4(), variant=uuid.RFC_4122)
+
+ def test_microsoft_invalid(self):
+ with self.assertRaises(ValueError):
+ validate_uuid(uuid.uuid4(), variant=uuid.RESERVED_MICROSOFT)
+
+ def test_incompatible_variant_version(self):
+ with self.assertRaises(ValueError):
+ validate_uuid(variant=uuid.RESERVED_MICROSOFT, version=4)
+
+ def test_not_required(self):
+ validate_uuid(None, required=False)
+
+ def test_required(self):
+ with self.assertRaises(TypeError):
+ validate_uuid(None)
+
+ def test_repr_required_false(self):
+ validator = validate_uuid(required=False)
+ self.assertEqual(
+ repr(validator),
+ 'validate_uuid(required=False)',
+ )
+
+ def test_repr_full(self):
+ validator = validate_uuid(variant=uuid.RFC_4122, version=3)
+ self.assertEqual(
+ repr(validator),
+ 'validate_uuid(variant=uuid.RFC_4122, version=3)',
+ )
| Add validation function for validating UUIDs | 0.0 | 20c71a5378b7960a2e4c3f859247a2b0504c2626 | [
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_incompatible_variant_version",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_microsoft_invalid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_not_required",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_repr_full",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_repr_required_false",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_required",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_rfc4122_valid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid1_expected_invalid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid1_expected_valid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid1_valid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid3_expected_invalid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid3_expected_valid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid3_valid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid4_valid",
"validation/tests/test_uuid.py::ValidateUUIDTestCase::test_uuid5_valid"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-08-20 08:11:01+00:00 | apache-2.0 | 1,454 |
|
bwhmather__python-validation-95 | diff --git a/validation/datastructure.py b/validation/datastructure.py
index 8863625..e7d5b52 100644
--- a/validation/datastructure.py
+++ b/validation/datastructure.py
@@ -463,7 +463,7 @@ def validate_mapping(
def _validate_structure(
value,
- schema=None, allow_extra=False,
+ schema=None, allow_extra=False, missing_as_none=False,
required=True,
):
if value is None:
@@ -478,20 +478,20 @@ def _validate_structure(
if schema is not None:
for key, validator in schema.items():
- if key not in value:
+ if not missing_as_none and key not in value:
raise KeyError((
"dictionary missing expected key: {key!r}"
).format(key=key))
try:
- validator(value[key])
+ validator(value.get(key, None))
except (TypeError, ValueError, KeyError):
_try_contextualize_exception(
"invalid value for key {key!r}".format(key=key),
)
raise
- if not allow_extra and set(schema) != set(value):
+ if not allow_extra and set(value) - set(schema):
raise ValueError((
"dictionary contains unexpected keys: {unexpected}"
).format(
@@ -503,7 +503,7 @@ def _validate_structure(
class _structure_validator(object):
- def __init__(self, schema, allow_extra, required):
+ def __init__(self, schema, allow_extra, missing_as_none, required):
_validate_structure(schema, schema=None, required=False)
if schema is not None:
# Make a copy of the schema to make sure it won't be mutated while
@@ -514,6 +514,9 @@ class _structure_validator(object):
_validate_bool(allow_extra)
self.__allow_extra = allow_extra
+ _validate_bool(missing_as_none)
+ self.__missing_as_none = missing_as_none
+
_validate_bool(required)
self.__required = required
@@ -522,6 +525,7 @@ class _structure_validator(object):
value,
schema=self.__schema,
allow_extra=self.__allow_extra,
+ missing_as_none=self.__missing_as_none,
required=self.__required,
)
@@ -537,6 +541,11 @@ class _structure_validator(object):
allow_extra=self.__allow_extra,
))
+ if self.__missing_as_none:
+ args.append('missing_as_none={missing_as_none!r}'.format(
+ missing_as_none=self.__missing_as_none,
+ ))
+
if not self.__required:
args.append('required={required!r}'.format(
required=self.__required,
@@ -547,7 +556,9 @@ class _structure_validator(object):
def validate_structure(
value=_undefined,
- schema=None, allow_extra=False,
+ schema=None,
+ allow_extra=False,
+ missing_as_none=False,
required=True,
):
"""
@@ -577,11 +588,18 @@ def validate_structure(
The schema against which the value should be checked.
:param bool allow_extra:
Set to `True` to ignore extra keys.
+ :param bool missing_as_none:
+ Set to treat keys that are absent from the structure as if they had
+ been set to None. Default is to raise an error if any keys are
+ missing.
:param bool required:
Whether the value can't be `None`. Defaults to True.
"""
validate = _structure_validator(
- schema=schema, allow_extra=allow_extra, required=required,
+ schema=schema,
+ allow_extra=allow_extra,
+ missing_as_none=missing_as_none,
+ required=required,
)
if value is not _undefined:
diff --git a/validation/datastructure.pyi b/validation/datastructure.pyi
index 99d646d..0dab9ef 100644
--- a/validation/datastructure.pyi
+++ b/validation/datastructure.pyi
@@ -194,8 +194,10 @@ def validate_mapping(
@overload
def validate_structure(
value: Dict,
- *, allow_extra: bool=False,
+ *,
schema: Dict=None,
+ allow_extra: bool=False,
+ missing_as_none: bool=False,
) -> None:
...
@@ -203,8 +205,10 @@ def validate_structure(
@overload
def validate_structure(
value: Optional[Dict],
- *, allow_extra: bool=False,
+ *,
schema: Dict=None,
+ allow_extra: bool=False,
+ missing_as_none: bool=False,
required: bool,
) -> None:
...
@@ -212,16 +216,20 @@ def validate_structure(
@overload
def validate_structure(
- *, allow_extra: bool=False,
+ *,
schema: Dict=None,
+ allow_extra: bool=False,
+ missing_as_none: bool=False,
) -> Callable[[Dict], None]:
...
@overload
def validate_structure(
- *, allow_extra: bool=False,
+ *,
schema: Dict=None,
+ allow_extra: bool=False,
+ missing_as_none: bool=False,
required: bool,
) -> Callable[[Optional[Dict]], None]:
...
| bwhmather/python-validation | 691f9d1b0adce0bd8f026f37a26149cee65be1ab | diff --git a/validation/tests/test_structure.py b/validation/tests/test_structure.py
index 2970236..b393540 100644
--- a/validation/tests/test_structure.py
+++ b/validation/tests/test_structure.py
@@ -85,6 +85,21 @@ class ValidateStructureTestCase(unittest.TestCase):
'unexpected': 2,
})
+ def test_schema_missing_as_none_required(self): # type: () -> None
+ validator = validate_structure(schema={
+ 'required': validate_int(),
+ }, missing_as_none=True)
+
+ with self.assertRaises(TypeError):
+ validator({})
+
+ def test_schema_missing_as_none_optional(self): # type: () -> None
+ validator = validate_structure(schema={
+ 'required': validate_int(required=False),
+ }, missing_as_none=True)
+
+ validator({})
+
def test_repr_1(self): # type: () -> None
validator = validate_structure(schema={'key': validate_int()})
self.assertEqual(
| Allow optional keys in validate_structure
Possible approaches:
- Pass a default value (probably `None`) to nested validators if the key is missing.
- Take a set of required values as a keyword argument.
- Take a set of optional values as a keyword argument. | 0.0 | 691f9d1b0adce0bd8f026f37a26149cee65be1ab | [
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_missing_as_none_optional",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_missing_as_none_required"
]
| [
"validation/tests/test_structure.py::ValidateStructureTestCase::test_basic_valid",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_dont_reraise_builtin_nonstring",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_dont_reraise_builtin_subclass",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_invalid_container_type",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_not_required",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_repr_1",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_repr_2",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_required",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_reraise_builtin",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_reraise_builtin_nomessage",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_allow_extra",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_invalid_value",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_invalid_value_type",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_missing_key",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_positional_argument",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_unexpected_key",
"validation/tests/test_structure.py::ValidateStructureTestCase::test_schema_valid"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-07 11:48:00+00:00 | apache-2.0 | 1,455 |
|
bwindsor__typed-config-8 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b77c93..193bb80 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,3 +30,12 @@ Add `replace_source` and `set_sources` methods on `Config` and `ConfigProvider`
### v0.2.2
* Fix typing on `@section` decorator. This was previously preventing IDE autocompletion. The return type of the decorator now uses a generic to indicate that it returns the same type as it is passed. This means that whether or not it is passed a `Config` subclass is no longer type checked, but it means that the returned class has all the correct properties on it, which is more important.
* Add basic `repr` function to `Config`
+
+### v0.2.3
+Version bump to build after move from travis-ci.org to travis-ci.com
+
+### v0.2.4
+Version bump to build after move from travis-ci.org to travis-ci.com
+
+### v0.2.5
+Version bump to build after move from travis-ci.org to travis-ci.com
\ No newline at end of file
diff --git a/README.md b/README.md
index 45aec6b..45935a4 100644
--- a/README.md
+++ b/README.md
@@ -171,6 +171,9 @@ In this example we have three ways of casting:
3. Defining a custom function. Your function should take one string input and return one output of any type. To get type hint, just make sure your function has type annotations.
4. Using a lambda expression. The type inference may or may not work depending on your expression, so if it doesn't just write it as a function with type annotations.
+### Validation
+You can validate what has been supplied by providing a custom `cast` function to a `key`, which validates the configuration value in addition to parsing it.
+
### Extending configuration using shared ConfigProvider
Multiple application modules may use different configuration schemes while sharing the same configuration source. Analogously, various `Config` classes may provide different view of the same configuration data, sharing the same `ConfigProvider`.
@@ -274,6 +277,51 @@ print(extended_config.app_extension.api_key)
print(extended_config.database.host)
```
+### Configuration variables which depend on other configuration variables
+Sometimes you may wish to set the value of some configuration variables based on others. You may also wish to validate some variables, for example allowed values may be different depending on the value of another config variable. For this you can add a `post_read_hook`.
+
+The default implementation of `post_read_hook` returns an empty `dict`. You can override this by implementing your own `post_read_hook` method. It should receive only `self` as an input, and return a `dict`. This `dict` should be a simple mapping from config keys to values. For hierarchical configurations, you can nest the dictionaries. If you provide a `post_read_hook` in both a parent and a child class which both make changes to the same keys (don't do this) then the values returned by the child method will overwrite those by the parent.
+
+This hook is called whenever you call the `read` method. If you use lazy loading and skip calling the `read` method, you cannot use this hook.
+```python
+# my_app/config.py
+from typedconfig import Config, key, group_key, section
+from typedconfig.source import EnvironmentConfigSource
+
+@section('child')
+class ChildConfig(Config):
+ http_port_plus_one = key(cast=int, required=False)
+
+@section('app')
+class AppConfig(Config):
+ use_https = key(cast=bool)
+ http_port = key(key_name='port', cast=int, required=False)
+ child = group_key(ChildConfig)
+
+ def post_read_hook(self) -> dict:
+ config_updates = dict()
+ # If the port has not been provided, set it based on the value of use_https
+ if self.http_port is None:
+ config_updates.update(http_port=443 if self.use_https else 80)
+ else:
+ # Modify child config
+ config_updates.update(child=dict(http_port_plus_one=self.http_port + 1))
+
+ # Validate that the port number has a sensible value
+ # It is recommended to do validation inside the cast method for individual keys, however for dependent keys it can be useful here
+ if self.http_port is not None:
+ if self.use_https:
+ assert self.http_port in [443, 444, 445]
+ else:
+ assert self.http_port in [80, 81, 82]
+
+ return config_updates
+
+config = AppConfig()
+config.add_source(EnvironmentConfigSource())
+config.read()
+```
+
## Configuration Sources
Configuration sources are how your main `Config` class knows where to get its data from. These are totally extensible so that you can read in your configuration from wherever you like - from a database, from S3, anywhere that you can write code for.
diff --git a/typedconfig/__version__.py b/typedconfig/__version__.py
index 7b491b5..96487b8 100644
--- a/typedconfig/__version__.py
+++ b/typedconfig/__version__.py
@@ -1,3 +1,3 @@
-VERSION = (0, 2, 2)
+VERSION = (0, 2, 5)
__version__ = '.'.join(map(str, VERSION))
diff --git a/typedconfig/config.py b/typedconfig/config.py
index 3fbee9e..1f155f3 100644
--- a/typedconfig/config.py
+++ b/typedconfig/config.py
@@ -1,9 +1,9 @@
# This future import allows methods of Config to use Config as their return type
import typing
-from typing import TypeVar, List, Optional, Callable, Type, Union
+from itertools import chain
+from typing import TypeVar, List, Optional, Callable, Type, Union, Tuple, Any
from typedconfig.provider import ConfigProvider
from typedconfig.source import ConfigSource
-from itertools import dropwhile, islice, chain
import logging
import inspect
@@ -58,39 +58,25 @@ def key(section_name: str = None,
value: the parsed config value
"""
- if section_name is None:
- resolved_section_name = self._section_name
- if resolved_section_name is None:
- raise ValueError(
- "Section name was not specified by the key function or the section class decorator.")
- else:
- resolved_section_name = section_name
-
- if _mutable_state['key_name'] is None:
- def base_dict_items(cls):
- base = cls
- while True:
- yield base.__dict__.items()
- base = base.__base__
- if base is None:
- break
- resolved_key_name = list(islice(dropwhile(
- lambda x: x[1] is not getter, chain.from_iterable(base_dict_items(self.__class__))), 1))[0][0]
- _mutable_state['key_name'] = resolved_key_name.upper()
+ resolved_section_name = self._resolve_section_name(section_name)
+
+ resolved_key_name = _mutable_state['key_name']
+ if resolved_key_name is None:
+ resolved_key_name = self._resolve_key_name(getter)
+ _mutable_state['key_name'] = resolved_key_name
# If value is cached, just use the cached value
- cached_value = self._provider.get_from_cache(resolved_section_name, _mutable_state['key_name'])
+ cached_value = self._provider.get_from_cache(resolved_section_name, resolved_key_name)
if cached_value is not None:
return cached_value
- value = self._provider.get_key(resolved_section_name, _mutable_state['key_name'])
+ value = self._provider.get_key(resolved_section_name, resolved_key_name)
# If we still haven't found a config value and this parameter is required,
# raise an exception, otherwise use the default
if value is None:
if required:
- raise KeyError("Config parameter {0}.{1} not found".format(resolved_section_name,
- _mutable_state['key_name']))
+ raise KeyError("Config parameter {0}.{1} not found".format(resolved_section_name, resolved_key_name))
else:
value = default
@@ -100,11 +86,13 @@ def key(section_name: str = None,
# Cache this for next time if still not none
if value is not None:
- self._provider.add_to_cache(resolved_section_name, _mutable_state['key_name'], value)
+ self._provider.add_to_cache(resolved_section_name, resolved_key_name, value)
return value
setattr(getter.fget, Config._config_key_registration_string, True)
+ setattr(getter.fget, Config._config_key_key_name_string, key_name.upper() if key_name is not None else None)
+ setattr(getter.fget, Config._config_key_section_name_string, section_name)
return getter
@@ -152,6 +140,8 @@ class Config:
"""
_composed_config_registration_string = '__composed_config__'
_config_key_registration_string = '__config_key__'
+ _config_key_key_name_string = '__config_key_key_name__'
+ _config_key_section_name_string = '__config_key_section_name__'
def __init__(self, section_name=None, sources: List[ConfigSource] = None,
provider: Optional[ConfigProvider] = None):
@@ -189,10 +179,31 @@ class Config:
-------
A list of strings giving the names of the registered properties/methods
"""
- all_properties = inspect.getmembers(self.__class__, predicate=lambda x: self.is_member_registered(
- x, Config._config_key_registration_string))
+ all_properties = self._get_registered_properties_with_values()
return [f[0] for f in all_properties]
+ def _get_registered_properties_with_values(self) -> List[Tuple[str, Any]]:
+ return inspect.getmembers(self.__class__, predicate=lambda x: self.is_member_registered(
+ x, Config._config_key_registration_string))
+
+ def _resolve_section_name(self, key_section_name: Optional[str]) -> str:
+ if key_section_name is not None:
+ return key_section_name
+
+ if self._section_name is None:
+ raise ValueError(
+ "Section name was not specified by the key function or the section class decorator.")
+ return self._section_name
+
+ def _resolve_key_name(self, property_object: property) -> str:
+ key_key_name = getattr(property_object.fget, self._config_key_key_name_string)
+ if key_key_name is not None:
+ return key_key_name
+
+ members = inspect.getmembers(self.__class__, lambda x: x is property_object)
+ assert len(members) == 1
+ return members[0][0].upper()
+
@staticmethod
def is_member_registered(member, reg_string: str):
if isinstance(member, property):
@@ -229,6 +240,39 @@ class Config:
for f in registered_properties:
getattr(self, f)
+ self._post_read(self.post_read_hook())
+
+ def post_read_hook(self) -> dict:
+ """
+ This method can be overridden to modify config values after read() is called.
+ Returns
+ -------
+ A dict of key-value pairs containing new configuration values for key() items in this Config class
+ """
+ return dict()
+
+ def _post_read(self, updated_values: dict):
+ registered_properties = set(self.get_registered_properties())
+
+ for k, v in updated_values.items():
+ if isinstance(v, dict):
+ property_object = getattr(self.__class__, k)
+ if not self.is_member_registered(property_object, self._composed_config_registration_string):
+ raise KeyError(f"{k} is not a valid typed config group_key() of {self.__class__.__name__}")
+ child_config = getattr(self, k)
+ child_config._post_read(v)
+ else:
+ if k not in registered_properties:
+ raise KeyError(f"{k} is not a valid attribute of {self.__class__.__name__}")
+
+ property_object: property = getattr(self.__class__, k)
+ if not self.is_member_registered(property_object, self._config_key_registration_string):
+ raise KeyError(f"{k} is not a valid typed config key() object of {self.__class__.__name__}")
+
+ section_name = self._resolve_section_name(getattr(property_object.fget, self._config_key_section_name_string))
+ key_name = self._resolve_key_name(property_object)
+ self._provider.add_to_cache(section_name, key_name, v)
+
def clear_cache(self):
"""
Config values are cached the first time they are requested. This means that if, for example, config values are
| bwindsor/typed-config | c733c3a34dc0e7310807fa181ffb4a4cf7b69afc | diff --git a/test/test_configuration.py b/test/test_configuration.py
index 4663753..2c9e205 100644
--- a/test/test_configuration.py
+++ b/test/test_configuration.py
@@ -231,6 +231,23 @@ def test_key_name_inference():
assert 'def' == c.prop2
+def test_key_name_inference_multi_level():
+ class SampleConfigBase(Config):
+ prop1 = key(section_name='s')
+
+ class SampleConfig(SampleConfigBase):
+ prop2 = key(section_name='s')
+
+ c = SampleConfig()
+
+ c.add_source(DictConfigSource({'s': dict(
+ PROP1='abc',
+ PROP2='def',
+ )}))
+ assert 'abc' == c.prop1
+ assert 'def' == c.prop2
+
+
def test_least_verbose_config():
@section('X')
class SampleConfig(Config):
@@ -387,6 +404,109 @@ def test_set_sources():
assert config.config_sources[1] is new_sources[1]
+def test_property_is_read_only():
+ config = GrandchildConfig()
+ with pytest.raises(AttributeError):
+ config.prop1 = 'a'
+
+
+def test_post_read_hook():
+ @section('s')
+ class SampleConfig(Config):
+ prop1 = key(cast=str)
+ prop2 = key(cast=str, required=False)
+
+ def post_read_hook(self) -> dict:
+ return dict(prop2='x' + self.prop1)
+
+ config_source = DictConfigSource({
+ 's': {
+ 'prop1': 'a',
+ }
+ })
+ config = SampleConfig(sources=[config_source])
+ config.read()
+
+ assert config.prop1 == 'a'
+ assert config.prop2 == 'xa'
+
+
+def test_post_read_hook_different_key_name():
+ @section('s')
+ class SampleConfig(Config):
+ prop1 = key('s', 'key1', cast=str)
+ prop2 = key('s', 'key2', cast=str, required=False)
+
+ def post_read_hook(self) -> dict:
+ return dict(prop2='x' + self.prop1)
+
+ config_source = DictConfigSource({
+ 's': {
+ 'key1': 'a',
+ }
+ })
+ config = SampleConfig(sources=[config_source])
+ config.read()
+
+ assert config.prop1 == 'a'
+ assert config.prop2 == 'xa'
+
+
+def test_post_read_hook_modify_child():
+ class SampleChildConfig(Config):
+ prop3 = key('s', 'key3', cast=str)
+
+ class SampleConfig(Config):
+ prop3 = group_key(SampleChildConfig)
+
+ def post_read_hook(self) -> dict:
+ return dict(
+ prop3=dict(
+ prop3='new_value'
+ )
+ )
+
+ config_source = DictConfigSource({
+ 's': {
+ 'key3': 'b',
+ }
+ })
+ config = SampleConfig(sources=[config_source])
+ config.read()
+
+ assert config.prop3.prop3 == 'new_value'
+
+
+def test_post_read_hook_child_takes_priority():
+ class SampleChildConfig(Config):
+ prop3 = key('s', 'key3', cast=str)
+
+ def post_read_hook(self) -> dict:
+ return dict(
+ prop3='child_new_value'
+ )
+
+ class SampleConfig(Config):
+ prop3 = group_key(SampleChildConfig)
+
+ def post_read_hook(self) -> dict:
+ return dict(
+ prop3=dict(
+ prop3='new_value'
+ )
+ )
+
+ config_source = DictConfigSource({
+ 's': {
+ 'key3': 'b',
+ }
+ })
+ config = SampleConfig(sources=[config_source])
+ config.read()
+
+ assert config.prop3.prop3 == 'child_new_value'
+
+
def test_config_repr():
class SampleChildConfig(Config):
a = key(section_name='test', cast=str, required=False, default='A')
| Post configuration read transformations
I have a use case that has a very narrow set of supported values. Additionally, some configuration items may be inferred based on other values. Specifically, I'm looking at integrating [scrapy-redis](https://github.com/rmax/scrapy-redis) into a project and maintain the ability to swap between schedulers.
```
@section('my_config')
class Scrapy(Config):
scheduler = key(cast=str)
scheduler_persist = key(cast=bool)
scheduler_dupefilter_class = key(required=False, cast=str)
```
In the context of scrapy-redis, I would like to support a use case that allows just overwriting the scheduler and then has scheduler_dupefilter_class automatically set to the right string if it isn't set.
The cleanest way I can think of to handle this today would be to use a custom `ConfigSource`: do an initial application config read, add the custom `ConfigSource` built from the values just read, and then read again.
```
class ScrapyTransformationConfigSource(ConfigSource):
def __init__(self, scrapy_config: Scrapy):
# override based on currently set config
config = Scrapy(sources=[
EnvironmentConfigSource(prefix="EXAMPLE")
])
config.read()
config.add_source(ScrapyTransformationConfigSource(config.scrapy))
config.read()
```
This isn't an ideal workflow, and doesn't scale great if this needs to be done for multiple configs. A built in post config read transformation would make this much more seamless.
| 0.0 | c733c3a34dc0e7310807fa181ffb4a4cf7b69afc | [
"test/test_configuration.py::test_post_read_hook",
"test/test_configuration.py::test_post_read_hook_different_key_name",
"test/test_configuration.py::test_post_read_hook_modify_child",
"test/test_configuration.py::test_post_read_hook_child_takes_priority"
]
| [
"test/test_configuration.py::test_subclass_config",
"test/test_configuration.py::test_register_properties",
"test/test_configuration.py::test_register_composed_config",
"test/test_configuration.py::test_read[ConfigMissing]",
"test/test_configuration.py::test_read[PropertiesMatch]",
"test/test_configuration.py::test_read[ExtraConfig]",
"test/test_configuration.py::test_key_getter[a-args0-a]",
"test/test_configuration.py::test_key_getter[None-args1-None]",
"test/test_configuration.py::test_key_getter[None-args2-3]",
"test/test_configuration.py::test_key_getter[None-args3-3]",
"test/test_configuration.py::test_key_getter[None-args4-KeyError]",
"test/test_configuration.py::test_key_getter[3-args5-3]",
"test/test_configuration.py::test_key_getter[3-args6-3]",
"test/test_configuration.py::test_key_getter[a-args7-ValueError]",
"test/test_configuration.py::test_get_key",
"test/test_configuration.py::test_caching",
"test/test_configuration.py::test_compose_configs",
"test/test_configuration.py::test_add_source",
"test/test_configuration.py::test_init_with_sources",
"test/test_configuration.py::test_section_decorator",
"test/test_configuration.py::test_key_name_inference",
"test/test_configuration.py::test_key_name_inference_multi_level",
"test/test_configuration.py::test_least_verbose_config",
"test/test_configuration.py::test_section_decorator_precedence",
"test/test_configuration.py::test_no_section_provided",
"test/test_configuration.py::test_multiple_group_keys_with_section_decorators",
"test/test_configuration.py::test_cast_with_default",
"test/test_configuration.py::test_provider_property_read",
"test/test_configuration.py::test_provider_property_is_readonly",
"test/test_configuration.py::test_construct_config_without_provider",
"test/test_configuration.py::test_construct_config_bad_provider_type",
"test/test_configuration.py::test_construct_config_with_provider",
"test/test_configuration.py::test_replace_source",
"test/test_configuration.py::test_replace_source_not_found",
"test/test_configuration.py::test_replace_source_bad_type",
"test/test_configuration.py::test_set_sources",
"test/test_configuration.py::test_property_is_read_only",
"test/test_configuration.py::test_config_repr"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-07 11:06:43+00:00 | mit | 1,456 |
|
bycycle-tools__bycycle-136 | diff --git a/bycycle/burst/amp.py b/bycycle/burst/amp.py
index ffc099c..10375c2 100644
--- a/bycycle/burst/amp.py
+++ b/bycycle/burst/amp.py
@@ -1,4 +1,5 @@
"""Detect bursts: amplitude threshold approach."""
+import numpy as np
from bycycle.utils.checks import check_param_range
from bycycle.burst.utils import check_min_burst_cycles
@@ -42,6 +43,7 @@ def detect_bursts_amp(df_features, burst_fraction_threshold=1, min_n_cycles=3):
# Determine cycles that are defined as bursting throughout the whole cycle
is_burst = [frac >= burst_fraction_threshold for frac in df_features['burst_fraction']]
+ is_burst = np.array(is_burst)
df_features['is_burst'] = check_min_burst_cycles(is_burst, min_n_cycles=min_n_cycles)
diff --git a/bycycle/burst/cycle.py b/bycycle/burst/cycle.py
index 6ff4c6b..008d101 100644
--- a/bycycle/burst/cycle.py
+++ b/bycycle/burst/cycle.py
@@ -90,6 +90,7 @@ def detect_bursts_cycles(df_features, amp_fraction_threshold=0., amp_consistency
# Set the burst status for each cycle as the answer across all criteria
is_burst = amp_fraction & amp_consistency & period_consistency & monotonicity
+ is_burst = is_burst.to_numpy()
# Set the first and last cycles to not be part of a burst
is_burst[0] = False
diff --git a/bycycle/burst/utils.py b/bycycle/burst/utils.py
index e57ea98..de82a8b 100644
--- a/bycycle/burst/utils.py
+++ b/bycycle/burst/utils.py
@@ -31,23 +31,28 @@ def check_min_burst_cycles(is_burst, min_n_cycles=3):
array([False, False, False, False, True, True, True, True, False])
"""
- # Ensure argument is within valid range
- check_param_range(min_n_cycles, 'min_n_cycles', (0, np.inf))
-
- temp_cycle_count = 0
+ if not isinstance(is_burst, np.ndarray):
+ raise ValueError("Argument 'is_burst' must be a numpy array!")
- for idx, bursting in enumerate(is_burst):
+ # handle special case where input array is empty
+ if len(is_burst) == 0:
+ return is_burst
- if bursting:
- temp_cycle_count += 1
+ # Ensure argument is within valid range
+ check_param_range(min_n_cycles, 'min_n_cycles', (0, np.inf))
- else:
+ # extract transition indices
+ diff = np.diff(is_burst, prepend=0, append=0)
+ transitions = np.flatnonzero(diff)
+ ons, offs = transitions[0::2], transitions[1::2]
- if temp_cycle_count < min_n_cycles:
- for c_rm in range(temp_cycle_count):
- is_burst[idx - 1 - c_rm] = False
+ # select only segments with long enough duration
+ durations = offs - ons
+ too_short = durations < min_n_cycles
- temp_cycle_count = 0
+ # construct bool time series from transition indices
+ for silence_on, silence_off in zip(ons[too_short], offs[too_short]):
+ is_burst[silence_on:silence_off] = False
return is_burst
| bycycle-tools/bycycle | 44c995d06fcd2e746fd4e1955d35f4a69c64fbaf | diff --git a/bycycle/tests/burst/test_utils.py b/bycycle/tests/burst/test_utils.py
index 81af247..c63498e 100644
--- a/bycycle/tests/burst/test_utils.py
+++ b/bycycle/tests/burst/test_utils.py
@@ -1,6 +1,8 @@
"""Test burst.utils."""
import numpy as np
+import pandas as pd
+import pytest
from bycycle.features import compute_features
from bycycle.burst.utils import *
@@ -9,19 +11,49 @@ from bycycle.burst.utils import *
###################################################################################################
###################################################################################################
-def test_check_min_burst_cycles():
[email protected]("min_n_cycles", [2, 3])
+def test_check_min_burst_cycles(min_n_cycles):
- is_burst = np.array([True, True, True, False])
- is_burst_check = check_min_burst_cycles(is_burst, min_n_cycles=3)
+ is_burst = np.array([False, True, True, False, False])
+
+ is_burst_check = check_min_burst_cycles(is_burst.copy(), min_n_cycles=min_n_cycles)
+
+ burst_should_be_kept = min_n_cycles < 3
+ burst_kept = (is_burst == is_burst_check).all()
+
+ assert burst_kept == burst_should_be_kept
+
+
[email protected]("side", ["start", "end"])
+def test_check_min_burst_cycles_bursting_at_side(side):
+
+ min_n_cycles = 5
+ is_burst = [True] * min_n_cycles + [False]
+ is_burst = np.flip(is_burst) if side == "end" else np.array(is_burst)
+
+ is_burst_check = check_min_burst_cycles(is_burst.copy(), min_n_cycles=min_n_cycles)
assert (is_burst == is_burst_check).all()
- is_burst = np.array([True, False, True, False])
- is_burst_check = check_min_burst_cycles(is_burst, min_n_cycles=3)
+
+def test_check_min_burst_cycles_no_bursts():
+
+ num_cycles = 5
+ is_burst = np.zeros(num_cycles, dtype=bool)
+
+ is_burst_check = check_min_burst_cycles(is_burst.copy(), min_n_cycles=3)
assert not any(is_burst_check)
+def test_check_min_burst_cycles_empty_input():
+
+ is_burst = np.array([])
+ is_burst_check = check_min_burst_cycles(is_burst.copy(), min_n_cycles=3)
+
+ assert not len(is_burst_check)
+
+
def test_recompute_edges(sim_args_comb):
# Grab sim arguments from fixture
| optimize function check_min_burst_cycles
Hi there!
I noticed that the `check_min_burst_cycles` function can be optimized a bit. I profiled a proposed refactor using my M1 Macbook Pro -- I'm curious what this looks like on other machines. Not sure how significantly this will impact most users -- in my simulated example, it starts to become noticeable at ~1e7 cycles. Perhaps a refactor would be useful for continuous EEG with lots of channels?
(Two profiling plots comparing compute time of the old and new implementations were attached here; omitted.)
(omitting docstring and `check_param_range`):
```python
def check_min_burst_cycles_new(is_burst, min_n_cycles=3):
# extract transition indices
diff = np.diff(is_burst)
transitions = np.flatnonzero(diff) + 1
ons, offs = transitions[0::2], transitions[1::2]
# select only segments with long enough duration
durations = np.diff([ons, offs], axis=0)
durations = np.atleast_1d(durations.squeeze()) # careful with dimensions...
ons = ons[durations >= min_n_cycles]
offs = offs[durations >= min_n_cycles]
# construct bool time series from transition indices
out = np.zeros_like(is_burst, dtype=bool)
for on, off in zip(ons, offs):
out[on:off] = True
return out
```
If you'd like to repro:
```python
from time import time
import numpy as np
np.random.seed(0)
min_n_cycles = 3
old_times = []
new_times = []
cycle_count = np.power(10, range(1, 9))
for count in cycle_count:
old_times_tmp = []
new_times_tmp = []
for rep in range(10):
# simulate random data -- this is admittedly assuming a certain distribution
a = np.random.random(count) > .5
a[0] = False
a[-1] = False
t0 = time()
new = check_min_burst_cycles_new(a, min_n_cycles=min_n_cycles)
new_times_tmp.append(time() - t0)
t0 = time()
old = check_min_burst_cycles(a, min_n_cycles=min_n_cycles)
old_times_tmp.append(time() - t0)
# make sure I'm getting the same result...
np.testing.assert_equal(old, new)
new_times.append(np.median(new_times_tmp))
old_times.append(np.median(old_times_tmp))
```
... and plot
```python
import matplotlib.pyplot as plt
plt.loglog(cycle_count, old_times, label="old")
plt.loglog(cycle_count, new_times, label="new")
plt.xlabel("Cycle count")
plt.ylabel("Median compute time (s)")
plt.title(f"Profiling new vs old algo (min_n_cycles={min_n_cycles})")
plt.legend()
``` | 0.0 | 44c995d06fcd2e746fd4e1955d35f4a69c64fbaf | [
"bycycle/tests/burst/test_utils.py::test_recompute_edges"
]
| [
"bycycle/tests/burst/test_utils.py::test_check_min_burst_cycles[2]",
"bycycle/tests/burst/test_utils.py::test_check_min_burst_cycles_bursting_at_side[end]",
"bycycle/tests/burst/test_utils.py::test_check_min_burst_cycles_no_bursts",
"bycycle/tests/burst/test_utils.py::test_recompute_edge",
"bycycle/tests/burst/test_utils.py::test_check_min_burst_cycles[3]",
"bycycle/tests/burst/test_utils.py::test_check_min_burst_cycles_bursting_at_side[start]",
"bycycle/tests/burst/test_utils.py::test_check_min_burst_cycles_empty_input"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-06-11 16:04:52+00:00 | apache-2.0 | 1,457 |
|
byllyfish__precis_i18n-32 | diff --git a/precis_i18n/profile.py b/precis_i18n/profile.py
index 915e234..074f893 100644
--- a/precis_i18n/profile.py
+++ b/precis_i18n/profile.py
@@ -291,7 +291,7 @@ class Nickname(Profile):
def additional_mapping_rule(self, value):
# Override
temp = self.base.ucd.map_nonascii_space_to_ascii(value)
- return re.sub(r' +', ' ', temp.strip(' \t\n\r'))
+ return re.sub(r' +', ' ', temp.strip(' '))
def normalization_rule(self, value):
# Override
| byllyfish/precis_i18n | 6d39c4f14fa1d092b3b16a37a94d600a00cc7a49 | diff --git a/test/golden.json b/test/golden.json
index bfff879..72b8412 100644
--- a/test/golden.json
+++ b/test/golden.json
@@ -5896,19 +5896,19 @@
"profile": "NicknameCaseMapped",
"input": "\t",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
"input": "\n",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
"input": "\r",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
@@ -5997,14 +5997,14 @@
{
"profile": "NicknameCaseMapped",
"input": "\tA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
"input": "A\t",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
@@ -6015,14 +6015,14 @@
{
"profile": "NicknameCaseMapped",
"input": "\nA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
"input": "A\n",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
@@ -6033,14 +6033,14 @@
{
"profile": "NicknameCaseMapped",
"input": "\rA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
"input": "A\r",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped",
@@ -9810,19 +9810,19 @@
"profile": "NicknameCasePreserved",
"input": "\t",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
"input": "\n",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
"input": "\r",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
@@ -9911,14 +9911,14 @@
{
"profile": "NicknameCasePreserved",
"input": "\tA",
- "output": "A",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
"input": "A\t",
- "output": "A",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
@@ -9929,14 +9929,14 @@
{
"profile": "NicknameCasePreserved",
"input": "\nA",
- "output": "A",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
"input": "A\n",
- "output": "A",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
@@ -9947,14 +9947,14 @@
{
"profile": "NicknameCasePreserved",
"input": "\rA",
- "output": "A",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
"input": "A\r",
- "output": "A",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCasePreserved",
@@ -11766,19 +11766,19 @@
"profile": "NicknameCaseMapped:ToLower",
"input": "\t",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
"input": "\n",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
"input": "\r",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
@@ -11867,14 +11867,14 @@
{
"profile": "NicknameCaseMapped:ToLower",
"input": "\tA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
"input": "A\t",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
@@ -11885,14 +11885,14 @@
{
"profile": "NicknameCaseMapped:ToLower",
"input": "\nA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
"input": "A\n",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
@@ -11903,14 +11903,14 @@
{
"profile": "NicknameCaseMapped:ToLower",
"input": "\rA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
"input": "A\r",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:ToLower",
@@ -19591,19 +19591,19 @@
"profile": "NicknameCaseMapped:CaseFold",
"input": "\t",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "\n",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "\r",
"output": null,
- "error": "DISALLOWED/empty"
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
@@ -19692,14 +19692,14 @@
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "\tA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "A\t",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
@@ -19710,14 +19710,14 @@
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "\nA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "A\n",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
@@ -19728,14 +19728,14 @@
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "\rA",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
"input": "A\r",
- "output": "a",
- "error": null
+ "output": null,
+ "error": "DISALLOWED/controls"
},
{
"profile": "NicknameCaseMapped:CaseFold",
| Nickname profile strips more whitespace characters than it should.
The Nickname profile includes an "additional mapping rule" that strips white space characters from both the beginning and end of the string. The current implementation uses the str.strip() method without a parameter. Python's default behavior treats a number of control characters as white space. I believe this is a bug; the code should be str.strip(' ') to strip only the standard ASCII space.
The result of the Nickname `enforce` step is not affected by this bug. The code is simply over-permissive; it corrects the input more than it should. A diagnostic should tell the person to clean up their nickname string. A proper fix will include a diagnostic error message that reports "invalid_white_space". This will be a new error message.
Current behavior that is not technically correct:
```
Nickname profile:
" a\n" -> "a"
"\x1fb\x1f" -> "b"
"\tc\r\n" -> "c"
```
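For reference, a minimal standalone check (assuming Python 3; not part of the original report) of the stripping difference described above:
```python
# str.strip() with no argument removes every character Python treats as
# whitespace, including control characters such as TAB, LF, CR and U+001F,
# while str.strip(' ') removes only the ASCII space U+0020.
samples = [" a\n", "\x1fb\x1f", "\tc\r\n"]
for s in samples:
    print(repr(s.strip()), "vs", repr(s.strip(" ")))
# 'a' vs 'a\n'
# 'b' vs '\x1fb\x1f'
# 'c' vs '\tc\r\n'
```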
Note that non-ASCII `Zs` white space characters are converted to the ASCII space 0x20 before any stripping takes place. The Precis spec only talks about stripping the `Zs` white space characters.
The following characters are stripped by Python:
- U+09 (Cc)
- U+0A (Cc)
- U+0B (Cc)
- U+0C (Cc)
- U+0D (Cc)
- U+1C (Cc)
- U+1D (Cc)
- U+1E (Cc)
- U+1F (Cc)
- U+85 (Cc)
- U+2028 (Zl)
- U+2029 (Zp)
Of these, I'm only worried about `\n` (U+0A). I might just continue to strip these, even though the spec doesn't say I have to. This is mostly for compatibility with existing software use. | 0.0 | 6d39c4f14fa1d092b3b16a37a94d600a00cc7a49 | [
"test/test_golden.py::TestGolden::test_golden_json"
]
| [
"test/test_codec.py::TestCodec::test_decode",
"test/test_codec.py::TestCodec::test_encode",
"test/test_codec.py::TestCodec::test_encode_errors",
"test/test_codec.py::TestCodec::test_search_function",
"test/test_codepointset.py::TestCodepointSet::test_coalesce",
"test/test_codepointset.py::TestCodepointSet::test_contains",
"test/test_codepointset.py::TestCodepointSet::test_equals",
"test/test_codepointset.py::TestCodepointSet::test_even_odd",
"test/test_codepointset.py::TestCodepointSet::test_len",
"test/test_codepointset.py::TestCodepointSet::test_malformed_range",
"test/test_codepointset.py::TestCodepointSet::test_parse",
"test/test_codepointset.py::TestCodepointSet::test_repr",
"test/test_derived_props.py::TestDerivedProperties::test_derived_props",
"test/test_derived_props_files.py::TestDerivedPropsFiles::test_derived_props",
"test/test_derived_props_files.py::TestDerivedPropsFiles::test_iana_derived_props",
"test/test_factory.py::TestGetProfile::test_missing",
"test/test_factory.py::TestGetProfile::test_unicodedata_arg",
"test/test_factory.py::TestUsernameCasePreserved::test_enforce",
"test/test_factory.py::TestUsernameCasePreserved::test_identifier_oddities",
"test/test_factory.py::TestUsernameCasePreserved::test_invalid_argument",
"test/test_factory.py::TestUsernameCaseMapped::test_enforce",
"test/test_factory.py::TestNickname::test_enforce",
"test/test_factory.py::TestNicknameCaseMapped::test_enforce",
"test/test_factory.py::TestUsername::test_constructor",
"test/test_factory.py::TestUsername::test_constructor_fail",
"test/test_idempotent.py::IdempotentTestCase::test_all_codepoints",
"test/test_idempotent.py::IdempotentTestCase::test_broken_profile",
"test/test_precis.py::TestBidiRule::test_bidi_rule_ltr",
"test/test_precis.py::TestBidiRule::test_bidi_rule_rtl",
"test/test_precis.py::TestBidiRule::test_has_rtl",
"test/test_precis.py::TestPrecisIdentifierClass::test_invalid_identifier",
"test/test_precis.py::TestPrecisIdentifierClass::test_valid_identifier",
"test/test_precis.py::TestPrecisFreeformClass::test_invalid_freeform",
"test/test_precis.py::TestPrecisFreeformClass::test_valid_freeform",
"test/test_precis.py::TestDerivedProperty::test_derived_property",
"test/test_precis.py::TestPrecisContextRule::test_arabic_indic",
"test/test_precis.py::TestPrecisContextRule::test_context_rule",
"test/test_precis.py::TestPrecisContextRule::test_extended_arabic_indic",
"test/test_precis.py::TestPrecisContextRule::test_katatana_middle_dot",
"test/test_precis.py::TestPrecisContextRule::test_rule_greek_keraia",
"test/test_precis.py::TestPrecisContextRule::test_rule_hebrew_punctuation",
"test/test_precis.py::TestPrecisContextRule::test_rule_middle_dot",
"test/test_precis.py::TestPrecisContextRule::test_rule_zero_width_joiner",
"test/test_precis.py::TestPrecisContextRule::test_rule_zero_width_nonjoiner",
"test/test_precis.py::TestPrecisUnicodeData::test_arabic_indic",
"test/test_precis.py::TestPrecisUnicodeData::test_combining_virama",
"test/test_precis.py::TestPrecisUnicodeData::test_control",
"test/test_precis.py::TestPrecisUnicodeData::test_default_ignorable_code_point",
"test/test_precis.py::TestPrecisUnicodeData::test_extended_arabic_indic",
"test/test_precis.py::TestPrecisUnicodeData::test_greek_script",
"test/test_precis.py::TestPrecisUnicodeData::test_has_compat",
"test/test_precis.py::TestPrecisUnicodeData::test_hebrew_script",
"test/test_precis.py::TestPrecisUnicodeData::test_hiragana_katakana_han_script",
"test/test_precis.py::TestPrecisUnicodeData::test_noncharacter",
"test/test_precis.py::TestPrecisUnicodeData::test_old_hangul_jamo",
"test/test_precis.py::TestPrecisUnicodeData::test_replace_whitespace",
"test/test_precis.py::TestPrecisUnicodeData::test_valid_join_type",
"test/test_precis.py::TestPrecisUnicodeData::test_version_to_float",
"test/test_precis.py::TestPrecisUnicodeData::test_width_map"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2023-10-31 03:22:36+00:00 | mit | 1,458 |
|
caleb531__automata-123 | diff --git a/automata/base/utils.py b/automata/base/utils.py
index 07611a8..98c8706 100644
--- a/automata/base/utils.py
+++ b/automata/base/utils.py
@@ -81,7 +81,7 @@ def refine(self, S):
Not a generator because we need to perform the partition
even if the caller doesn't iterate through the results.
"""
- hit = defaultdict(lambda: set())
+ hit = defaultdict(set)
output = []
for x in S:
diff --git a/automata/fa/gnfa.py b/automata/fa/gnfa.py
index b3f3363..e74a027 100644
--- a/automata/fa/gnfa.py
+++ b/automata/fa/gnfa.py
@@ -228,7 +228,7 @@ def to_regex(self):
if r2 is None:
r2 = ''
elif len(r2) == 1:
- r2 = f'{r1}*'
+ r2 = f'{r2}*'
else:
r2 = f'({r2})*'
| caleb531/automata | 5987d5d276cf10662956c2266eb088ffd95b9760 | diff --git a/tests/test_gnfa.py b/tests/test_gnfa.py
index bdb5ef3..63083f8 100644
--- a/tests/test_gnfa.py
+++ b/tests/test_gnfa.py
@@ -345,16 +345,28 @@ def test_to_regex(self):
then generate NFA from regex (already tested method)
and check for equivalence of NFA and previous DFA
"""
-
- nfa = NFA.from_regex('a(aaa*bbcd|abbcd)d*|aa*bb(dcc*|(d|c)b|a?bb(dcc*|(d|c)))ab(c|d)*(ccd)?')
- gnfa = GNFA.from_nfa(nfa)
- regex = gnfa.to_regex()
- nfa = NFA.from_regex(regex)
- dfa2 = DFA.from_nfa(nfa)
-
- dfa = DFA.from_nfa(nfa)
-
- self.assertEqual(dfa, dfa2)
+ regex_strings = [
+ 'a*',
+ 'aa*b|bba*|(cc*)(bb+)',
+ 'a(aaa*bbcd|abbcd)d*|aa*bb(dcc*|(d|c)b|a?bb(dcc*|(d|c)))ab(c|d)*(ccd)?'
+ ]
+
+ for regex_str in regex_strings:
+ nfa_1 = NFA.from_regex(regex_str)
+ gnfa_1 = GNFA.from_nfa(nfa_1)
+ regex_1 = gnfa_1.to_regex()
+ nfa_2 = NFA.from_regex(regex_1)
+
+ # Test equality under NFA regex conversion
+ self.assertEqual(nfa_1, nfa_2)
+
+ dfa_1 = DFA.from_nfa(nfa_1)
+ gnfa_2 = GNFA.from_dfa(dfa_1)
+ regex_2 = gnfa_2.to_regex()
+ dfa_2 = DFA.from_nfa(NFA.from_regex(regex_2))
+
+ # Test equality through DFA regex conversion
+ self.assertEqual(dfa_1, dfa_2)
def test_read_input_step_not_implemented(self):
"""Should not implement read_input_stepwise() for GNFA."""
| problems with GNFA.to_regex
GNFA.to_regex seems to give wrong results sometimes. For example, the following code:
```
from automata.fa.dfa import DFA
from automata.fa.nfa import NFA
from automata.fa.gnfa import GNFA
nfa = NFA.from_regex('a*')
dfa = DFA.from_nfa(nfa)
gnfa = GNFA.from_dfa(dfa)
print(gnfa.to_regex())
```
prints a single-character string `*`, which is not a valid regex.
| 0.0 | 5987d5d276cf10662956c2266eb088ffd95b9760 | [
"tests/test_gnfa.py::TestGNFA::test_to_regex"
]
| [
"tests/test_gnfa.py::TestGNFA::test_concatenate_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_copy_gnfa",
"tests/test_gnfa.py::TestGNFA::test_eq_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_from_dfa",
"tests/test_gnfa.py::TestGNFA::test_from_dfa_single_state",
"tests/test_gnfa.py::TestGNFA::test_from_nfa",
"tests/test_gnfa.py::TestGNFA::test_from_nfa_single_state",
"tests/test_gnfa.py::TestGNFA::test_gnfa_immutable_attr_del",
"tests/test_gnfa.py::TestGNFA::test_gnfa_immutable_attr_set",
"tests/test_gnfa.py::TestGNFA::test_init_dfa",
"tests/test_gnfa.py::TestGNFA::test_init_gnfa",
"tests/test_gnfa.py::TestGNFA::test_init_nfa",
"tests/test_gnfa.py::TestGNFA::test_init_nfa_missing_formal_params",
"tests/test_gnfa.py::TestGNFA::test_init_validation",
"tests/test_gnfa.py::TestGNFA::test_kleene_star_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_option_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_read_input_step_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_reverse_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_show_diagram",
"tests/test_gnfa.py::TestGNFA::test_show_diagram_showNone",
"tests/test_gnfa.py::TestGNFA::test_union_not_implemented",
"tests/test_gnfa.py::TestGNFA::test_validate_final_state_transition",
"tests/test_gnfa.py::TestGNFA::test_validate_incomplete_transitions",
"tests/test_gnfa.py::TestGNFA::test_validate_initial_state_transitions",
"tests/test_gnfa.py::TestGNFA::test_validate_invalid_final_state",
"tests/test_gnfa.py::TestGNFA::test_validate_invalid_initial_state",
"tests/test_gnfa.py::TestGNFA::test_validate_invalid_state",
"tests/test_gnfa.py::TestGNFA::test_validate_invalid_symbol",
"tests/test_gnfa.py::TestGNFA::test_validate_missing_state"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-01-08 20:28:05+00:00 | mit | 1,460 |
|
caleb531__automata-136 | diff --git a/automata/regex/parser.py b/automata/regex/parser.py
index 61f0e10..098e3cb 100644
--- a/automata/regex/parser.py
+++ b/automata/regex/parser.py
@@ -492,8 +492,9 @@ def val(self) -> NFARegexBuilder:
return NFARegexBuilder.wildcard(self.input_symbols, self.counter)
-def add_concat_tokens(
+def add_concat_and_empty_string_tokens(
token_list: List[Token[NFARegexBuilder]],
+ state_name_counter: count,
) -> List[Token[NFARegexBuilder]]:
"""Add concat tokens to list of parsed infix tokens."""
@@ -509,6 +510,9 @@ def add_concat_tokens(
(PostfixOperator, LeftParen),
]
+ # Pairs of tokens to insert empty string literals between
+ empty_string_pairs = [(LeftParen, RightParen)]
+
for curr_token, next_token in zip_longest(token_list, token_list[1:]):
final_token_list.append(curr_token)
@@ -519,13 +523,20 @@ def add_concat_tokens(
):
final_token_list.append(ConcatToken(""))
+ for firstClass, secondClass in empty_string_pairs:
+ if isinstance(curr_token, firstClass) and isinstance(
+ next_token, secondClass
+ ):
+ final_token_list.append(StringToken("", state_name_counter))
+
return final_token_list
-def get_regex_lexer(input_symbols: AbstractSet[str]) -> Lexer[NFARegexBuilder]:
+def get_regex_lexer(
+ input_symbols: AbstractSet[str], state_name_counter: count
+) -> Lexer[NFARegexBuilder]:
"""Get lexer for parsing regular expressions."""
lexer: Lexer[NFARegexBuilder] = Lexer()
- state_name_counter = count(0)
lexer.register_token(LeftParen.from_match, r"\(")
lexer.register_token(RightParen.from_match, r"\)")
@@ -553,10 +564,14 @@ def parse_regex(regexstr: str, input_symbols: AbstractSet[str]) -> NFARegexBuild
if len(regexstr) == 0:
return NFARegexBuilder.from_string_literal(regexstr, count(0))
- lexer = get_regex_lexer(input_symbols)
+ state_name_counter = count(0)
+
+ lexer = get_regex_lexer(input_symbols, state_name_counter)
lexed_tokens = lexer.lex(regexstr)
validate_tokens(lexed_tokens)
- tokens_with_concats = add_concat_tokens(lexed_tokens)
+ tokens_with_concats = add_concat_and_empty_string_tokens(
+ lexed_tokens, state_name_counter
+ )
postfix = tokens_to_postfix(tokens_with_concats)
return parse_postfix_tokens(postfix)
diff --git a/automata/regex/postfix.py b/automata/regex/postfix.py
index a9ab999..2c173ea 100644
--- a/automata/regex/postfix.py
+++ b/automata/regex/postfix.py
@@ -103,7 +103,7 @@ def validate_tokens(token_list: List[Token]) -> None:
# No left parens right before infix or postfix operators, or right
# before a right paren
elif isinstance(prev_token, LeftParen):
- if isinstance(curr_token, (InfixOperator, PostfixOperator, RightParen)):
+ if isinstance(curr_token, (InfixOperator, PostfixOperator)):
raise exceptions.InvalidRegexError(
f"'{prev_token}' cannot appear immediately before '{curr_token}'."
)
diff --git a/automata/regex/regex.py b/automata/regex/regex.py
index 1fa02b6..9c9414c 100644
--- a/automata/regex/regex.py
+++ b/automata/regex/regex.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
"""Methods for working with regular expressions"""
+from itertools import count
from typing import AbstractSet, Literal, Optional
import automata.base.exceptions as exceptions
@@ -23,7 +24,7 @@ def validate(regex: str) -> Literal[True]:
"""Raise an error if the regular expression is invalid"""
input_symbols = set(regex) - RESERVED_CHARACTERS
- validate_tokens(get_regex_lexer(input_symbols).lex(regex))
+ validate_tokens(get_regex_lexer(input_symbols, count(0)).lex(regex))
return True
diff --git a/docs/regular-expressions.md b/docs/regular-expressions.md
index 325fa84..d1dc220 100644
--- a/docs/regular-expressions.md
+++ b/docs/regular-expressions.md
@@ -16,8 +16,9 @@ A regular expression with the following operations only are supported in this li
- `&`: Intersection. Ex: `a&b`
- `.`: Wildcard. Ex: `a.b`
- `^`: Shuffle. Ex: `a^b`
-- `{}` : Quantifiers expressing finite repetitions. Ex: `a{1,2}`,`a{3,}`
-- `()`: Grouping.
+- `{}`: Quantifiers expressing finite repetitions. Ex: `a{1,2}`,`a{3,}`
+- `()`: The empty string.
+- `(...)`: Grouping.
This is similar to the Python `re` module, but this library does not support any special
characters other than those given above. All regular languages can be written with these.
| caleb531/automata | af08ca85d0476fed25425a48cc811453b4bd2e91 | diff --git a/tests/test_nfa.py b/tests/test_nfa.py
index c33d690..0aff08f 100644
--- a/tests/test_nfa.py
+++ b/tests/test_nfa.py
@@ -571,7 +571,6 @@ def test_validate_regex(self):
exceptions.InvalidRegexError, NFA.from_regex, "((abc*)))((abd)"
)
self.assertRaises(exceptions.InvalidRegexError, NFA.from_regex, "*")
- self.assertRaises(exceptions.InvalidRegexError, NFA.from_regex, "abcd()")
self.assertRaises(
exceptions.InvalidRegexError, NFA.from_regex, "ab(bc)*((bbcd)"
)
diff --git a/tests/test_regex.py b/tests/test_regex.py
index cda0960..e115d3a 100644
--- a/tests/test_regex.py
+++ b/tests/test_regex.py
@@ -26,7 +26,6 @@ def test_validate_invalid(self):
self.assertRaises(exceptions.InvalidRegexError, re.validate, "a||b")
self.assertRaises(exceptions.InvalidRegexError, re.validate, "((abc*)))((abd)")
self.assertRaises(exceptions.InvalidRegexError, re.validate, "*")
- self.assertRaises(exceptions.InvalidRegexError, re.validate, "abcd()")
self.assertRaises(exceptions.InvalidRegexError, re.validate, "ab(bc)*((bbcd)")
self.assertRaises(exceptions.InvalidRegexError, re.validate, "a(*)")
self.assertRaises(exceptions.InvalidRegexError, re.validate, "a(|)")
@@ -203,6 +202,13 @@ def test_quantifier(self):
)
)
+ def test_blank(self):
+ """Should correctly parse blank"""
+ self.assertTrue(re.isequal("()", ""))
+ self.assertTrue(re.isequal("a|()", "a?"))
+ self.assertTrue(re.isequal("a()", "a"))
+ self.assertTrue(re.isequal("a()b()()c()", "abc"))
+
def test_invalid_symbols(self):
"""Should throw exception if reserved character is in input symbols"""
with self.assertRaises(exceptions.InvalidSymbolError):
| How to express the empty string in regexes?
Is there a way to specify the empty string as part of regexes? For example, Python's built-in re.compile allows expressions like `'()'` or `'|'`, but the parser in this library throws an error on both. | 0.0 | af08ca85d0476fed25425a48cc811453b4bd2e91 | [
"tests/test_regex.py::TestRegex::test_blank"
]
| [
"tests/test_nfa.py::TestNFA::test_accepts_input_false",
"tests/test_nfa.py::TestNFA::test_accepts_input_true",
"tests/test_nfa.py::TestNFA::test_add_new_state_type_integrity",
"tests/test_nfa.py::TestNFA::test_concatenate",
"tests/test_nfa.py::TestNFA::test_copy_nfa",
"tests/test_nfa.py::TestNFA::test_cyclic_lambda_transitions",
"tests/test_nfa.py::TestNFA::test_eliminate_lambda",
"tests/test_nfa.py::TestNFA::test_eliminate_lambda_other",
"tests/test_nfa.py::TestNFA::test_eliminate_lambda_regex",
"tests/test_nfa.py::TestNFA::test_from_regex",
"tests/test_nfa.py::TestNFA::test_from_regex_empty_string",
"tests/test_nfa.py::TestNFA::test_init_dfa",
"tests/test_nfa.py::TestNFA::test_init_nfa",
"tests/test_nfa.py::TestNFA::test_init_nfa_missing_formal_params",
"tests/test_nfa.py::TestNFA::test_init_validation",
"tests/test_nfa.py::TestNFA::test_intersection",
"tests/test_nfa.py::TestNFA::test_kleene_star",
"tests/test_nfa.py::TestNFA::test_left_quotient",
"tests/test_nfa.py::TestNFA::test_nfa_LCS_distance",
"tests/test_nfa.py::TestNFA::test_nfa_equal",
"tests/test_nfa.py::TestNFA::test_nfa_equality",
"tests/test_nfa.py::TestNFA::test_nfa_hamming_distance",
"tests/test_nfa.py::TestNFA::test_nfa_immutable_attr_del",
"tests/test_nfa.py::TestNFA::test_nfa_immutable_attr_set",
"tests/test_nfa.py::TestNFA::test_nfa_immutable_dict",
"tests/test_nfa.py::TestNFA::test_nfa_levenshtein_distance",
"tests/test_nfa.py::TestNFA::test_nfa_not_equal",
"tests/test_nfa.py::TestNFA::test_nfa_shuffle_product",
"tests/test_nfa.py::TestNFA::test_nfa_shuffle_product_set_laws",
"tests/test_nfa.py::TestNFA::test_non_str_states",
"tests/test_nfa.py::TestNFA::test_operations_other_type",
"tests/test_nfa.py::TestNFA::test_option",
"tests/test_nfa.py::TestNFA::test_quotient_properties",
"tests/test_nfa.py::TestNFA::test_read_input_accepted",
"tests/test_nfa.py::TestNFA::test_read_input_rejection",
"tests/test_nfa.py::TestNFA::test_read_input_rejection_invalid_symbol",
"tests/test_nfa.py::TestNFA::test_read_input_step",
"tests/test_nfa.py::TestNFA::test_reverse",
"tests/test_nfa.py::TestNFA::test_right_quotient",
"tests/test_nfa.py::TestNFA::test_show_diagram_initial_final_same",
"tests/test_nfa.py::TestNFA::test_union",
"tests/test_nfa.py::TestNFA::test_validate_initial_state_transitions",
"tests/test_nfa.py::TestNFA::test_validate_invalid_final_state",
"tests/test_nfa.py::TestNFA::test_validate_invalid_final_state_non_str",
"tests/test_nfa.py::TestNFA::test_validate_invalid_initial_state",
"tests/test_nfa.py::TestNFA::test_validate_invalid_state",
"tests/test_nfa.py::TestNFA::test_validate_invalid_symbol",
"tests/test_nfa.py::TestNFA::test_validate_missing_state",
"tests/test_nfa.py::TestNFA::test_validate_regex",
"tests/test_regex.py::TestRegex::test_helper_validate_invalid",
"tests/test_regex.py::TestRegex::test_intersection",
"tests/test_regex.py::TestRegex::test_invalid_symbols",
"tests/test_regex.py::TestRegex::test_invalid_token_creation",
"tests/test_regex.py::TestRegex::test_isequal",
"tests/test_regex.py::TestRegex::test_issubset",
"tests/test_regex.py::TestRegex::test_issuperset",
"tests/test_regex.py::TestRegex::test_kleene_plus",
"tests/test_regex.py::TestRegex::test_not_isequal",
"tests/test_regex.py::TestRegex::test_quantifier",
"tests/test_regex.py::TestRegex::test_shuffle",
"tests/test_regex.py::TestRegex::test_validate_invalid",
"tests/test_regex.py::TestRegex::test_validate_valid",
"tests/test_regex.py::TestRegex::test_wildcard"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2023-04-05 01:14:52+00:00 | mit | 1,461 |
|
caleb531__automata-63 | diff --git a/README.md b/README.md
index 2941653..6431318 100644
--- a/README.md
+++ b/README.md
@@ -317,9 +317,10 @@ Returns `True` if the DFA accepts a finite language, False otherwise.
dfa.isfinite()
```
-#### DFA.from_nfa(cls, nfa)
+#### DFA.from_nfa(cls, nfa, retain_names=False)
-Creates a DFA that is equivalent to the given NFA.
+Creates a DFA that is equivalent to the given NFA. States are renamed by
+default unless `retain_names` is set to `True`.
```python
from automata.fa.dfa import DFA
diff --git a/automata/fa/dfa.py b/automata/fa/dfa.py
index 766bf8b..a6d5b3c 100644
--- a/automata/fa/dfa.py
+++ b/automata/fa/dfa.py
@@ -4,7 +4,7 @@
import copy
from collections import deque
from enum import IntEnum
-from itertools import product
+from itertools import product, count
import networkx as nx
from pydot import Dot, Edge, Node
@@ -540,44 +540,61 @@ def _stringify_states(states):
return '{{{}}}'.format(','.join(sorted(str(state) for state in states)))
@classmethod
- def from_nfa(cls, target_nfa):
+ def from_nfa(cls, target_nfa, retain_names=False):
"""Initialize this DFA as one equivalent to the given NFA."""
dfa_states = set()
dfa_symbols = target_nfa.input_symbols
- dfa_transitions = dict()
+ dfa_transitions = {}
+
+ # Data structures for state renaming
+ new_state_name_dict = dict()
+ state_name_counter = count(0)
+
+ def get_name_renamed(states):
+ nonlocal state_name_counter, new_state_name_dict
+ return new_state_name_dict.setdefault(frozenset(states), next(state_name_counter))
+
+ def get_name_original(states):
+ return frozenset(states)
+
+ get_name = get_name_original if retain_names else get_name_renamed
# equivalent DFA states states
nfa_initial_states = target_nfa._get_lambda_closure(target_nfa.initial_state)
- dfa_initial_state = cls._stringify_states(nfa_initial_states)
+ dfa_initial_state = get_name(nfa_initial_states)
dfa_final_states = set()
state_queue = deque()
state_queue.append(nfa_initial_states)
while state_queue:
current_states = state_queue.popleft()
- current_state_name = cls._stringify_states(current_states)
+ current_state_name = get_name(current_states)
if current_state_name in dfa_states:
# We've been here before and nothing should have changed.
continue
+
# Add NFA states to DFA as it is constructed from NFA.
dfa_states.add(current_state_name)
dfa_transitions[current_state_name] = {}
if (current_states & target_nfa.final_states):
dfa_final_states.add(current_state_name)
+
# Enqueue the next set of current states for the generated DFA.
for input_symbol in target_nfa.input_symbols:
next_current_states = target_nfa._get_next_current_states(
current_states, input_symbol)
- dfa_transitions[current_state_name][input_symbol] = cls._stringify_states(next_current_states)
+ dfa_transitions[current_state_name][input_symbol] = get_name(next_current_states)
state_queue.append(next_current_states)
+
return cls(
states=dfa_states, input_symbols=dfa_symbols,
transitions=dfa_transitions, initial_state=dfa_initial_state,
final_states=dfa_final_states)
+
def show_diagram(self, path=None):
"""
Creates the graph associated with this DFA
| caleb531/automata | 80987709928431572be930d9242968276c3845f7 | diff --git a/tests/test_dfa.py b/tests/test_dfa.py
index 52e3b9d..e51e2e8 100644
--- a/tests/test_dfa.py
+++ b/tests/test_dfa.py
@@ -1144,17 +1144,17 @@ def test_init_nfa_simple(self):
initial_state='q0',
final_states={'q2'}
)
- dfa = DFA.from_nfa(nfa)
- self.assertEqual(dfa.states, {'{}', '{q0}', '{q0,q1}', '{q2}'})
+ dfa = DFA.from_nfa(nfa, retain_names=True)
+ self.assertEqual(dfa.states, {frozenset(), frozenset(('q0',)), frozenset(('q0','q1')), frozenset(('q2',))})
self.assertEqual(dfa.input_symbols, {'0', '1'})
self.assertEqual(dfa.transitions, {
- '{}': {'0': '{}', '1': '{}'},
- '{q0}': {'0': '{q0,q1}', '1': '{}'},
- '{q0,q1}': {'0': '{q0,q1}', '1': '{q2}'},
- '{q2}': {'0': '{}', '1': '{}'}
+ frozenset(): {'0': frozenset(), '1': frozenset()},
+ frozenset(('q0',)): {'0': frozenset(('q0','q1')), '1': frozenset()},
+ frozenset(('q0','q1')): {'0': frozenset(('q0','q1')), '1': frozenset(('q2',))},
+ frozenset(('q2',)): {'0': frozenset(), '1': frozenset()}
})
- self.assertEqual(dfa.initial_state, '{q0}')
- self.assertEqual(dfa.final_states, {'{q2}'})
+ self.assertEqual(dfa.initial_state, frozenset(('q0',)))
+ self.assertEqual(dfa.final_states, {frozenset(('q2',))})
def test_init_nfa_more_complex(self):
"""Should convert to a DFA a more complex NFA."""
@@ -1169,32 +1169,32 @@ def test_init_nfa_more_complex(self):
initial_state='q0',
final_states={'q2'}
)
- dfa = DFA.from_nfa(nfa)
+ dfa = DFA.from_nfa(nfa, retain_names=True)
self.assertEqual(dfa.states, {
- '{q0}', '{q0,q1}', '{q0,q2}', '{q0,q1,q2}'
+ frozenset(('q0',)), frozenset(('q0','q1')), frozenset(('q0','q2')), frozenset(('q0','q1','q2'))
})
self.assertEqual(dfa.input_symbols, {'0', '1'})
self.assertEqual(dfa.transitions, {
- '{q0}': {'1': '{q0}', '0': '{q0,q1}'},
- '{q0,q1}': {'1': '{q0,q2}', '0': '{q0,q1}'},
- '{q0,q2}': {'1': '{q0,q1}', '0': '{q0,q1,q2}'},
- '{q0,q1,q2}': {'1': '{q0,q1,q2}', '0': '{q0,q1,q2}'}
+ frozenset(('q0',)): {'1': frozenset(('q0',)), '0': frozenset(('q0','q1'))},
+ frozenset(('q0','q1')): {'1': frozenset(('q0','q2')), '0': frozenset(('q0','q1'))},
+ frozenset(('q0','q2')): {'1': frozenset(('q0','q1')), '0': frozenset(('q0','q1','q2'))},
+ frozenset(('q0','q1','q2')): {'1': frozenset(('q0','q1','q2')), '0': frozenset(('q0','q1','q2'))}
})
- self.assertEqual(dfa.initial_state, '{q0}')
- self.assertEqual(dfa.final_states, {'{q0,q1,q2}', '{q0,q2}'})
+ self.assertEqual(dfa.initial_state, frozenset(('q0',)))
+ self.assertEqual(dfa.final_states, {frozenset(('q0','q1','q2')), frozenset(('q0','q2'))})
def test_init_nfa_lambda_transition(self):
"""Should convert to a DFA an NFA with a lambda transition."""
- dfa = DFA.from_nfa(self.nfa)
- self.assertEqual(dfa.states, {'{}', '{q0}', '{q1,q2}'})
+ dfa = DFA.from_nfa(self.nfa, retain_names=True)
+ self.assertEqual(dfa.states, {frozenset(), frozenset(('q0',)), frozenset(('q1', 'q2'))})
self.assertEqual(dfa.input_symbols, {'a', 'b'})
self.assertEqual(dfa.transitions, {
- '{}': {'a': '{}', 'b': '{}'},
- '{q0}': {'a': '{q1,q2}', 'b': '{}'},
- '{q1,q2}': {'a': '{q1,q2}', 'b': '{q0}'},
+ frozenset(): {'a': frozenset(), 'b': frozenset()},
+ frozenset(('q0',)): {'a': frozenset(('q1', 'q2')), 'b': frozenset()},
+ frozenset(('q1', 'q2')): {'a': frozenset(('q1', 'q2')), 'b': frozenset(('q0',))},
})
- self.assertEqual(dfa.initial_state, '{q0}')
- self.assertEqual(dfa.final_states, {'{q1,q2}'})
+ self.assertEqual(dfa.initial_state, frozenset(('q0',)))
+ self.assertEqual(dfa.final_states, {frozenset(('q1', 'q2'))})
def test_nfa_to_dfa_with_lambda_transitions(self):
""" Test NFA->DFA when initial state has lambda transitions """
@@ -1209,8 +1209,8 @@ def test_nfa_to_dfa_with_lambda_transitions(self):
initial_state='q0',
final_states={'q1'}
)
- dfa = DFA.from_nfa(nfa) # returns an equivalent DFA
- self.assertEqual(dfa.read_input('a'), '{q1}')
+ dfa = DFA.from_nfa(nfa, retain_names=True) # returns an equivalent DFA
+ self.assertEqual(dfa.read_input('a'), frozenset(('q1',)))
def test_partial_dfa(self):
"""Should allow for partial DFA when flag is set"""
| Conversion from DFA to NFA does not properly handle state names of mixed data types
I found the following bug:
nfa._add_new_state introduces integers into the set of states.
Current code is:
```
def _add_new_state(state_set, start=0):
"""Adds new state to the state set and returns it"""
new_state = start
while new_state in state_set:
new_state += 1
state_set.add(new_state)
return new_state
```
which could be fixed, for example, in this way:
```
def _add_new_state(state_set, start=0):
"""Adds new state to the state set and returns it"""
new_state = start
while str(new_state) in state_set:
new_state += 1
state_set.add(str(new_state))
return str(new_state)
```
EDIT: this bugfix actually runs into other problems later - this is just an idea
_____________________
This can lead to logic problems later. For example, the following code fails:
```
from automata.fa.dfa import DFA
from automata.fa.nfa import NFA
A = NFA(
states={'0', '1'},
input_symbols={'0'},
transitions={'0': {'0': {'1'}}, '1': {'0': {'1'}}},
initial_state='0',
final_states={'1'}
)
print(A.reverse().states)
B = DFA.from_nfa(A.reverse())
assert(A.accepts_input('00')==B.accepts_input('00'))
``` | 0.0 | 80987709928431572be930d9242968276c3845f7 | [
"tests/test_dfa.py::TestDFA::test_init_nfa_lambda_transition",
"tests/test_dfa.py::TestDFA::test_init_nfa_more_complex",
"tests/test_dfa.py::TestDFA::test_init_nfa_simple",
"tests/test_dfa.py::TestDFA::test_nfa_to_dfa_with_lambda_transitions"
]
| [
"tests/test_dfa.py::TestDFA::test_accepts_input_false",
"tests/test_dfa.py::TestDFA::test_accepts_input_true",
"tests/test_dfa.py::TestDFA::test_complement",
"tests/test_dfa.py::TestDFA::test_copy_dfa",
"tests/test_dfa.py::TestDFA::test_dfa_equal",
"tests/test_dfa.py::TestDFA::test_dfa_not_equal",
"tests/test_dfa.py::TestDFA::test_difference",
"tests/test_dfa.py::TestDFA::test_equivalence_minify",
"tests/test_dfa.py::TestDFA::test_equivalence_not_equal",
"tests/test_dfa.py::TestDFA::test_equivalence_two_non_minimal",
"tests/test_dfa.py::TestDFA::test_init_dfa",
"tests/test_dfa.py::TestDFA::test_init_dfa_missing_formal_params",
"tests/test_dfa.py::TestDFA::test_init_validation",
"tests/test_dfa.py::TestDFA::test_intersection",
"tests/test_dfa.py::TestDFA::test_isdisjoint",
"tests/test_dfa.py::TestDFA::test_isempty_empty",
"tests/test_dfa.py::TestDFA::test_isempty_non_empty",
"tests/test_dfa.py::TestDFA::test_isfinite_empty",
"tests/test_dfa.py::TestDFA::test_isfinite_finite",
"tests/test_dfa.py::TestDFA::test_isfinite_infinite",
"tests/test_dfa.py::TestDFA::test_isfinite_infinite_case_2",
"tests/test_dfa.py::TestDFA::test_isfinite_universe",
"tests/test_dfa.py::TestDFA::test_issubset",
"tests/test_dfa.py::TestDFA::test_issuperset",
"tests/test_dfa.py::TestDFA::test_minify_dfa",
"tests/test_dfa.py::TestDFA::test_minify_dfa_complex",
"tests/test_dfa.py::TestDFA::test_minify_dfa_initial_state",
"tests/test_dfa.py::TestDFA::test_minify_dfa_no_final_states",
"tests/test_dfa.py::TestDFA::test_minify_minimal_dfa",
"tests/test_dfa.py::TestDFA::test_operations_other_types",
"tests/test_dfa.py::TestDFA::test_partial_dfa",
"tests/test_dfa.py::TestDFA::test_read_input_accepted",
"tests/test_dfa.py::TestDFA::test_read_input_rejection",
"tests/test_dfa.py::TestDFA::test_read_input_rejection_invalid_symbol",
"tests/test_dfa.py::TestDFA::test_read_input_step",
"tests/test_dfa.py::TestDFA::test_set_laws",
"tests/test_dfa.py::TestDFA::test_show_diagram_initial_final_different",
"tests/test_dfa.py::TestDFA::test_show_diagram_initial_final_same",
"tests/test_dfa.py::TestDFA::test_symbol_mismatch",
"tests/test_dfa.py::TestDFA::test_symmetric_difference",
"tests/test_dfa.py::TestDFA::test_union",
"tests/test_dfa.py::TestDFA::test_validate_invalid_final_state",
"tests/test_dfa.py::TestDFA::test_validate_invalid_final_state_non_str",
"tests/test_dfa.py::TestDFA::test_validate_invalid_initial_state",
"tests/test_dfa.py::TestDFA::test_validate_invalid_state",
"tests/test_dfa.py::TestDFA::test_validate_invalid_symbol",
"tests/test_dfa.py::TestDFA::test_validate_missing_state",
"tests/test_dfa.py::TestDFA::test_validate_missing_symbol"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2022-10-09 03:51:01+00:00 | mit | 1,462 |
|
canonical__charmcraft-1228 | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index fb7c8e9..efd9445 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -1298,11 +1298,11 @@ class CreateLibCommand(BaseCommand):
"characters and underscore, starting with alpha."
)
- charm_name = get_name_from_metadata()
+ charm_name = self.config.name or get_name_from_metadata()
if charm_name is None:
raise CraftError(
- "Cannot find a valid charm name in metadata.yaml. Check you are in a charm "
- "directory with metadata.yaml."
+ "Cannot find a valid charm name in charm definition. "
+ "Check that you are using the correct project directory."
)
# '-' is valid in charm names, but not in a python import
@@ -1310,11 +1310,11 @@ class CreateLibCommand(BaseCommand):
importable_charm_name = create_importable_name(charm_name)
# all libraries born with API version 0
- full_name = "charms.{}.v0.{}".format(importable_charm_name, lib_name)
+ full_name = f"charms.{importable_charm_name}.v0.{lib_name}"
lib_data = get_lib_info(full_name=full_name)
lib_path = lib_data.path
if lib_path.exists():
- raise CraftError("This library already exists: {!r}.".format(str(lib_path)))
+ raise CraftError(f"This library already exists: {str(lib_path)!r}.")
emit.progress(f"Creating library {lib_name}.")
store = Store(self.config.charmhub)
@@ -1323,12 +1323,12 @@ class CreateLibCommand(BaseCommand):
# create the new library file from the template
env = get_templates_environment("charmlibs")
template = env.get_template("new_library.py.j2")
- context = dict(lib_id=lib_id)
+ context = {"lib_id": lib_id}
try:
lib_path.parent.mkdir(parents=True, exist_ok=True)
lib_path.write_text(template.render(context))
except OSError as exc:
- raise CraftError("Error writing the library in {!r}: {!r}.".format(str(lib_path), exc))
+ raise CraftError(f"Error writing the library in {str(lib_path)!r}: {exc!r}.")
if parsed_args.format:
info = {"library_id": lib_id}
| canonical/charmcraft | f4dd212d03502944642b73b7949135ac9982029e | diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index 110d454..2715f6b 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -2757,31 +2757,36 @@ def test_status_unreleased_track(emitter, store_mock, config):
@pytest.mark.skipif(sys.platform == "win32", reason="Windows not [yet] supported")
@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
-def test_createlib_simple(emitter, store_mock, tmp_path, monkeypatch, config, formatted):
[email protected]("charmcraft_yaml_name", [None, "test-charm"])
+def test_createlib_simple(
+ emitter, store_mock, tmp_path, monkeypatch, config, formatted, charmcraft_yaml_name
+):
"""Happy path with result from the Store."""
monkeypatch.chdir(tmp_path)
+ config.name = charmcraft_yaml_name
+
lib_id = "test-example-lib-id"
store_mock.create_library_id.return_value = lib_id
args = Namespace(name="testlib", format=formatted)
with patch("charmcraft.commands.store.get_name_from_metadata") as mock:
- mock.return_value = "testcharm"
+ mock.return_value = "test-charm"
CreateLibCommand(config).run(args)
assert store_mock.mock_calls == [
- call.create_library_id("testcharm", "testlib"),
+ call.create_library_id("test-charm", "testlib"),
]
if formatted:
expected = {"library_id": lib_id}
emitter.assert_json_output(expected)
else:
expected = [
- "Library charms.testcharm.v0.testlib created with id test-example-lib-id.",
- "Consider 'git add lib/charms/testcharm/v0/testlib.py'.",
+ "Library charms.test_charm.v0.testlib created with id test-example-lib-id.",
+ "Consider 'git add lib/charms/test_charm/v0/testlib.py'.",
]
emitter.assert_messages(expected)
- created_lib_file = tmp_path / "lib" / "charms" / "testcharm" / "v0" / "testlib.py"
+ created_lib_file = tmp_path / "lib" / "charms" / "test_charm" / "v0" / "testlib.py"
env = get_templates_environment("charmlibs")
expected_newlib_content = env.get_template("new_library.py.j2").render(lib_id=lib_id)
@@ -2791,13 +2796,14 @@ def test_createlib_simple(emitter, store_mock, tmp_path, monkeypatch, config, fo
def test_createlib_name_from_metadata_problem(store_mock, config):
"""The metadata wasn't there to get the name."""
args = Namespace(name="testlib", format=None)
+ config.name = None
with patch("charmcraft.commands.store.get_name_from_metadata") as mock:
mock.return_value = None
with pytest.raises(CraftError) as cm:
CreateLibCommand(config).run(args)
assert str(cm.value) == (
- "Cannot find a valid charm name in metadata.yaml. Check you are in a charm "
- "directory with metadata.yaml."
+ "Cannot find a valid charm name in charm definition. "
+ "Check that you are using the correct project directory."
)
@@ -2856,22 +2862,20 @@ def test_createlib_path_already_there(tmp_path, monkeypatch, config):
"""The intended-to-be-created library is already there."""
monkeypatch.chdir(tmp_path)
- factory.create_lib_filepath("test-charm-name", "testlib", api=0)
+ factory.create_lib_filepath("test-charm", "testlib", api=0)
args = Namespace(name="testlib", format=None)
- with patch("charmcraft.commands.store.get_name_from_metadata") as mock:
- mock.return_value = "test-charm-name"
- with pytest.raises(CraftError) as err:
- CreateLibCommand(config).run(args)
+ with pytest.raises(CraftError) as err:
+ CreateLibCommand(config).run(args)
assert str(err.value) == (
- "This library already exists: 'lib/charms/test_charm_name/v0/testlib.py'."
+ "This library already exists: 'lib/charms/test_charm/v0/testlib.py'."
)
@pytest.mark.skipif(sys.platform == "win32", reason="Windows not [yet] supported")
def test_createlib_path_can_not_write(tmp_path, monkeypatch, store_mock, add_cleanup, config):
"""Disk error when trying to write the new lib (bad permissions, name too long, whatever)."""
- lib_dir = tmp_path / "lib" / "charms" / "test_charm_name" / "v0"
+ lib_dir = tmp_path / "lib" / "charms" / "test_charm" / "v0"
lib_dir.mkdir(parents=True)
lib_dir.chmod(0o111)
add_cleanup(lib_dir.chmod, 0o777)
@@ -2880,10 +2884,8 @@ def test_createlib_path_can_not_write(tmp_path, monkeypatch, store_mock, add_cle
args = Namespace(name="testlib", format=None)
store_mock.create_library_id.return_value = "lib_id"
expected_error = "Error writing the library in .*: PermissionError.*"
- with patch("charmcraft.commands.store.get_name_from_metadata") as mock:
- mock.return_value = "test-charm-name"
- with pytest.raises(CraftError, match=expected_error):
- CreateLibCommand(config).run(args)
+ with pytest.raises(CraftError, match=expected_error):
+ CreateLibCommand(config).run(args)
def test_createlib_library_template_is_python(emitter, store_mock, tmp_path, monkeypatch):
| `create-lib` command breaks without a metadata.yaml file
### Bug Description
Without a `metadata.yaml` file, `charmcraft create-lib` fails. However, it only needs this to get the charm name.
### To Reproduce
`charmcraft init` (or with any explicit profile)
`charmcraft create-lib mylib`
### Environment
N/A
### charmcraft.yaml
```shell
N/A
```
### Relevant log output
```shell
(charmcraft) lengau@hyperion:~/Work/Test/charms/tmp$ charmcraft init --profile=kubernetes
Charmed operator package file and directory tree initialised.
Now edit the following package files to provide fundamental charm metadata
and other information:
charmcraft.yaml
src/charm.py
README.md
(charmcraft) lengau@hyperion:~/Work/Test/charms/tmp$ charmcraft create-lib mylib
Cannot find a valid charm name in metadata.yaml. Check you are in a charm directory with metadata.yaml.
Full execution log: '/home/lengau/.local/state/charmcraft/log/charmcraft-20230818-165535.506311.log'
```
| 0.0 | f4dd212d03502944642b73b7949135ac9982029e | [
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_path_already_there"
]
| [
"tests/commands/test_store_commands.py::test_login_simple",
"tests/commands/test_store_commands.py::test_login_exporting",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[charm]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[bundle]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[permission]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[channel]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[ttl]",
"tests/commands/test_store_commands.py::test_login_restricting_ttl",
"tests/commands/test_store_commands.py::test_login_restricting_channels",
"tests/commands/test_store_commands.py::test_login_restricting_permissions",
"tests/commands/test_store_commands.py::test_login_restricting_permission_invalid",
"tests/commands/test_store_commands.py::test_login_restricting_charms",
"tests/commands/test_store_commands.py::test_login_restricting_bundles",
"tests/commands/test_store_commands.py::test_login_restriction_mix",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_logout_but_not_logged_in",
"tests/commands/test_store_commands.py::test_whoami[None]",
"tests/commands/test_store_commands.py::test_whoami[json]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[None]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[json]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[None]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[json]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[None]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[json]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[None]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[json]",
"tests/commands/test_store_commands.py::test_whoami_comprehensive",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_unregister_name",
"tests/commands/test_store_commands.py::test_list_registered_empty[None]",
"tests/commands/test_store_commands.py::test_list_registered_empty[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[json]",
"tests/commands/test_store_commands.py::test_list_registered_several[None]",
"tests/commands/test_store_commands.py::test_list_registered_several[json]",
"tests/commands/test_store_commands.py::test_list_registered_with_collaborations[None]",
"tests/commands/test_store_commands.py::test_list_registered_with_collaborations[json]",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok[json]",
"tests/commands/test_store_commands.py::test_upload_call_error[None]",
"tests/commands/test_store_commands.py::test_upload_call_error[json]",
"tests/commands/test_store_commands.py::test_upload_call_login_expired[None]",
"tests/commands/test_store_commands.py::test_upload_call_login_expired[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[None]",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[json]",
"tests/commands/test_store_commands.py::test_upload_options_resource",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_with_different_name_than_in_metadata",
"tests/commands/test_store_commands.py::test_revisions_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_empty[None]",
"tests/commands/test_store_commands.py::test_revisions_empty[json]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[json]",
"tests/commands/test_store_commands.py::test_revisions_version_null[None]",
"tests/commands/test_store_commands.py::test_revisions_version_null[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[json]",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_close_simple_ok",
"tests/commands/test_store_commands.py::test_status_simple_ok[None]",
"tests/commands/test_store_commands.py::test_status_simple_ok[json]",
"tests/commands/test_store_commands.py::test_status_empty[None]",
"tests/commands/test_store_commands.py::test_status_empty[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[json]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[None]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[json]",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch[None]",
"tests/commands/test_store_commands.py::test_status_with_one_branch[json]",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources[None]",
"tests/commands/test_store_commands.py::test_status_with_resources[json]",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[None]",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[json]",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiplebranches",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[None]",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[json]",
"tests/commands/test_store_commands.py::test_status_unreleased_track",
"tests/commands/test_store_commands.py::test_createlib_simple[None-None]",
"tests/commands/test_store_commands.py::test_createlib_simple[None-json]",
"tests/commands/test_store_commands.py::test_createlib_simple[test-charm-None]",
"tests/commands/test_store_commands.py::test_createlib_simple[test-charm-json]",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple[None]",
"tests/commands/test_store_commands.py::test_publishlib_simple[json]",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all[None]",
"tests/commands/test_store_commands.py::test_publishlib_all[json]",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[json]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[None]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[json]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all[None]",
"tests/commands/test_store_commands.py::test_fetchlib_all[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[json]",
"tests/commands/test_store_commands.py::test_listlib_simple[None]",
"tests/commands/test_store_commands.py::test_listlib_simple[json]",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty[None]",
"tests/commands/test_store_commands.py::test_listlib_empty[json]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[None]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[json]",
"tests/commands/test_store_commands.py::test_resources_simple[None]",
"tests/commands/test_store_commands.py::test_resources_simple[json]",
"tests/commands/test_store_commands.py::test_resources_empty[None]",
"tests/commands/test_store_commands.py::test_resources_empty[json]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[None]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[json]",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[None]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_already_uploaded[None]",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_already_uploaded[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_id_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_image_id_missing",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[None]",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[json]"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2023-08-18 21:48:39+00:00 | apache-2.0 | 1,463 |
|
canonical__charmcraft-403 | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index ac668f5..e595b03 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -567,11 +567,11 @@ class StatusCommand(BaseCommand):
For example:
$ charmcraft status
- Track Channel Version Revision
- latest stable - -
- candidate - -
- beta - -
- edge 1 1
+ Track Base Channel Version Revision
+ latest ubuntu 20.04 (amd64) stable - -
+ candidate - -
+ beta - -
+ edge 1 1
Showing channels will take you through login if needed.
"""
@@ -598,20 +598,22 @@ class StatusCommand(BaseCommand):
logger.info("Nothing has been released yet.")
return
- # build easier to access structures
- releases_by_channel = {item.channel: item for item in channel_map}
+        # group released revisions by track and base
+ releases_by_track = {}
+ for item in channel_map:
+ track = item.channel.split("/")[0]
+ by_base = releases_by_track.setdefault(track, {})
+ base_str = "{0.name} {0.channel} ({0.architecture})".format(item.base)
+ by_channel = by_base.setdefault(base_str, {})
+ by_channel[item.channel] = item
+
+        # group revision objects by revision number
revisions_by_revno = {item.revision: item for item in revisions}
# process and order the channels, while preserving the tracks order
- all_tracks = []
per_track = {}
branch_present = False
for channel in channels:
- # it's super rare to have a more than just a bunch of tracks (furthermore, normally
- # there's only one), so it's ok to do this sequential search
- if channel.track not in all_tracks:
- all_tracks.append(channel.track)
-
nonbranches_list, branches_list = per_track.setdefault(
channel.track, ([], [])
)
@@ -627,55 +629,65 @@ class StatusCommand(BaseCommand):
branches_list.append(channel)
branch_present = True
- headers = ["Track", "Channel", "Version", "Revision"]
+ headers = ["Track", "Base", "Channel", "Version", "Revision"]
resources_present = any(release.resources for release in channel_map)
if resources_present:
headers.append("Resources")
if branch_present:
headers.append("Expires at")
- # show everything, grouped by tracks, with regular channels at first and
+ # show everything, grouped by tracks and bases, with regular channels at first and
# branches (if any) after those
data = []
- for track in all_tracks:
- release_shown_for_this_track = False
+ for track, (channels, branches) in per_track.items():
+ releases_by_base = releases_by_track[track]
shown_track = track
- channels, branches = per_track[track]
-
- for channel in channels:
- description = channel.risk
- # get the release of the channel, fallbacking accordingly
- release = releases_by_channel.get(channel.name)
- if release is None:
- version = revno = resources = (
- "↑" if release_shown_for_this_track else "-"
- )
- else:
- release_shown_for_this_track = True
- revno = release.revision
- revision = revisions_by_revno[revno]
- version = revision.version
- resources = self._build_resources_repr(release.resources)
-
- datum = [shown_track, description, version, revno]
- if resources_present:
- datum.append(resources)
- data.append(datum)
-
- # stop showing the track name for the rest of the track
- shown_track = ""
-
- for branch in branches:
- description = "/".join((branch.risk, branch.branch))
- release = releases_by_channel[branch.name]
- expiration = release.expires_at.isoformat()
- revision = revisions_by_revno[release.revision]
- datum = ["", description, revision.version, release.revision]
- if resources_present:
- datum.append(self._build_resources_repr(release.resources))
- datum.append(expiration)
- data.append(datum)
+ # bases are shown alphabetically ordered
+ for base in sorted(releases_by_base):
+ releases_by_channel = releases_by_base[base]
+ shown_base = base
+
+ release_shown_for_this_track_base = False
+
+ for channel in channels:
+ description = channel.risk
+
+                # get the release of the channel, falling back accordingly
+ release = releases_by_channel.get(channel.name)
+ if release is None:
+ version = revno = resources = (
+ "↑" if release_shown_for_this_track_base else "-"
+ )
+ else:
+ release_shown_for_this_track_base = True
+ revno = release.revision
+ revision = revisions_by_revno[revno]
+ version = revision.version
+ resources = self._build_resources_repr(release.resources)
+
+ datum = [shown_track, shown_base, description, version, revno]
+ if resources_present:
+ datum.append(resources)
+ data.append(datum)
+
+ # stop showing the track and base for the rest of the struct
+ shown_track = ""
+ shown_base = ""
+
+ for branch in branches:
+ release = releases_by_channel.get(branch.name)
+ if release is None:
+ # not for this base!
+ continue
+ description = "/".join((branch.risk, branch.branch))
+ expiration = release.expires_at.isoformat()
+ revision = revisions_by_revno[release.revision]
+ datum = ["", "", description, revision.version, release.revision]
+ if resources_present:
+ datum.append(self._build_resources_repr(release.resources))
+ datum.append(expiration)
+ data.append(datum)
table = tabulate(data, headers=headers, tablefmt="plain", numalign="left")
for line in table.splitlines():
| canonical/charmcraft | c7fb8cfeade37a38a4c2536c9ec9aa87913de1aa | diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index b670f2c..c3e53c8 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -958,11 +958,11 @@ def test_status_simple_ok(caplog, store_mock, config):
]
expected = [
- "Track Channel Version Revision",
- "latest stable v7 7",
- " candidate v7 7",
- " beta 2.0 80",
- " edge git-0db35ea1 156",
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable v7 7",
+ " candidate v7 7",
+ " beta 2.0 80",
+ " edge git-0db35ea1 156",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1002,11 +1002,11 @@ def test_status_channels_not_released_with_fallback(caplog, store_mock, config):
]
expected = [
- "Track Channel Version Revision",
- "latest stable v7 7",
- " candidate ↑ ↑",
- " beta ↑ ↑",
- " edge 2.0 80",
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable v7 7",
+ " candidate ↑ ↑",
+ " beta ↑ ↑",
+ " edge 2.0 80",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1034,11 +1034,11 @@ def test_status_channels_not_released_without_fallback(caplog, store_mock, confi
]
expected = [
- "Track Channel Version Revision",
- "latest stable - -",
- " candidate - -",
- " beta 5.1 5",
- " edge almostready 12",
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta 5.1 5",
+ " edge almostready 12",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1068,15 +1068,15 @@ def test_status_multiple_tracks(caplog, store_mock, config):
]
expected = [
- "Track Channel Version Revision",
- "latest stable 7.5.3 503",
- " candidate ↑ ↑",
- " beta ↑ ↑",
- " edge ↑ ↑",
- "2.0 stable - -",
- " candidate - -",
- " beta - -",
- " edge 1 1",
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable 7.5.3 503",
+ " candidate ↑ ↑",
+ " beta ↑ ↑",
+ " edge ↑ ↑",
+ "2.0 ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta - -",
+ " edge 1 1",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1112,23 +1112,23 @@ def test_status_tracks_order(caplog, store_mock, config):
]
expected = [
- "Track Channel Version Revision",
- "latest stable - -",
- " candidate - -",
- " beta - -",
- " edge v1 1",
- "zzz stable - -",
- " candidate - -",
- " beta - -",
- " edge v4 4",
- "2.0 stable - -",
- " candidate - -",
- " beta - -",
- " edge v3 3",
- "aaa stable - -",
- " candidate - -",
- " beta - -",
- " edge v2 2",
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta - -",
+ " edge v1 1",
+ "zzz ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta - -",
+ " edge v4 4",
+ "2.0 ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta - -",
+ " edge v3 3",
+ "aaa ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta - -",
+ " edge v2 2",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1170,12 +1170,12 @@ def test_status_with_one_branch(caplog, store_mock, config):
]
expected = [
- "Track Channel Version Revision Expires at",
- "latest stable - -",
- " candidate - -",
- " beta 5.1 5",
- " edge ↑ ↑",
- " beta/mybranch ver.12 12 2020-07-03T20:30:40+00:00",
+ "Track Base Channel Version Revision Expires at",
+ "latest ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta 5.1 5",
+ " edge ↑ ↑",
+ " beta/mybranch ver.12 12 2020-07-03T20:30:40+00:00", # NOQA
]
assert expected == [rec.message for rec in caplog.records]
@@ -1224,13 +1224,13 @@ def test_status_with_multiple_branches(caplog, store_mock, config):
]
expected = [
- "Track Channel Version Revision Expires at",
- "latest stable - -",
- " candidate - -",
- " beta 5.1 5",
- " edge ↑ ↑",
- " beta/branch-1 ver.12 12 2020-07-03T20:30:40+00:00",
- " beta/branch-2 15.0.0 15 2020-07-03T20:30:40+00:00",
+ "Track Base Channel Version Revision Expires at",
+ "latest ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta 5.1 5",
+ " edge ↑ ↑",
+ " beta/branch-1 ver.12 12 2020-07-03T20:30:40+00:00", # NOQA
+ " beta/branch-2 15.0.0 15 2020-07-03T20:30:40+00:00", # NOQA
]
assert expected == [rec.message for rec in caplog.records]
@@ -1255,11 +1255,11 @@ def test_status_with_resources(caplog, store_mock, config):
StatusCommand("group", config).run(args)
expected = [
- "Track Channel Version Revision Resources",
- "latest stable - - -",
- " candidate 5.1 5 resource1 (r1), resource2 (r54)",
- " beta 5.1 5 resource1 (r1)",
- " edge ↑ ↑ ↑",
+ "Track Base Channel Version Revision Resources",
+ "latest ubuntu 20.04 (amd64) stable - - -",
+ " candidate 5.1 5 resource1 (r1), resource2 (r54)", # NOQA
+ " beta 5.1 5 resource1 (r1)",
+ " edge ↑ ↑ ↑",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1286,11 +1286,11 @@ def test_status_with_resources_missing_after_closed_channel(caplog, store_mock,
StatusCommand("group", config).run(args)
expected = [
- "Track Channel Version Revision Resources",
- "latest stable 5.1 5 resource (r1)",
- " candidate ↑ ↑ ↑",
- " beta 5.1 5 -",
- " edge 5.1 5 resource (r1)",
+ "Track Base Channel Version Revision Resources",
+ "latest ubuntu 20.04 (amd64) stable 5.1 5 resource (r1)",
+ " candidate ↑ ↑ ↑",
+ " beta 5.1 5 -",
+ " edge 5.1 5 resource (r1)",
]
assert expected == [rec.message for rec in caplog.records]
@@ -1331,12 +1331,205 @@ def test_status_with_resources_and_branches(caplog, store_mock, config):
StatusCommand("group", config).run(args)
expected = [
- "Track Channel Version Revision Resources Expires at",
- "latest stable - - -",
- " candidate - - -",
- " beta 7.0.0 23 testres (r14)",
- " edge ↑ ↑ ↑",
- " edge/mybranch 5.1 5 testres (r1) 2020-07-03T20:30:40+00:00",
+ "Track Base Channel Version Revision Resources Expires at", # NOQA
+ "latest ubuntu 20.04 (amd64) stable - - -",
+ " candidate - - -",
+ " beta 7.0.0 23 testres (r14)",
+ " edge ↑ ↑ ↑",
+ " edge/mybranch 5.1 5 testres (r1) 2020-07-03T20:30:40+00:00", # NOQA
+ ]
+ assert expected == [rec.message for rec in caplog.records]
+
+
+def test_status_multiplebases_single_track(caplog, store_mock, config):
+ """Multiple bases with one track."""
+ caplog.set_level(logging.INFO, logger="charmcraft.commands")
+
+ other_base = Base(architecture="16b", channel="1", name="xz")
+ channel_map = [
+ _build_release(revision=7, channel="latest/stable", base=other_base),
+ _build_release(revision=7, channel="latest/candidate"),
+ _build_release(revision=80, channel="latest/beta", base=other_base),
+ _build_release(revision=156, channel="latest/edge"),
+ ]
+ channels = _build_channels()
+ revisions = [
+ _build_revision(revno=7, version="v7"),
+ _build_revision(revno=80, version="2.0"),
+ _build_revision(revno=156, version="git-0db35ea1"),
+ ]
+ store_mock.list_releases.return_value = (channel_map, channels, revisions)
+
+ args = Namespace(name="testcharm")
+ StatusCommand("group", config).run(args)
+
+ assert store_mock.mock_calls == [
+ call.list_releases("testcharm"),
+ ]
+
+ expected = [
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable - -",
+ " candidate v7 7",
+ " beta ↑ ↑",
+ " edge git-0db35ea1 156",
+ " xz 1 (16b) stable v7 7",
+ " candidate ↑ ↑",
+ " beta 2.0 80",
+ " edge ↑ ↑",
+ ]
+ assert expected == [rec.message for rec in caplog.records]
+
+
+def test_status_multiplebases_multiple_tracks(caplog, store_mock, config):
+ """Multiple bases with several tracks."""
+ caplog.set_level(logging.INFO, logger="charmcraft.commands")
+
+ other_base = Base(architecture="16b", channel="1", name="xz")
+ channel_map = [
+ _build_release(revision=7, channel="latest/stable", base=other_base),
+ _build_release(revision=7, channel="latest/candidate"),
+ _build_release(revision=80, channel="latest/beta", base=other_base),
+ _build_release(revision=156, channel="latest/edge"),
+ _build_release(revision=7, channel="2.0/stable", base=other_base),
+ _build_release(revision=7, channel="2.0/candidate"),
+ _build_release(revision=80, channel="2.0/beta", base=other_base),
+ _build_release(revision=156, channel="2.0/edge"),
+ _build_release(revision=156, channel="3.0/edge"),
+ ]
+ channels = (
+ _build_channels() + _build_channels(track="2.0") + _build_channels(track="3.0")
+ )
+ revisions = [
+ _build_revision(revno=7, version="v7"),
+ _build_revision(revno=80, version="2.0"),
+ _build_revision(revno=156, version="git-0db35ea1"),
+ ]
+ store_mock.list_releases.return_value = (channel_map, channels, revisions)
+
+ args = Namespace(name="testcharm")
+ StatusCommand("group", config).run(args)
+
+ assert store_mock.mock_calls == [
+ call.list_releases("testcharm"),
+ ]
+
+ expected = [
+ "Track Base Channel Version Revision",
+ "latest ubuntu 20.04 (amd64) stable - -",
+ " candidate v7 7",
+ " beta ↑ ↑",
+ " edge git-0db35ea1 156",
+ " xz 1 (16b) stable v7 7",
+ " candidate ↑ ↑",
+ " beta 2.0 80",
+ " edge ↑ ↑",
+ "2.0 ubuntu 20.04 (amd64) stable - -",
+ " candidate v7 7",
+ " beta ↑ ↑",
+ " edge git-0db35ea1 156",
+ " xz 1 (16b) stable v7 7",
+ " candidate ↑ ↑",
+ " beta 2.0 80",
+ " edge ↑ ↑",
+ "3.0 ubuntu 20.04 (amd64) stable - -",
+ " candidate - -",
+ " beta - -",
+ " edge git-0db35ea1 156",
+ ]
+ assert expected == [rec.message for rec in caplog.records]
+
+
+def test_status_multiplebases_everything_combined(caplog, store_mock, config):
+ """Multiple bases with several other modifiers, just a sanity check."""
+ caplog.set_level(logging.INFO, logger="charmcraft.commands")
+
+ other_base = Base(architecture="16b", channel="1", name="xz")
+ tstamp = dateutil.parser.parse("2020-07-03T20:30:40Z")
+ resource = Resource(name="testres", optional=True, revision=1, resource_type="file")
+ channel_map = [
+ _build_release(revision=7, channel="latest/candidate"),
+ _build_release(revision=156, channel="latest/edge"),
+ _build_release(revision=7, channel="latest/candidate/br1", expires_at=tstamp),
+ _build_release(revision=7, channel="latest/stable", base=other_base),
+ _build_release(revision=80, channel="latest/beta", base=other_base),
+ _build_release(
+ revision=99,
+ channel="latest/beta/br2",
+ base=other_base,
+ expires_at=tstamp,
+ resources=[resource],
+ ),
+ _build_release(revision=7, channel="2.0/candidate"),
+ _build_release(revision=80, channel="2.0/beta"),
+ _build_release(revision=7, channel="2.0/stable", base=other_base),
+ _build_release(revision=80, channel="2.0/edge", base=other_base),
+ _build_release(
+ revision=80, channel="2.0/edge/foobar", base=other_base, expires_at=tstamp
+ ),
+ ]
+ channels = _build_channels() + _build_channels(track="2.0")
+ channels.extend(
+ [
+ Channel(
+ name="latest/candidate/br1",
+ fallback="latest/candidate",
+ track="latest",
+ risk="candidate",
+ branch="br1",
+ ),
+ Channel(
+ name="latest/beta/br2",
+ fallback="latest/beta",
+ track="latest",
+ risk="beta",
+ branch="br2",
+ ),
+ Channel(
+ name="2.0/edge/foobar",
+ fallback="2.0/edge",
+ track="2.0",
+ risk="edge",
+ branch="foobar",
+ ),
+ ]
+ )
+ revisions = [
+ _build_revision(revno=7, version="v7"),
+ _build_revision(revno=80, version="2.0"),
+ _build_revision(revno=156, version="git-0db35ea1"),
+ _build_revision(revno=99, version="weird"),
+ ]
+ store_mock.list_releases.return_value = (channel_map, channels, revisions)
+
+ args = Namespace(name="testcharm")
+ StatusCommand("group", config).run(args)
+
+ assert store_mock.mock_calls == [
+ call.list_releases("testcharm"),
+ ]
+
+ expected = [
+ "Track Base Channel Version Revision Resources Expires at", # NOQA
+ "latest ubuntu 20.04 (amd64) stable - - -",
+ " candidate v7 7 -",
+ " beta ↑ ↑ ↑",
+ " edge git-0db35ea1 156 -",
+ " candidate/br1 v7 7 - 2020-07-03T20:30:40+00:00", # NOQA
+ " xz 1 (16b) stable v7 7 -",
+ " candidate ↑ ↑ ↑",
+ " beta 2.0 80 -",
+ " edge ↑ ↑ ↑",
+ " beta/br2 weird 99 testres (r1) 2020-07-03T20:30:40+00:00", # NOQA
+ "2.0 ubuntu 20.04 (amd64) stable - - -",
+ " candidate v7 7 -",
+ " beta 2.0 80 -",
+ " edge ↑ ↑ ↑",
+ " xz 1 (16b) stable v7 7 -",
+ " candidate ↑ ↑ ↑",
+ " beta ↑ ↑ ↑",
+ " edge 2.0 80 -",
+ " edge/foobar 2.0 80 - 2020-07-03T20:30:40+00:00", # NOQA
]
assert expected == [rec.message for rec in caplog.records]
| Handle multiple platforms for channel map in status
If a charm supports multiple series (platforms), there can be different released revisions for each of them (e.g. revision 1 released to (`stable`, `xenial`) and revision 2 to (`stable`, `focal`)). FWIW, this is similar for snaps: snapcraft handles multiple architectures.
`charmcraft` is currently rendering one released value for each channel, ignoring the platform (a rough grouping sketch follows this record). | 0.0 | c7fb8cfeade37a38a4c2536c9ec9aa87913de1aa | [
"tests/commands/test_store_commands.py::test_status_simple_ok",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback",
"tests/commands/test_store_commands.py::test_status_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined"
]
| [
"tests/commands/test_store_commands.py::test_get_name_from_metadata_ok",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_no_file",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_garbage",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_no_name",
"tests/commands/test_store_commands.py::test_login",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_whoami",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_list_registered_empty",
"tests/commands/test_store_commands.py::test_list_registered_one_private",
"tests/commands/test_store_commands.py::test_list_registered_one_public",
"tests/commands/test_store_commands.py::test_list_registered_several",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok",
"tests/commands/test_store_commands.py::test_upload_call_error",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_charm_with_init_template_todo_token",
"tests/commands/test_store_commands.py::test_revisions_simple",
"tests/commands/test_store_commands.py::test_revisions_empty",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision",
"tests/commands/test_store_commands.py::test_revisions_version_null",
"tests/commands/test_store_commands.py::test_revisions_errors_simple",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_status_empty",
"tests/commands/test_store_commands.py::test_createlib_simple",
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_path_already_there",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash",
"tests/commands/test_store_commands.py::test_getlibinfo_success_simple",
"tests/commands/test_store_commands.py::test_getlibinfo_success_content",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.v3.testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[mycharms.testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.html]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[mycharms/testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-three.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.vX.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_name",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_path",
"tests/commands/test_store_commands.py::test_getlibinfo_malformed_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_api_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_api_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_api_patch_both_zero",
"tests/commands/test_store_commands.py::test_getlibinfo_metadata_api_different_path_api",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_string",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_ascii",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_empty",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash",
"tests/commands/test_store_commands.py::test_listlib_simple",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted",
"tests/commands/test_store_commands.py::test_resources_simple",
"tests/commands/test_store_commands.py::test_resources_empty",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_call_error",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-06-23 13:12:33+00:00 | apache-2.0 | 1,464 |
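The patch in this record groups the channel map by track and by base before rendering the status table. As a rough standalone illustration of that grouping idea (a minimal sketch only; the `Release`/`Base` namedtuples below are made-up stand-ins, not charmcraft's real store models):

```python
# Sketch: group released revisions by (track, base) so the status table can
# show one block per track and, inside it, one block per base.
from collections import namedtuple

Base = namedtuple("Base", "name channel architecture")
Release = namedtuple("Release", "channel revision base")

channel_map = [
    Release("latest/stable", 7, Base("ubuntu", "20.04", "amd64")),
    Release("latest/stable", 9, Base("ubuntu", "18.04", "amd64")),
    Release("latest/edge", 12, Base("ubuntu", "20.04", "amd64")),
]

releases_by_track = {}
for item in channel_map:
    track = item.channel.split("/")[0]
    base_str = "{0.name} {0.channel} ({0.architecture})".format(item.base)
    releases_by_track.setdefault(track, {}).setdefault(base_str, {})[item.channel] = item

# bases are shown alphabetically ordered, as in the patch above
for track, by_base in releases_by_track.items():
    for base_str in sorted(by_base):
        for channel, release in by_base[base_str].items():
            print(track, base_str, channel, release.revision)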
|
canonical__charmcraft-530 | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index 010e273..eeec82b 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -507,7 +507,7 @@ class ReleaseCommand(BaseCommand):
charmcraft release mycharm --revision=14 \\
--channel=beta --resource=thedb:4
- Listing revisions will take you through login if needed.
+ Releasing a revision will take you through login if needed.
"""
)
common = True
@@ -560,6 +560,43 @@ class ReleaseCommand(BaseCommand):
logger.info(msg, *args)
+class CloseCommand(BaseCommand):
+ """Close a channel for a charm or bundle."""
+
+ name = "close"
+ help_msg = "Close a channel for a charm or bundle"
+ overview = textwrap.dedent(
+ """
+ Close the specified channel for a charm or bundle.
+
+ The channel is made up of `track/risk/branch` with both the track and
+ the branch as optional items, so formally:
+
+ [track/]risk[/branch]
+
+ Channel risk must be one of stable, candidate, beta or edge. The
+ track defaults to `latest` and branch has no default.
+
+ Closing a channel will take you through login if needed.
+ """
+ )
+ common = True
+
+ def fill_parser(self, parser):
+ """Add own parameters to the general parser."""
+ parser.add_argument("name", help="The name of charm or bundle")
+ parser.add_argument("channel", help="The channel to close")
+
+ def run(self, parsed_args):
+ """Run the command."""
+ store = Store(self.config.charmhub)
+ revision = None # revision None will actually close the channel
+ channels = [parsed_args.channel] # the API accepts multiple channels, we have only one
+ resources = [] # not really used when closing channels
+ store.release(parsed_args.name, revision, channels, resources)
+ logger.info("Closed %r channel for %r.", parsed_args.channel, parsed_args.name)
+
+
class StatusCommand(BaseCommand):
"""Show channel status for a charm or bundle."""
diff --git a/charmcraft/main.py b/charmcraft/main.py
index 3fba456..ccdfffe 100644
--- a/charmcraft/main.py
+++ b/charmcraft/main.py
@@ -122,6 +122,7 @@ COMMAND_GROUPS = [
# release process, and show status
store.ReleaseCommand,
store.StatusCommand,
+ store.CloseCommand,
# libraries support
store.CreateLibCommand,
store.PublishLibCommand,
diff --git a/completion.bash b/completion.bash
index 1c1aa31..e524618 100644
--- a/completion.bash
+++ b/completion.bash
@@ -23,6 +23,7 @@ _charmcraft()
analyze
build
clean
+ close
create-lib
fetch-lib
help init
| canonical/charmcraft | 81f7b3f2358060a9502b9c7e3301c71c531879ea | diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index ee9c0b8..4d83108 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -32,6 +32,7 @@ import yaml
from charmcraft.config import CharmhubConfig
from charmcraft.cmdbase import CommandError
from charmcraft.commands.store import (
+ CloseCommand,
CreateLibCommand,
EntityType,
FetchLibCommand,
@@ -919,6 +920,24 @@ def test_release_parameters_bad(config, sysargs):
parser.parse_args(sysargs)
+# -- tests for the close command
+
+
+def test_close_simple_ok(caplog, store_mock, config):
+ """Simple case of closing a channel."""
+ caplog.set_level(logging.INFO, logger="charmcraft.commands")
+
+ args = Namespace(name="testcharm", channel="somechannel")
+ CloseCommand("group", config).run(args)
+
+ assert store_mock.mock_calls == [
+ call.release("testcharm", None, ["somechannel"], []),
+ ]
+
+ expected = "Closed 'somechannel' channel for 'testcharm'."
+ assert [expected] == [rec.message for rec in caplog.records]
+
+
# -- tests for the status command
| Implement `charmcraft close`
There needs to be a way to close channels. Bringing over the syntax from snapcraft, the following should be possible (a usage sketch follows this record):
charmcraft close <name> <channel>...
| 0.0 | 81f7b3f2358060a9502b9c7e3301c71c531879ea | [
"tests/commands/test_store_commands.py::test_get_name_from_metadata_ok",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_no_file",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_garbage",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_no_name",
"tests/commands/test_store_commands.py::test_login",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_whoami",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_list_registered_empty",
"tests/commands/test_store_commands.py::test_list_registered_one_private",
"tests/commands/test_store_commands.py::test_list_registered_one_public",
"tests/commands/test_store_commands.py::test_list_registered_several",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok",
"tests/commands/test_store_commands.py::test_upload_call_error",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources",
"tests/commands/test_store_commands.py::test_upload_options_resource",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_charm_with_init_template_todo_token",
"tests/commands/test_store_commands.py::test_revisions_simple",
"tests/commands/test_store_commands.py::test_revisions_empty",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision",
"tests/commands/test_store_commands.py::test_revisions_version_null",
"tests/commands/test_store_commands.py::test_revisions_errors_simple",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_close_simple_ok",
"tests/commands/test_store_commands.py::test_status_simple_ok",
"tests/commands/test_store_commands.py::test_status_empty",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback",
"tests/commands/test_store_commands.py::test_status_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined",
"tests/commands/test_store_commands.py::test_status_with_base_in_none",
"tests/commands/test_store_commands.py::test_status_ureleased_track",
"tests/commands/test_store_commands.py::test_createlib_simple",
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_path_already_there",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash",
"tests/commands/test_store_commands.py::test_getlibinfo_success_simple",
"tests/commands/test_store_commands.py::test_getlibinfo_success_content",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.v3.testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[mycharms.testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.html]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[mycharms/testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-three.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.vX.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_name",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_path",
"tests/commands/test_store_commands.py::test_getlibinfo_malformed_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_api_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_api_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_api_patch_both_zero",
"tests/commands/test_store_commands.py::test_getlibinfo_metadata_api_different_path_api",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_string",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_ascii",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_empty",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash",
"tests/commands/test_store_commands.py::test_listlib_simple",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted",
"tests/commands/test_store_commands.py::test_resources_simple",
"tests/commands/test_store_commands.py::test_resources_empty",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_call_error",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-07 12:04:29+00:00 | apache-2.0 | 1,465 |
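The command added in this record implements "close" as a release call with no revision. A stripped-down sketch of that idea (illustrative only; `FakeStore` and `close_channel` are made-up names, not charmcraft's API, and the CLI line in the comment assumes the syntax requested above):

```python
# Sketch: closing a channel is a release with revision=None, mirroring the
# CloseCommand.run() added in the patch above.
# Intended CLI usage (illustrative): charmcraft close mycharm latest/beta

def close_channel(store, name, channel):
    """Close `channel` for `name` by releasing nothing to it."""
    revision = None       # None asks the store to close the channel
    channels = [channel]  # the API accepts several; the command passes one
    resources = []        # not relevant when closing
    store.release(name, revision, channels, resources)

class FakeStore:
    """Stand-in store that just records the call, for illustration."""
    def __init__(self):
        self.calls = []
    def release(self, *args):
        self.calls.append(args)

store = FakeStore()
close_channel(store, "mycharm", "latest/beta")
assert store.calls == [("mycharm", None, ["latest/beta"], [])]
```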
|
canonical__charmcraft-545 | diff --git a/charmcraft/config.py b/charmcraft/config.py
index ad62de2..79262ec 100644
--- a/charmcraft/config.py
+++ b/charmcraft/config.py
@@ -343,6 +343,17 @@ class Config(ModelConfigDefaults, validate_all=False):
validate_part(item)
return item
+ @pydantic.validator("bases", pre=True)
+ def validate_bases_presence(cls, bases, values):
+ """Forbid 'bases' in bundles.
+
+ This is to avoid a posible confusion of expecting the bundle
+ to be built in a specific environment
+ """
+ if values.get("type") == "bundle":
+ raise ValueError("Field not allowed when type=bundle")
+ return bases
+
@classmethod
def expand_short_form_bases(cls, bases: List[Dict[str, Any]]) -> None:
"""Expand short-form base configuration into long-form in-place."""
@@ -390,7 +401,8 @@ class Config(ModelConfigDefaults, validate_all=False):
# type will simplify user facing errors.
bases = obj.get("bases")
if bases is None:
- notify_deprecation("dn03")
+ if obj["type"] in (None, "charm"):
+ notify_deprecation("dn03")
# Set default bases to Ubuntu 20.04 to match strict snap's
# effective behavior.
bases = [
| canonical/charmcraft | d21282d93786a34f788a313d7d4cc599e4f67a95 | diff --git a/tests/test_config.py b/tests/test_config.py
index 30a73dd..6b6db1b 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -537,6 +537,40 @@ def test_no_bases_defaults_to_ubuntu_20_04_with_dn03(caplog, create_config, tmp_
]
+def test_no_bases_is_ok_for_bundles(caplog, create_config, tmp_path):
+ """Do not send a deprecation message if it is a bundle."""
+ caplog.set_level(logging.WARNING, logger="charmcraft")
+ create_config(
+ """
+ type: bundle
+ """
+ )
+
+ load(tmp_path)
+ assert not caplog.records
+
+
+def test_bases_forbidden_for_bundles(create_config, check_schema_error):
+ """Do not allow a bases configuration for bundles."""
+ create_config(
+ """
+ type: bundle
+ bases:
+ - build-on:
+ - name: test-build-name
+ channel: test-build-channel
+ """
+ )
+
+ check_schema_error(
+ dedent(
+ """\
+ Bad charmcraft.yaml content:
+ - Field not allowed when type=bundle in field 'bases'"""
+ )
+ )
+
+
def test_bases_minimal_long_form(create_config):
tmp_path = create_config(
"""
| The control of `bases` in `charmcraft.yaml` should be different for bundles
Currently it complains that there's no `bases`:
```
$ charmcraft revisions facutest-bundle-1
DEPRECATED: Bases configuration is now required.
See https://discourse.charmhub.io/t/4652#heading--dn03 for more information.
Revision Version Created at Status
1 1 2021-02-03 released
```
But bundles do NOT have bases. So this check should be removed. And ANOTHER check should be implemented, to actually error out if the bundle's config has `bases` included (to avoid confusion).
Thanks! | 0.0 | d21282d93786a34f788a313d7d4cc599e4f67a95 | [
"tests/test_config.py::test_no_bases_is_ok_for_bundles",
"tests/test_config.py::test_bases_forbidden_for_bundles"
]
| [
"tests/test_config.py::test_load_current_directory",
"tests/test_config.py::test_load_managed_mode_directory",
"tests/test_config.py::test_load_specific_directory_ok",
"tests/test_config.py::test_load_optional_charmcraft_missing",
"tests/test_config.py::test_load_optional_charmcraft_bad_directory",
"tests/test_config.py::test_load_specific_directory_resolved",
"tests/test_config.py::test_load_specific_directory_expanded",
"tests/test_config.py::test_schema_top_level_no_extra_properties",
"tests/test_config.py::test_schema_type_missing",
"tests/test_config.py::test_schema_type_bad_type",
"tests/test_config.py::test_schema_type_limited_values",
"tests/test_config.py::test_schema_charmhub_api_url_bad_type",
"tests/test_config.py::test_schema_charmhub_api_url_bad_format",
"tests/test_config.py::test_schema_charmhub_storage_url_bad_type",
"tests/test_config.py::test_schema_charmhub_storage_url_bad_format",
"tests/test_config.py::test_schema_charmhub_registry_url_bad_type",
"tests/test_config.py::test_schema_charmhub_registry_url_bad_format",
"tests/test_config.py::test_schema_charmhub_no_extra_properties",
"tests/test_config.py::test_schema_basicprime_bad_init_structure",
"tests/test_config.py::test_schema_basicprime_bad_bundle_structure",
"tests/test_config.py::test_schema_basicprime_bad_prime_structure",
"tests/test_config.py::test_schema_basicprime_bad_prime_type",
"tests/test_config.py::test_schema_basicprime_bad_prime_type_empty",
"tests/test_config.py::test_schema_basicprime_bad_content_format",
"tests/test_config.py::test_schema_additional_part",
"tests/test_config.py::test_charmhub_frozen",
"tests/test_config.py::test_charmhub_underscore_backwards_compatibility",
"tests/test_config.py::test_no_bases_defaults_to_ubuntu_20_04_with_dn03",
"tests/test_config.py::test_bases_minimal_long_form",
"tests/test_config.py::test_bases_extra_field_error",
"tests/test_config.py::test_bases_underscores_error",
"tests/test_config.py::test_channel_is_yaml_number",
"tests/test_config.py::test_minimal_long_form_bases",
"tests/test_config.py::test_complex_long_form_bases",
"tests/test_config.py::test_multiple_long_form_bases",
"tests/test_config.py::test_bases_minimal_short_form",
"tests/test_config.py::test_bases_short_form_extra_field_error",
"tests/test_config.py::test_bases_short_form_missing_field_error",
"tests/test_config.py::test_bases_mixed_form_errors",
"tests/test_config.py::test_schema_analysis_missing",
"tests/test_config.py::test_schema_analysis_full_struct_just_empty",
"tests/test_config.py::test_schema_analysis_ignore_attributes",
"tests/test_config.py::test_schema_analysis_ignore_linters",
"tests/test_config.py::test_schema_analysis_ignore_attribute_missing",
"tests/test_config.py::test_schema_analysis_ignore_linter_missing"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-20 14:22:00+00:00 | apache-2.0 | 1,466 |
|
canonical__charmcraft-556 | diff --git a/charmcraft/linters.py b/charmcraft/linters.py
index d75624d..8d579b2 100644
--- a/charmcraft/linters.py
+++ b/charmcraft/linters.py
@@ -23,6 +23,8 @@ import shlex
from collections import namedtuple
from typing import List, Generator, Union
+import yaml
+
from charmcraft import config
from charmcraft.metadata import parse_metadata_yaml
@@ -233,10 +235,86 @@ class JujuMetadata:
return result
+class JujuActions:
+ """Check that the actions.yaml file is valid YAML if it exists."""
+
+ check_type = CheckType.lint
+ name = "juju-actions"
+ url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-actions"
+ text = "The actions.yaml file is not a valid YAML file."
+
+ # different result constants
+ Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
+
+ def run(self, basedir: pathlib.Path) -> str:
+ """Run the proper verifications."""
+ filepath = basedir / "actions.yaml"
+ if not filepath.exists():
+ # it's optional
+ return self.Result.ok
+
+ try:
+ with filepath.open("rt", encoding="utf8") as fh:
+ yaml.safe_load(fh)
+ except Exception:
+ return self.Result.errors
+
+ return self.Result.ok
+
+
+class JujuConfig:
+ """Check that the config.yaml file (if it exists) is valid.
+
+ The file is considered valid if the following checks are true:
+
+ - has an 'options' key
+ - it is a dictionary
+ - each item inside has the mandatory 'type' key
+ """
+
+ check_type = CheckType.lint
+ name = "juju-config"
+ url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-config"
+
+ # different result constants
+ Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
+
+ def __init__(self):
+ self.text = None
+
+ def run(self, basedir: pathlib.Path) -> str:
+ """Run the proper verifications."""
+ filepath = basedir / "config.yaml"
+ if not filepath.exists():
+ # it's optional
+ return self.Result.ok
+
+ try:
+ with filepath.open("rt", encoding="utf8") as fh:
+ content = yaml.safe_load(fh)
+ except Exception:
+ self.text = "The config.yaml file is not a valid YAML file."
+ return self.Result.errors
+
+ options = content.get("options")
+ if not isinstance(options, dict):
+ self.text = "Error in config.yaml: must have an 'options' dictionary."
+ return self.Result.errors
+
+ for value in options.values():
+ if "type" not in value:
+ self.text = "Error in config.yaml: items under 'options' must have a 'type' key."
+ return self.Result.errors
+
+ return self.Result.ok
+
+
# all checkers to run; the order here is important, as some checkers depend on the
# results from others
CHECKERS = [
Language,
+ JujuActions,
+ JujuConfig,
JujuMetadata,
Framework,
]
@@ -263,7 +341,7 @@ def analyze(
name=cls.name,
result=IGNORED,
url=cls.url,
- text=cls.text,
+ text="",
)
)
continue
| canonical/charmcraft | 60202e4cb4d31f3aa4e92cd6a35225096537fe1b | diff --git a/tests/test_linters.py b/tests/test_linters.py
index c743c3e..4040d1b 100644
--- a/tests/test_linters.py
+++ b/tests/test_linters.py
@@ -28,6 +28,8 @@ from charmcraft.linters import (
FATAL,
Framework,
IGNORED,
+ JujuActions,
+ JujuConfig,
JujuMetadata,
Language,
UNKNOWN,
@@ -639,7 +641,7 @@ def test_analyze_ignore_attribute(config):
assert res1.check_type == CheckType.attribute
assert res1.name == "name1"
assert res1.result == IGNORED
- assert res1.text == "text1"
+ assert res1.text == ""
assert res1.url == "url1"
assert res2.check_type == CheckType.lint
assert res2.name == "name2"
@@ -678,7 +680,7 @@ def test_analyze_ignore_linter(config):
assert res2.check_type == CheckType.lint
assert res2.name == "name2"
assert res2.result == IGNORED
- assert res2.text == "text2"
+ assert res2.text == ""
assert res2.url == "url2"
@@ -755,3 +757,121 @@ def test_analyze_all_can_be_ignored(config):
)
result = analyze(config, "somepath")
assert all(r.result == IGNORED for r in result)
+
+
+# --- tests for JujuActions checker
+
+
+def test_jujuactions_ok(tmp_path):
+ """The actions.yaml file is valid."""
+ actions_file = tmp_path / "actions.yaml"
+ actions_file.write_text("stuff: foobar")
+ result = JujuActions().run(tmp_path)
+ assert result == JujuActions.Result.ok
+
+
+def test_jujuactions_missing_file(tmp_path):
+ """No actions.yaml file at all."""
+ result = JujuActions().run(tmp_path)
+ assert result == JujuActions.Result.ok
+
+
+def test_jujuactions_file_corrupted(tmp_path):
+ """The actions.yaml file is not valid YAML."""
+ actions_file = tmp_path / "actions.yaml"
+ actions_file.write_text(" - \n-")
+ result = JujuActions().run(tmp_path)
+ assert result == JujuActions.Result.errors
+
+
+# --- tests for JujuConfig checker
+
+
+def test_jujuconfig_ok(tmp_path):
+ """The config.yaml file is valid."""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text(
+ """
+ options:
+ foo:
+ type: buzz
+ """
+ )
+ result = JujuConfig().run(tmp_path)
+ assert result == JujuConfig.Result.ok
+
+
+def test_jujuconfig_missing_file(tmp_path):
+ """No config.yaml file at all."""
+ result = JujuConfig().run(tmp_path)
+ assert result == JujuConfig.Result.ok
+
+
+def test_jujuconfig_file_corrupted(tmp_path):
+ """The config.yaml file is not valid YAML."""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text(" - \n-")
+ linter = JujuConfig()
+ result = linter.run(tmp_path)
+ assert result == JujuConfig.Result.errors
+ assert linter.text == "The config.yaml file is not a valid YAML file."
+
+
+def test_jujuconfig_no_options(tmp_path):
+ """The config.yaml file does not have an options key."""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text(
+ """
+ summary: Small text.
+ """
+ )
+ linter = JujuConfig()
+ result = linter.run(tmp_path)
+ assert result == JujuConfig.Result.errors
+ assert linter.text == "Error in config.yaml: must have an 'options' dictionary."
+
+
+def test_jujuconfig_empty_options(tmp_path):
+ """The config.yaml file has an empty options key."""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text(
+ """
+ options:
+ """
+ )
+ linter = JujuConfig()
+ result = linter.run(tmp_path)
+ assert result == JujuConfig.Result.errors
+ assert linter.text == "Error in config.yaml: must have an 'options' dictionary."
+
+
+def test_jujuconfig_options_not_dict(tmp_path):
+ """The config.yaml file has an options key that is not a dict."""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text(
+ """
+ options:
+ - foo
+ - bar
+ """
+ )
+ linter = JujuConfig()
+ result = linter.run(tmp_path)
+ assert result == JujuConfig.Result.errors
+ assert linter.text == "Error in config.yaml: must have an 'options' dictionary."
+
+
+def test_jujuconfig_no_type_in_options_items(tmp_path):
+ """The items under 'options' must have a 'type' key."""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text(
+ """
+ options:
+ foo:
+ description: something missing
+ """
+ )
+ linter = JujuConfig()
+ result = linter.run(tmp_path)
+ assert result == JujuConfig.Result.errors
+ assert linter.text == "Error in config.yaml: items under 'options' must have a 'type' key."
| `charmcraft analyze` - check for invalid `config.yaml`
Follow on from [canonical/operator#618](https://github.com/canonical/operator/pull/618).
At present, it is possible to successfully `charmcraft pack` on a charm with an empty `config.yaml`, i.e. one that does not contain an `options:` map. Given that Juju will refuse to deploy a charm with such a `config.yaml`, we should probably try to catch this using the `charmcraft analyze` machinery that is invoked when a charm is packed.
As I understand, you can completely omit `config.yaml`, but if it exists it must have a top level key named `options`. | 0.0 | 60202e4cb4d31f3aa4e92cd6a35225096537fe1b | [
"tests/test_linters.py::test_checkdispatchpython_python",
"tests/test_linters.py::test_checkdispatchpython_no_dispatch",
"tests/test_linters.py::test_checkdispatchpython_inaccessible_dispatch",
"tests/test_linters.py::test_checkdispatchpython_broken_dispatch",
"tests/test_linters.py::test_checkdispatchpython_empty_dispatch",
"tests/test_linters.py::test_checkdispatchpython_no_entrypoint",
"tests/test_linters.py::test_checkdispatchpython_entrypoint_is_not_python",
"tests/test_linters.py::test_checkdispatchpython_entrypoint_no_exec",
"tests/test_linters.py::test_language_python",
"tests/test_linters.py::test_language_no_dispatch",
"tests/test_linters.py::test_framework_run_operator",
"tests/test_linters.py::test_framework_run_reactive",
"tests/test_linters.py::test_framework_run_unknown",
"tests/test_linters.py::test_framework_operator_used_ok[import",
"tests/test_linters.py::test_framework_operator_used_ok[from",
"tests/test_linters.py::test_framework_operator_language_not_python",
"tests/test_linters.py::test_framework_operator_venv_directory_missing",
"tests/test_linters.py::test_framework_operator_no_venv_ops_directory",
"tests/test_linters.py::test_framework_operator_venv_ops_directory_is_not_a_dir",
"tests/test_linters.py::test_framework_operator_corrupted_entrypoint",
"tests/test_linters.py::test_framework_operator_no_ops_imported[import",
"tests/test_linters.py::test_framework_operator_no_ops_imported[from",
"tests/test_linters.py::test_framework_reactive_used_ok[import",
"tests/test_linters.py::test_framework_reactive_used_ok[from",
"tests/test_linters.py::test_framework_reactive_no_metadata",
"tests/test_linters.py::test_framework_reactive_no_entrypoint",
"tests/test_linters.py::test_framework_reactive_corrupted_entrypoint",
"tests/test_linters.py::test_framework_reactive_no_wheelhouse",
"tests/test_linters.py::test_framework_reactive_no_reactive_lib",
"tests/test_linters.py::test_framework_reactive_no_reactive_imported[import",
"tests/test_linters.py::test_framework_reactive_no_reactive_imported[from",
"tests/test_linters.py::test_jujumetadata_all_ok",
"tests/test_linters.py::test_jujumetadata_missing_file",
"tests/test_linters.py::test_jujumetadata_file_corrupted",
"tests/test_linters.py::test_jujumetadata_missing_name",
"tests/test_linters.py::test_jujumetadata_missing_field[\\n",
"tests/test_linters.py::test_analyze_run_everything",
"tests/test_linters.py::test_analyze_ignore_attribute",
"tests/test_linters.py::test_analyze_ignore_linter",
"tests/test_linters.py::test_analyze_override_ignore",
"tests/test_linters.py::test_analyze_crash_attribute",
"tests/test_linters.py::test_analyze_crash_lint",
"tests/test_linters.py::test_analyze_all_can_be_ignored",
"tests/test_linters.py::test_jujuactions_ok",
"tests/test_linters.py::test_jujuactions_missing_file",
"tests/test_linters.py::test_jujuactions_file_corrupted",
"tests/test_linters.py::test_jujuconfig_ok",
"tests/test_linters.py::test_jujuconfig_missing_file",
"tests/test_linters.py::test_jujuconfig_file_corrupted",
"tests/test_linters.py::test_jujuconfig_no_options",
"tests/test_linters.py::test_jujuconfig_empty_options",
"tests/test_linters.py::test_jujuconfig_options_not_dict",
"tests/test_linters.py::test_jujuconfig_no_type_in_options_items"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-28 12:07:48+00:00 | apache-2.0 | 1,467 |
|
canonical__charmcraft-784 | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index 9102dcc..bb4e784 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -1650,12 +1650,27 @@ class ListResourcesCommand(BaseCommand):
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
+ self.include_format_option(parser)
parser.add_argument("charm_name", metavar="charm-name", help="The name of the charm")
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.list_resources(parsed_args.charm_name)
+
+ if parsed_args.format:
+ info = [
+ {
+ "charm_revision": item.revision,
+ "name": item.name,
+ "type": item.resource_type,
+ "optional": item.optional,
+ }
+ for item in result
+ ]
+ emit.message(self.format_content(parsed_args.format, info))
+ return
+
if not result:
emit.message(f"No resources associated to {parsed_args.charm_name}.")
return
@@ -1701,6 +1716,7 @@ class UploadResourceCommand(BaseCommand):
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
+ self.include_format_option(parser)
parser.add_argument(
"charm_name",
metavar="charm-name",
@@ -1751,24 +1767,22 @@ class UploadResourceCommand(BaseCommand):
# check if the specific image is already in Canonical's registry
already_uploaded = ih.check_in_registry(image_digest)
+ # XXX Facundo 2022-06-13: converting the following four messages to progress ones
+ # so they don't interfere with the programmatic output; we need to add the
+ # permanent=True flag once is available from Craft CLI
if already_uploaded:
- emit.message("Using OCI image from Canonical's registry.", intermediate=True)
+ emit.progress("Using OCI image from Canonical's registry.")
else:
# upload it from local registry
- emit.message(
- "Remote image not found, uploading from local registry.", intermediate=True
- )
+ emit.progress("Remote image not found, uploading from local registry.")
image_digest = ih.upload_from_local(image_digest)
if image_digest is None:
- emit.message(
+ emit.progress(
f"Image with digest {parsed_args.image} is not available in "
"the Canonical's registry nor locally.",
- intermediate=True,
)
return
- emit.message(
- f"Image uploaded, new remote digest: {image_digest}.", intermediate=True
- )
+ emit.progress(f"Image uploaded, new remote digest: {image_digest}.")
# all is green, get the blob to upload to Charmhub
content = store.get_oci_image_blob(
@@ -1793,15 +1807,27 @@ class UploadResourceCommand(BaseCommand):
resource_filepath.unlink()
if result.ok:
- emit.message(
- f"Revision {result.revision} created of "
- f"resource {parsed_args.resource_name!r} for charm {parsed_args.charm_name!r}.",
- )
+ if parsed_args.format:
+ info = {"revision": result.revision}
+ emit.message(self.format_content(parsed_args.format, info))
+ else:
+ emit.message(
+ f"Revision {result.revision} created of resource "
+ f"{parsed_args.resource_name!r} for charm {parsed_args.charm_name!r}.",
+ )
retcode = 0
else:
- emit.message(f"Upload failed with status {result.status!r}:")
- for error in result.errors:
- emit.message(f"- {error.code}: {error.message}")
+ if parsed_args.format:
+ info = {
+ "errors": [
+ {"code": error.code, "message": error.message} for error in result.errors
+ ]
+ }
+ emit.message(self.format_content(parsed_args.format, info))
+ else:
+ emit.message(f"Upload failed with status {result.status!r}:")
+ for error in result.errors:
+ emit.message(f"- {error.code}: {error.message}")
retcode = 1
return retcode
@@ -1827,6 +1853,7 @@ class ListResourceRevisionsCommand(BaseCommand):
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
+ self.include_format_option(parser)
parser.add_argument(
"charm_name",
metavar="charm-name",
@@ -1838,6 +1865,19 @@ class ListResourceRevisionsCommand(BaseCommand):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.list_resource_revisions(parsed_args.charm_name, parsed_args.resource_name)
+
+ if parsed_args.format:
+ info = [
+ {
+ "revision": item.revision,
+ "created at": format_timestamp(item.created_at),
+ "size": item.size,
+ }
+ for item in result
+ ]
+ emit.message(self.format_content(parsed_args.format, info))
+ return
+
if not result:
emit.message("No revisions found.")
return
| canonical/charmcraft | 9150bf310d9676d7047b1303177fbd8c4e1c766c | diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index 3a9e8cb..f44db47 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -4180,60 +4180,112 @@ def test_listlib_properly_sorted(emitter, store_mock, config, formatted):
# -- tests for list resources command
-def test_resources_simple(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_resources_simple(emitter, store_mock, config, formatted):
"""Happy path of one result from the Store."""
store_response = [
Resource(name="testresource", optional=True, revision=1, resource_type="file"),
]
store_mock.list_resources.return_value = store_response
- args = Namespace(charm_name="testcharm")
+ args = Namespace(charm_name="testcharm", format=formatted)
ListResourcesCommand(config).run(args)
assert store_mock.mock_calls == [
call.list_resources("testcharm"),
]
- expected = [
- "Charm Rev Resource Type Optional",
- "1 testresource file True",
- ]
- emitter.assert_messages(expected)
+ if formatted:
+ expected = [
+ {
+ "charm_revision": 1,
+ "name": "testresource",
+ "type": "file",
+ "optional": True,
+ }
+ ]
+ emitter.assert_json_output(expected)
+ else:
+ expected = [
+ "Charm Rev Resource Type Optional",
+ "1 testresource file True",
+ ]
+ emitter.assert_messages(expected)
-def test_resources_empty(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_resources_empty(emitter, store_mock, config, formatted):
"""No results from the store."""
store_response = []
store_mock.list_resources.return_value = store_response
- args = Namespace(charm_name="testcharm")
+ args = Namespace(charm_name="testcharm", format=formatted)
ListResourcesCommand(config).run(args)
- emitter.assert_message("No resources associated to testcharm.")
+ if formatted:
+ emitter.assert_json_output([])
+ else:
+ emitter.assert_message("No resources associated to testcharm.")
-def test_resources_ordered_and_grouped(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_resources_ordered_and_grouped(emitter, store_mock, config, formatted):
"""Results are presented ordered by name in the table."""
store_response = [
Resource(name="bbb-resource", optional=True, revision=2, resource_type="file"),
Resource(name="ccc-resource", optional=True, revision=1, resource_type="file"),
- Resource(name="bbb-resource", optional=True, revision=3, resource_type="file"),
- Resource(name="aaa-resource", optional=True, revision=2, resource_type="file"),
+ Resource(name="bbb-resource", optional=False, revision=3, resource_type="file"),
+ Resource(name="aaa-resource", optional=True, revision=2, resource_type="oci-image"),
Resource(name="bbb-resource", optional=True, revision=5, resource_type="file"),
]
store_mock.list_resources.return_value = store_response
- args = Namespace(charm_name="testcharm")
+ args = Namespace(charm_name="testcharm", format=formatted)
ListResourcesCommand(config).run(args)
- expected = [
- "Charm Rev Resource Type Optional",
- "5 bbb-resource file True",
- "3 bbb-resource file True",
- "2 aaa-resource file True",
- " bbb-resource file True",
- "1 ccc-resource file True",
- ]
- emitter.assert_messages(expected)
+ if formatted:
+ expected = [
+ {
+ "charm_revision": 2,
+ "name": "bbb-resource",
+ "type": "file",
+ "optional": True,
+ },
+ {
+ "charm_revision": 1,
+ "name": "ccc-resource",
+ "type": "file",
+ "optional": True,
+ },
+ {
+ "charm_revision": 3,
+ "name": "bbb-resource",
+ "type": "file",
+ "optional": False,
+ },
+ {
+ "charm_revision": 2,
+ "name": "aaa-resource",
+ "type": "oci-image",
+ "optional": True,
+ },
+ {
+ "charm_revision": 5,
+ "name": "bbb-resource",
+ "type": "file",
+ "optional": True,
+ },
+ ]
+ emitter.assert_json_output(expected)
+ else:
+ expected = [
+ "Charm Rev Resource Type Optional",
+ "5 bbb-resource file True",
+ "3 bbb-resource file False",
+ "2 aaa-resource oci-image True",
+ " bbb-resource file True",
+ "1 ccc-resource file True",
+ ]
+ emitter.assert_messages(expected)
# -- tests for upload resources command
@@ -4302,7 +4354,8 @@ def test_uploadresource_options_bad_combinations(config, sysargs, tmp_path, monk
cmd.parsed_args_post_verification(parser, parsed_args)
-def test_uploadresource_filepath_call_ok(emitter, store_mock, config, tmp_path):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_uploadresource_filepath_call_ok(emitter, store_mock, config, tmp_path, formatted):
"""Simple upload, success result."""
store_response = Uploaded(ok=True, status=200, revision=7, errors=[])
store_mock.upload_resource.return_value = store_response
@@ -4314,6 +4367,7 @@ def test_uploadresource_filepath_call_ok(emitter, store_mock, config, tmp_path):
resource_name="myresource",
filepath=test_resource,
image=None,
+ format=formatted,
)
retcode = UploadResourceCommand(config).run(args)
assert retcode == 0
@@ -4321,17 +4375,24 @@ def test_uploadresource_filepath_call_ok(emitter, store_mock, config, tmp_path):
assert store_mock.mock_calls == [
call.upload_resource("mycharm", "myresource", "file", test_resource)
]
- emitter.assert_interactions(
- [
- call("progress", f"Uploading resource directly from file {str(test_resource)!r}."),
- call("message", "Revision 7 created of resource 'myresource' for charm 'mycharm'."),
- ]
- )
+ if formatted:
+ expected = {"revision": 7}
+ emitter.assert_json_output(expected)
+ else:
+ emitter.assert_interactions(
+ [
+ call("progress", f"Uploading resource directly from file {str(test_resource)!r}."),
+ call(
+ "message", "Revision 7 created of resource 'myresource' for charm 'mycharm'."
+ ),
+ ]
+ )
assert test_resource.exists() # provided by the user, don't touch it
@pytest.mark.skipif(sys.platform == "win32", reason="Windows not [yet] supported")
-def test_uploadresource_image_call_already_uploaded(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_uploadresource_image_call_already_uploaded(emitter, store_mock, config, formatted):
"""Upload an oci-image resource, the image itself already being in the registry."""
# fake credentials for the charm/resource, and the final json content
store_mock.get_oci_registry_credentials.return_value = RegistryCredentials(
@@ -4368,6 +4429,7 @@ def test_uploadresource_image_call_already_uploaded(emitter, store_mock, config)
resource_name="myresource",
filepath=None,
image=original_image_digest,
+ format=formatted,
)
with patch("charmcraft.commands.store.ImageHandler", autospec=True) as im_class_mock:
with patch("charmcraft.commands.store.OCIRegistry", autospec=True) as reg_class_mock:
@@ -4402,17 +4464,23 @@ def test_uploadresource_image_call_already_uploaded(emitter, store_mock, config)
call.upload_resource("mycharm", "myresource", "oci-image", uploaded_resource_filepath),
]
- emitter.assert_interactions(
- [
- call(
- "progress",
- "Uploading resource from image "
- "charm/charm-id/test-image-name @ test-digest-given-by-user.",
- ),
- call("message", "Using OCI image from Canonical's registry.", intermediate=True),
- call("message", "Revision 7 created of resource 'myresource' for charm 'mycharm'."),
- ]
- )
+ if formatted:
+ expected = {"revision": 7}
+ emitter.assert_json_output(expected)
+ else:
+ emitter.assert_interactions(
+ [
+ call(
+ "progress",
+ "Uploading resource from image "
+ "charm/charm-id/test-image-name @ test-digest-given-by-user.",
+ ),
+ call("progress", "Using OCI image from Canonical's registry."),
+ call(
+ "message", "Revision 7 created of resource 'myresource' for charm 'mycharm'."
+ ),
+ ]
+ )
@pytest.mark.skipif(sys.platform == "win32", reason="Windows not [yet] supported")
@@ -4437,6 +4505,7 @@ def test_uploadresource_image_call_upload_from_local(emitter, store_mock, config
resource_name="myresource",
filepath=None,
image=original_image_digest,
+ format=False,
)
with patch("charmcraft.commands.store.ImageHandler", autospec=True) as im_class_mock:
with patch("charmcraft.commands.store.OCIRegistry", autospec=True) as reg_class_mock:
@@ -4471,14 +4540,12 @@ def test_uploadresource_image_call_upload_from_local(emitter, store_mock, config
"charm/charm-id/test-image-name @ test-digest-given-by-user.",
),
call(
- "message",
+ "progress",
"Remote image not found, uploading from local registry.",
- intermediate=True,
),
call(
- "message",
+ "progress",
"Image uploaded, new remote digest: new-digest-after-upload.",
- intermediate=True,
),
call("message", "Revision 7 created of resource 'myresource' for charm 'mycharm'."),
]
@@ -4501,6 +4568,7 @@ def test_uploadresource_image_call_missing_everywhere(emitter, store_mock, confi
resource_name="myresource",
filepath=None,
image=original_image_digest,
+ format=False,
)
with patch("charmcraft.commands.store.ImageHandler", autospec=True) as im_class_mock:
with patch("charmcraft.commands.store.OCIRegistry", autospec=True) as reg_class_mock:
@@ -4532,21 +4600,20 @@ def test_uploadresource_image_call_missing_everywhere(emitter, store_mock, confi
"image charm/charm-id/test-image-name @ test-digest-given-by-user.",
),
call(
- "message",
+ "progress",
"Remote image not found, uploading from local registry.",
- intermediate=True,
),
call(
- "message",
+ "progress",
"Image with digest test-digest-given-by-user is not available in "
"the Canonical's registry nor locally.",
- intermediate=True,
),
]
)
-def test_uploadresource_call_error(emitter, store_mock, config, tmp_path):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_uploadresource_call_error(emitter, store_mock, config, tmp_path, formatted):
"""Simple upload but with a response indicating an error."""
errors = [
Error(message="text 1", code="missing-stuff"),
@@ -4557,54 +4624,81 @@ def test_uploadresource_call_error(emitter, store_mock, config, tmp_path):
test_resource = tmp_path / "mystuff.bin"
test_resource.write_text("sample stuff")
- args = Namespace(charm_name="mycharm", resource_name="myresource", filepath=test_resource)
+ args = Namespace(
+ charm_name="mycharm", resource_name="myresource", filepath=test_resource, format=formatted
+ )
retcode = UploadResourceCommand(config).run(args)
assert retcode == 1
- emitter.assert_messages(
- [
- "Upload failed with status 400:",
- "- missing-stuff: text 1",
- "- broken: other long error text",
- ]
- )
+ if formatted:
+ expected = {
+ "errors": [
+ {"code": "missing-stuff", "message": "text 1"},
+ {"code": "broken", "message": "other long error text"},
+ ]
+ }
+ emitter.assert_json_output(expected)
+ else:
+ emitter.assert_messages(
+ [
+ "Upload failed with status 400:",
+ "- missing-stuff: text 1",
+ "- broken: other long error text",
+ ]
+ )
# -- tests for list resource revisions command
-def test_resourcerevisions_simple(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_resourcerevisions_simple(emitter, store_mock, config, formatted):
"""Happy path of one result from the Store."""
store_response = [
ResourceRevision(revision=1, size=50, created_at=datetime.datetime(2020, 7, 3, 2, 30, 40)),
]
store_mock.list_resource_revisions.return_value = store_response
- args = Namespace(charm_name="testcharm", resource_name="testresource")
+ args = Namespace(charm_name="testcharm", resource_name="testresource", format=formatted)
ListResourceRevisionsCommand(config).run(args)
assert store_mock.mock_calls == [
call.list_resource_revisions("testcharm", "testresource"),
]
- expected = [
- "Revision Created at Size",
- "1 2020-07-03T02:30:40Z 50B",
- ]
- emitter.assert_messages(expected)
+ if formatted:
+ expected = [
+ {
+ "revision": 1,
+ "created at": "2020-07-03T02:30:40Z",
+ "size": 50,
+ },
+ ]
+ emitter.assert_json_output(expected)
+ else:
+ expected = [
+ "Revision Created at Size",
+ "1 2020-07-03T02:30:40Z 50B",
+ ]
+ emitter.assert_messages(expected)
-def test_resourcerevisions_empty(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_resourcerevisions_empty(emitter, store_mock, config, formatted):
"""No results from the store."""
store_response = []
store_mock.list_resource_revisions.return_value = store_response
- args = Namespace(charm_name="testcharm", resource_name="testresource")
+ args = Namespace(charm_name="testcharm", resource_name="testresource", format=formatted)
ListResourceRevisionsCommand(config).run(args)
- emitter.assert_message("No revisions found.")
+ if formatted:
+ emitter.assert_json_output([])
+ else:
+ emitter.assert_message("No revisions found.")
-def test_resourcerevisions_ordered_by_revision(emitter, store_mock, config):
+@pytest.mark.parametrize("formatted", [None, JSON_FORMAT])
+def test_resourcerevisions_ordered_by_revision(emitter, store_mock, config, formatted):
"""Results are presented ordered by revision in the table."""
# three Revisions with all values weirdly similar, the only difference is revision, so
# we really assert later that it was used for ordering
@@ -4617,14 +4711,39 @@ def test_resourcerevisions_ordered_by_revision(emitter, store_mock, config):
]
store_mock.list_resource_revisions.return_value = store_response
- args = Namespace(charm_name="testcharm", resource_name="testresource")
+ args = Namespace(charm_name="testcharm", resource_name="testresource", format=formatted)
ListResourceRevisionsCommand(config).run(args)
- expected = [
- "Revision Created at Size",
- "4 2020-07-03T20:30:40Z 856.0K",
- "3 2020-07-03T20:30:40Z 32.9M",
- "2 2020-07-03T20:30:40Z 50B",
- "1 2020-07-03T20:30:40Z 4.9K",
- ]
- emitter.assert_messages(expected)
+ if formatted:
+ expected = [
+ {
+ "revision": 1,
+ "created at": "2020-07-03T20:30:40Z",
+ "size": 5000,
+ },
+ {
+ "revision": 3,
+ "created at": "2020-07-03T20:30:40Z",
+ "size": 34450520,
+ },
+ {
+ "revision": 4,
+ "created at": "2020-07-03T20:30:40Z",
+ "size": 876543,
+ },
+ {
+ "revision": 2,
+ "created at": "2020-07-03T20:30:40Z",
+ "size": 50,
+ },
+ ]
+ emitter.assert_json_output(expected)
+ else:
+ expected = [
+ "Revision Created at Size",
+ "4 2020-07-03T20:30:40Z 856.0K",
+ "3 2020-07-03T20:30:40Z 32.9M",
+ "2 2020-07-03T20:30:40Z 50B",
+ "1 2020-07-03T20:30:40Z 4.9K",
+ ]
+ emitter.assert_messages(expected)
| approach for using store commands programmatically
Hello,
We are trying to create some release infrastructure to help us automate releasing charms to the store. Is anyone currently using store commands programmatically? It seems we currently need to authenticate against Ubuntu One from the browser. Is there a way to do this without a human?
Thanks | 0.0 | 9150bf310d9676d7047b1303177fbd8c4e1c766c | [
"tests/commands/test_store_commands.py::test_resources_simple[json]",
"tests/commands/test_store_commands.py::test_resources_empty[json]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[json]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded[None]",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[json]"
]
| [
"tests/commands/test_store_commands.py::test_get_name_from_metadata_ok",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_no_file",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_garbage",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_no_name",
"tests/commands/test_store_commands.py::test_login_simple",
"tests/commands/test_store_commands.py::test_login_exporting",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[charm]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[bundle]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[permission]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[channel]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[ttl]",
"tests/commands/test_store_commands.py::test_login_restricting_ttl",
"tests/commands/test_store_commands.py::test_login_restricting_channels",
"tests/commands/test_store_commands.py::test_login_restricting_permissions",
"tests/commands/test_store_commands.py::test_login_restricting_permission_invalid",
"tests/commands/test_store_commands.py::test_login_restricting_charms",
"tests/commands/test_store_commands.py::test_login_restricting_bundles",
"tests/commands/test_store_commands.py::test_login_restriction_mix",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_logout_but_not_logged_in",
"tests/commands/test_store_commands.py::test_whoami[None]",
"tests/commands/test_store_commands.py::test_whoami[json]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[None]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[json]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[None]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[json]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[None]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[json]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[None]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[json]",
"tests/commands/test_store_commands.py::test_whoami_comprehensive",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_list_registered_empty[None]",
"tests/commands/test_store_commands.py::test_list_registered_empty[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[json]",
"tests/commands/test_store_commands.py::test_list_registered_several[None]",
"tests/commands/test_store_commands.py::test_list_registered_several[json]",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok[json]",
"tests/commands/test_store_commands.py::test_upload_call_error[None]",
"tests/commands/test_store_commands.py::test_upload_call_error[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[None]",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[json]",
"tests/commands/test_store_commands.py::test_upload_options_resource",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_charm_with_init_template_todo_token",
"tests/commands/test_store_commands.py::test_upload_with_different_name_than_in_metadata",
"tests/commands/test_store_commands.py::test_revisions_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_empty[None]",
"tests/commands/test_store_commands.py::test_revisions_empty[json]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[json]",
"tests/commands/test_store_commands.py::test_revisions_version_null[None]",
"tests/commands/test_store_commands.py::test_revisions_version_null[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[json]",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_close_simple_ok",
"tests/commands/test_store_commands.py::test_status_simple_ok[None]",
"tests/commands/test_store_commands.py::test_status_simple_ok[json]",
"tests/commands/test_store_commands.py::test_status_empty[None]",
"tests/commands/test_store_commands.py::test_status_empty[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[json]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[None]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[json]",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch[None]",
"tests/commands/test_store_commands.py::test_status_with_one_branch[json]",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources[None]",
"tests/commands/test_store_commands.py::test_status_with_resources[json]",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[None]",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[json]",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[None]",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[json]",
"tests/commands/test_store_commands.py::test_status_unreleased_track",
"tests/commands/test_store_commands.py::test_createlib_simple[None]",
"tests/commands/test_store_commands.py::test_createlib_simple[json]",
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_path_already_there",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple[None]",
"tests/commands/test_store_commands.py::test_publishlib_simple[json]",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all[None]",
"tests/commands/test_store_commands.py::test_publishlib_all[json]",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[json]",
"tests/commands/test_store_commands.py::test_getlibinfo_success_simple",
"tests/commands/test_store_commands.py::test_getlibinfo_success_content",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.v3.testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[mycharms.testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_not_importable_charm_name",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.html]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[mycharms/testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-three.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.vX.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_name",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_path",
"tests/commands/test_store_commands.py::test_getlibinfo_malformed_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_api_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_api_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_api_patch_both_zero",
"tests/commands/test_store_commands.py::test_getlibinfo_metadata_api_different_path_api",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_string",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_ascii",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_empty",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[None]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[json]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all[None]",
"tests/commands/test_store_commands.py::test_fetchlib_all[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[json]",
"tests/commands/test_store_commands.py::test_listlib_simple[None]",
"tests/commands/test_store_commands.py::test_listlib_simple[json]",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty[None]",
"tests/commands/test_store_commands.py::test_listlib_empty[json]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[None]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[json]",
"tests/commands/test_store_commands.py::test_resources_simple[None]",
"tests/commands/test_store_commands.py::test_resources_empty[None]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[None]",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[None]",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[None]"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-06-13 13:10:05+00:00 | apache-2.0 | 1,468 |
|
canonical__charmcraft-802 | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index bb4e784..5ae755b 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -896,7 +896,7 @@ class StatusCommand(BaseCommand):
unreleased_track = {None: {}} # base in None with no releases at all
for track, (channels, branches) in per_track.items():
prog_channels_info = []
- prog_data.append({"track": track, "channels": prog_channels_info})
+ prog_data.append({"track": track, "mappings": prog_channels_info})
releases_by_base = releases_by_track.get(track, unreleased_track)
shown_track = track
@@ -924,8 +924,6 @@ class StatusCommand(BaseCommand):
release_shown_for_this_track_base = False
for channel in channels:
- description = channel.risk
-
# get the release of the channel, fallbacking accordingly
release = releases_by_channel.get(channel.name)
if release is None:
@@ -943,7 +941,7 @@ class StatusCommand(BaseCommand):
prog_resources = self._build_resources_prog(release.resources)
prog_status = "open"
- datum = [shown_track, shown_base, description, version, revno]
+ datum = [shown_track, shown_base, channel.risk, version, revno]
if resources_present:
datum.append(resources)
human_data.append(datum)
@@ -951,7 +949,7 @@ class StatusCommand(BaseCommand):
prog_releases_info.append(
{
"status": prog_status,
- "channel": description,
+ "channel": channel.name,
"version": prog_version,
"revision": prog_revno,
"resources": prog_resources,
@@ -980,7 +978,7 @@ class StatusCommand(BaseCommand):
prog_releases_info.append(
{
"status": "open",
- "channel": description,
+ "channel": branch.name,
"version": revision.version,
"revision": release.revision,
"resources": self._build_resources_prog(release.resources),
| canonical/charmcraft | 46cd23688b8d4ad0589ca95a987d597689d7b84d | diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index ef21c2d..b5edb894 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -1533,7 +1533,7 @@ def test_status_simple_ok(emitter, store_mock, config, formatted):
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -1543,7 +1543,7 @@ def test_status_simple_ok(emitter, store_mock, config, formatted):
"releases": [
{
"status": "open",
- "channel": "stable",
+ "channel": "latest/stable",
"version": "v7",
"revision": 7,
"resources": [],
@@ -1551,7 +1551,7 @@ def test_status_simple_ok(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": "v7",
"revision": 7,
"resources": [],
@@ -1559,7 +1559,7 @@ def test_status_simple_ok(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "beta",
+ "channel": "latest/beta",
"version": "2.0",
"revision": 80,
"resources": [],
@@ -1567,7 +1567,7 @@ def test_status_simple_ok(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "edge",
+ "channel": "latest/edge",
"version": "git-0db35ea1",
"revision": 156,
"resources": [],
@@ -1629,7 +1629,7 @@ def test_status_channels_not_released_with_fallback(emitter, store_mock, config,
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -1639,7 +1639,7 @@ def test_status_channels_not_released_with_fallback(emitter, store_mock, config,
"releases": [
{
"status": "open",
- "channel": "stable",
+ "channel": "latest/stable",
"version": "v7",
"revision": 7,
"resources": [],
@@ -1647,7 +1647,7 @@ def test_status_channels_not_released_with_fallback(emitter, store_mock, config,
},
{
"status": "tracking",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": None,
"revision": None,
"resources": None,
@@ -1655,7 +1655,7 @@ def test_status_channels_not_released_with_fallback(emitter, store_mock, config,
},
{
"status": "tracking",
- "channel": "beta",
+ "channel": "latest/beta",
"version": None,
"revision": None,
"resources": None,
@@ -1663,7 +1663,7 @@ def test_status_channels_not_released_with_fallback(emitter, store_mock, config,
},
{
"status": "open",
- "channel": "edge",
+ "channel": "latest/edge",
"version": "2.0",
"revision": 80,
"resources": [],
@@ -1711,7 +1711,7 @@ def test_status_channels_not_released_without_fallback(emitter, store_mock, conf
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -1721,7 +1721,7 @@ def test_status_channels_not_released_without_fallback(emitter, store_mock, conf
"releases": [
{
"status": "closed",
- "channel": "stable",
+ "channel": "latest/stable",
"version": None,
"revision": None,
"resources": None,
@@ -1729,7 +1729,7 @@ def test_status_channels_not_released_without_fallback(emitter, store_mock, conf
},
{
"status": "closed",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": None,
"revision": None,
"resources": None,
@@ -1737,7 +1737,7 @@ def test_status_channels_not_released_without_fallback(emitter, store_mock, conf
},
{
"status": "open",
- "channel": "beta",
+ "channel": "latest/beta",
"version": "5.1",
"revision": 5,
"resources": [],
@@ -1745,7 +1745,7 @@ def test_status_channels_not_released_without_fallback(emitter, store_mock, conf
},
{
"status": "open",
- "channel": "edge",
+ "channel": "latest/edge",
"version": "almostready",
"revision": 12,
"resources": [],
@@ -1795,7 +1795,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -1805,7 +1805,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
"releases": [
{
"status": "open",
- "channel": "stable",
+ "channel": "latest/stable",
"version": "7.5.3",
"revision": 503,
"resources": [],
@@ -1813,7 +1813,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": None,
"revision": None,
"resources": None,
@@ -1821,7 +1821,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "beta",
+ "channel": "latest/beta",
"version": None,
"revision": None,
"resources": None,
@@ -1829,7 +1829,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "edge",
+ "channel": "latest/edge",
"version": None,
"revision": None,
"resources": None,
@@ -1841,7 +1841,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"track": "2.0",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -1851,7 +1851,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
"releases": [
{
"status": "closed",
- "channel": "stable",
+ "channel": "2.0/stable",
"version": None,
"revision": None,
"resources": None,
@@ -1859,7 +1859,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"status": "closed",
- "channel": "candidate",
+ "channel": "2.0/candidate",
"version": None,
"revision": None,
"resources": None,
@@ -1867,7 +1867,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"status": "closed",
- "channel": "beta",
+ "channel": "2.0/beta",
"version": None,
"revision": None,
"resources": None,
@@ -1875,7 +1875,7 @@ def test_status_multiple_tracks(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "edge",
+ "channel": "2.0/edge",
"version": "1",
"revision": 1,
"resources": [],
@@ -1991,7 +1991,7 @@ def test_status_with_one_branch(emitter, store_mock, config, formatted):
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -2001,7 +2001,7 @@ def test_status_with_one_branch(emitter, store_mock, config, formatted):
"releases": [
{
"status": "closed",
- "channel": "stable",
+ "channel": "latest/stable",
"version": None,
"revision": None,
"resources": None,
@@ -2009,7 +2009,7 @@ def test_status_with_one_branch(emitter, store_mock, config, formatted):
},
{
"status": "closed",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": None,
"revision": None,
"resources": None,
@@ -2017,7 +2017,7 @@ def test_status_with_one_branch(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "beta",
+ "channel": "latest/beta",
"version": "5.1",
"revision": 5,
"resources": [],
@@ -2025,7 +2025,7 @@ def test_status_with_one_branch(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "edge",
+ "channel": "latest/edge",
"version": None,
"revision": None,
"resources": None,
@@ -2033,7 +2033,7 @@ def test_status_with_one_branch(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "beta/mybranch",
+ "channel": "latest/beta/mybranch",
"version": "ver.12",
"revision": 12,
"resources": [],
@@ -2132,7 +2132,7 @@ def test_status_with_resources(emitter, store_mock, config, formatted):
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -2142,7 +2142,7 @@ def test_status_with_resources(emitter, store_mock, config, formatted):
"releases": [
{
"status": "closed",
- "channel": "stable",
+ "channel": "latest/stable",
"version": None,
"revision": None,
"resources": None,
@@ -2150,7 +2150,7 @@ def test_status_with_resources(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": "5.1",
"revision": 5,
"resources": [
@@ -2161,7 +2161,7 @@ def test_status_with_resources(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "beta",
+ "channel": "latest/beta",
"version": "5.1",
"revision": 5,
"resources": [{"name": "resource1", "revision": 1}],
@@ -2169,7 +2169,7 @@ def test_status_with_resources(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "edge",
+ "channel": "latest/edge",
"version": None,
"revision": None,
"resources": None,
@@ -2292,7 +2292,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": {
"name": "ubuntu",
@@ -2302,7 +2302,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
"releases": [
{
"status": "closed",
- "channel": "stable",
+ "channel": "latest/stable",
"version": None,
"revision": None,
"resources": None,
@@ -2310,7 +2310,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
},
{
"status": "open",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": "v7",
"revision": 7,
"resources": [],
@@ -2318,7 +2318,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
},
{
"status": "tracking",
- "channel": "beta",
+ "channel": "latest/beta",
"version": None,
"revision": None,
"resources": None,
@@ -2326,7 +2326,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
},
{
"status": "open",
- "channel": "edge",
+ "channel": "latest/edge",
"version": "git-0db35ea1",
"revision": 156,
"resources": [],
@@ -2343,7 +2343,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
"releases": [
{
"status": "open",
- "channel": "stable",
+ "channel": "latest/stable",
"version": "v7",
"revision": 7,
"resources": [],
@@ -2351,7 +2351,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
},
{
"status": "tracking",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": None,
"revision": None,
"resources": None,
@@ -2359,7 +2359,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
},
{
"status": "open",
- "channel": "beta",
+ "channel": "latest/beta",
"version": "2.0",
"revision": 80,
"resources": [],
@@ -2367,7 +2367,7 @@ def test_status_multiplebases_single_track(emitter, store_mock, config, formatte
},
{
"status": "tracking",
- "channel": "edge",
+ "channel": "latest/edge",
"version": None,
"revision": None,
"resources": None,
@@ -2561,13 +2561,13 @@ def test_status_with_base_in_none(emitter, store_mock, config, formatted):
expected = [
{
"track": "latest",
- "channels": [
+ "mappings": [
{
"base": None,
"releases": [
{
"status": "open",
- "channel": "stable",
+ "channel": "latest/stable",
"version": "v7",
"revision": 7,
"resources": [],
@@ -2575,7 +2575,7 @@ def test_status_with_base_in_none(emitter, store_mock, config, formatted):
},
{
"status": "open",
- "channel": "candidate",
+ "channel": "latest/candidate",
"version": "v7",
"revision": 7,
"resources": [],
@@ -2583,7 +2583,7 @@ def test_status_with_base_in_none(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "beta",
+ "channel": "latest/beta",
"version": None,
"revision": None,
"resources": None,
@@ -2591,7 +2591,7 @@ def test_status_with_base_in_none(emitter, store_mock, config, formatted):
},
{
"status": "tracking",
- "channel": "edge",
+ "channel": "latest/edge",
"version": None,
"revision": None,
"resources": None,
| Update `charmcraft status charmname --format json` output terminology
An example (truncated) output from `charmcraft status metacontroller-operator --format json` is given by:
```
charmcraft status metacontroller-operator --format json
[
{
"track": "latest",
"channels": [ # <------------ A) Not sure what this should be
{
"base": {
"name": "ubuntu",
"channel": "20.04",
"architecture": "amd64"
},
"releases": [
{
"status": "open",
"channel": "stable", # <---------- B) Should this be "risk"?
"version": "2",
"revision": 2,
"resources": [],
"expires_at": null
},
...
}
]
}
]
},
]
```
The name `channel` is used twice, neither of which, afaik, describes a channel ([snapcraft definition](https://snapcraft.io/docs/channels), [charmhub definition](https://discourse.charmhub.io/t/channel/6562)). Pointing to the letters added above:
* A) I'm not sure what this would be, but I don't think it is a channel because it excludes `track`
* B) I think this is `risk`, not a `channel` | 0.0 | 46cd23688b8d4ad0589ca95a987d597689d7b84d | [
"tests/commands/test_store_commands.py::test_status_simple_ok[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[json]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[json]",
"tests/commands/test_store_commands.py::test_status_with_one_branch[json]",
"tests/commands/test_store_commands.py::test_status_with_resources[json]",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[json]",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[json]"
]
| [
"tests/commands/test_store_commands.py::test_get_name_from_metadata_ok",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_no_file",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_garbage",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_no_name",
"tests/commands/test_store_commands.py::test_login_simple",
"tests/commands/test_store_commands.py::test_login_exporting",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[charm]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[bundle]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[permission]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[channel]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[ttl]",
"tests/commands/test_store_commands.py::test_login_restricting_ttl",
"tests/commands/test_store_commands.py::test_login_restricting_channels",
"tests/commands/test_store_commands.py::test_login_restricting_permissions",
"tests/commands/test_store_commands.py::test_login_restricting_permission_invalid",
"tests/commands/test_store_commands.py::test_login_restricting_charms",
"tests/commands/test_store_commands.py::test_login_restricting_bundles",
"tests/commands/test_store_commands.py::test_login_restriction_mix",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_logout_but_not_logged_in",
"tests/commands/test_store_commands.py::test_whoami[None]",
"tests/commands/test_store_commands.py::test_whoami[json]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[None]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[json]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[None]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[json]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[None]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[json]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[None]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[json]",
"tests/commands/test_store_commands.py::test_whoami_comprehensive",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_list_registered_empty[None]",
"tests/commands/test_store_commands.py::test_list_registered_empty[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[json]",
"tests/commands/test_store_commands.py::test_list_registered_several[None]",
"tests/commands/test_store_commands.py::test_list_registered_several[json]",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok[json]",
"tests/commands/test_store_commands.py::test_upload_call_error[None]",
"tests/commands/test_store_commands.py::test_upload_call_error[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[None]",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[json]",
"tests/commands/test_store_commands.py::test_upload_options_resource",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_charm_with_init_template_todo_token",
"tests/commands/test_store_commands.py::test_upload_with_different_name_than_in_metadata",
"tests/commands/test_store_commands.py::test_revisions_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_empty[None]",
"tests/commands/test_store_commands.py::test_revisions_empty[json]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[json]",
"tests/commands/test_store_commands.py::test_revisions_version_null[None]",
"tests/commands/test_store_commands.py::test_revisions_version_null[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[json]",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_close_simple_ok",
"tests/commands/test_store_commands.py::test_status_simple_ok[None]",
"tests/commands/test_store_commands.py::test_status_empty[None]",
"tests/commands/test_store_commands.py::test_status_empty[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[None]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[None]",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch[None]",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources[None]",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[None]",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[None]",
"tests/commands/test_store_commands.py::test_status_unreleased_track",
"tests/commands/test_store_commands.py::test_createlib_simple[None]",
"tests/commands/test_store_commands.py::test_createlib_simple[json]",
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_path_already_there",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple[None]",
"tests/commands/test_store_commands.py::test_publishlib_simple[json]",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all[None]",
"tests/commands/test_store_commands.py::test_publishlib_all[json]",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[json]",
"tests/commands/test_store_commands.py::test_getlibinfo_success_simple",
"tests/commands/test_store_commands.py::test_getlibinfo_success_content",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.v3.testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[mycharms.testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_not_importable_charm_name",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.html]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[mycharms/testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-three.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.vX.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_name",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_path",
"tests/commands/test_store_commands.py::test_getlibinfo_malformed_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_api_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_api_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_api_patch_both_zero",
"tests/commands/test_store_commands.py::test_getlibinfo_metadata_api_different_path_api",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_string",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_ascii",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_empty",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[None]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[json]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all[None]",
"tests/commands/test_store_commands.py::test_fetchlib_all[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[json]",
"tests/commands/test_store_commands.py::test_listlib_simple[None]",
"tests/commands/test_store_commands.py::test_listlib_simple[json]",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty[None]",
"tests/commands/test_store_commands.py::test_listlib_empty[json]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[None]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[json]",
"tests/commands/test_store_commands.py::test_resources_simple[None]",
"tests/commands/test_store_commands.py::test_resources_simple[json]",
"tests/commands/test_store_commands.py::test_resources_empty[None]",
"tests/commands/test_store_commands.py::test_resources_empty[json]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[None]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[json]",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[None]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded[None]",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[None]",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[json]"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-07-05 12:15:06+00:00 | apache-2.0 | 1,469 |
|
canonical__charmcraft-866 | diff --git a/charmcraft/reactive_plugin.py b/charmcraft/reactive_plugin.py
index bc50b20..337ac49 100644
--- a/charmcraft/reactive_plugin.py
+++ b/charmcraft/reactive_plugin.py
@@ -14,6 +14,7 @@
"""Charmcraft's reactive plugin for craft-parts."""
+import json
import shlex
import subprocess
import sys
@@ -61,14 +62,19 @@ class ReactivePluginEnvironmentValidator(plugins.validator.PluginEnvironmentVali
:raises PluginEnvironmentValidationError: If the environment is invalid.
"""
try:
- output = self._execute("charm version").strip()
- _, tools_version = output.split("\n")
-
- if not tools_version.startswith("charm-tools"):
+ version_data = json.loads(self._execute("charm version --format json"))
+
+ tool_name = "charm-tools"
+ if not (
+ tool_name in version_data
+ and "version" in version_data[tool_name]
+ and "git" in version_data[tool_name]
+ ):
raise PluginEnvironmentValidationError(
part_name=self._part_name,
- reason=f"invalid charm tools version {tools_version}",
+ reason=f"invalid charm tools version {version_data}",
)
+ tools_version = f"{tool_name} {version_data[tool_name]['version']} ({version_data[tool_name]['git']})"
emit.debug(f"found {tools_version}")
except ValueError as err:
raise PluginEnvironmentValidationError(
| canonical/charmcraft | 7790c04425256960b954ec62747154f69402ec8a | diff --git a/tests/test_reactive_plugin.py b/tests/test_reactive_plugin.py
index 8e12027..bea2715 100644
--- a/tests/test_reactive_plugin.py
+++ b/tests/test_reactive_plugin.py
@@ -36,7 +36,7 @@ def charm_exe(tmp_path):
charm_bin = pathlib.Path(tmp_path, "mock_bin", "charm")
charm_bin.parent.mkdir(exist_ok=True)
charm_bin.write_text(
- '#!/bin/sh\necho "charmstore-client 2.5.1"\necho "charm-tools version 2.8.2"'
+ '#!/bin/sh\necho \'{"charm-tools": {"version": "2.8.4", "git": "+git-7-6126e17", "gitn": 7, "gitsha": "6126e17", "pre_release": false, "snap": "+snap-x12"}}\''
)
charm_bin.chmod(0o755)
yield charm_bin
 | reactive plugin charm tools version validation code is brittle
The charm tools may soon remove the now-redundant legacy charm store code; when that happens, the output from the version command may no longer use the format expected here:
https://github.com/canonical/charmcraft/blob/dbf82a0fbaa6633248bb213d3943603c1e7c50a6/charmcraft/reactive_plugin.py#L64-L65
I would recommend either using the `--format json` argument or looking for the `charm-tools` string in the output, rather than relying on a newline character being present at that position.
Ref: https://github.com/juju/charm-tools/blob/17812f0e00ad634f8440053d43d1a5370a71fa50/charmtools/version.py#L15-L25 | 0.0 | 7790c04425256960b954ec62747154f69402ec8a | [
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_environment"
]
| [
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_package",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_snaps",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_environment",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_commands",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_invalid_properties",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_environment_with_charm_part",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_missing_charm",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_broken_charm",
"tests/test_reactive_plugin.py::test_build",
"tests/test_reactive_plugin.py::test_build_charm_proof_raises_error_messages",
"tests/test_reactive_plugin.py::test_build_charm_proof_raises_warning_messages_does_not_raise",
"tests/test_reactive_plugin.py::test_build_charm_build_raises_error_messages",
"tests/test_reactive_plugin.py::test_build_charm_build_raises_warning_messages_does_not_raise"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-09-15 13:19:53+00:00 | apache-2.0 | 1,470 |
|
canonical__charmcraft-869 | diff --git a/charmcraft/reactive_plugin.py b/charmcraft/reactive_plugin.py
index 5dcc4c3..350dcb3 100644
--- a/charmcraft/reactive_plugin.py
+++ b/charmcraft/reactive_plugin.py
@@ -21,7 +21,6 @@ import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, cast
-from craft_cli import emit
from craft_parts import plugins
from craft_parts.errors import PluginEnvironmentValidationError
@@ -76,7 +75,7 @@ class ReactivePluginEnvironmentValidator(plugins.validator.PluginEnvironmentVali
reason=f"invalid charm tools version {version_data}",
)
tools_version = f"{tool_name} {version_data[tool_name]['version']} ({version_data[tool_name]['git']})"
- emit.debug(f"found {tools_version}")
+ print(f"found {tools_version}")
except ValueError as err:
raise PluginEnvironmentValidationError(
part_name=self._part_name,
@@ -140,6 +139,26 @@ class ReactivePlugin(plugins.Plugin):
return [" ".join(shlex.quote(i) for i in command)]
+def run_charm_tool(args: List[str]):
+ """Run the charm tool, log and check exit code."""
+ result_classification = "SUCCESS"
+ exc = None
+
+ print(f"charm tool execution command={args}")
+ try:
+ completed_process = subprocess.run(args, check=True)
+ except subprocess.CalledProcessError as call_error:
+ exc = call_error
+ if call_error.returncode < 100 or call_error.returncode >= 200:
+ result_classification = "ERROR"
+ raise
+ result_classification = "WARNING"
+ finally:
+ print(
+ f"charm tool execution {result_classification}: returncode={exc.returncode if exc else completed_process.returncode}"
+ )
+
+
def build(
*, charm_name: str, build_dir: Path, install_dir: Path, charm_build_arguments: List[str]
):
@@ -161,11 +180,11 @@ def build(
string produced by `charm` would be misleading.
"""
# Verify the charm is ok from a charm tool point of view.
+
try:
- subprocess.run(["charm", "proof"], check=True)
+ run_charm_tool(["charm", "proof"])
except subprocess.CalledProcessError as call_error:
- if call_error.returncode >= 200:
- return call_error.returncode
+ return call_error.returncode
# Link the installation directory to the place where charm creates
# the charm.
@@ -179,10 +198,9 @@ def build(
cmd.extend(["-o", build_dir])
try:
- subprocess.run(cmd, check=True)
+ run_charm_tool(cmd)
except subprocess.CalledProcessError as call_error:
- if call_error.returncode >= 200:
- return call_error.returncode
+ return call_error.returncode
finally:
charm_build_dir.unlink()
| canonical/charmcraft | a95394f28ff6c0b974d39baa44f6d34530fd6a5f | diff --git a/tests/test_reactive_plugin.py b/tests/test_reactive_plugin.py
index 6322e08..d2a2d85 100644
--- a/tests/test_reactive_plugin.py
+++ b/tests/test_reactive_plugin.py
@@ -16,7 +16,7 @@
import pathlib
import sys
-from subprocess import CalledProcessError
+from subprocess import CalledProcessError, CompletedProcess
from unittest.mock import call, patch
import craft_parts
@@ -173,6 +173,7 @@ def fake_run():
def test_build(build_dir, install_dir, fake_run):
+ fake_run.return_value = CompletedProcess(("charm", "build"), 0)
returncode = reactive_plugin.build(
charm_name="test-charm",
build_dir=build_dir,
@@ -248,7 +249,20 @@ def test_build_charm_proof_raises_warning_messages_does_not_raise(
def test_build_charm_build_raises_error_messages(build_dir, install_dir, fake_run):
- fake_run.side_effect = [None, CalledProcessError(200, "E: name missing")]
+ def _run_generator():
+ """Passing an iterable to `side_effect` pivots the mocks return_value,
+ and does not allow us to raise an actual exception.
+
+ Thus we need this helper to accomplish this.
+
+ Ref: https://docs.python.org/3/library/unittest.mock-examples.html#side-effect-functions-and-iterables
+ """
+ yield CompletedProcess(("charm", "proof"), 0)
+ yield CalledProcessError(200, "E: name missing")
+ yield CompletedProcess(("charm", "proof"), 0)
+ yield CalledProcessError(-1, "E: name missing")
+
+ fake_run.side_effect = _run_generator()
returncode = reactive_plugin.build(
charm_name="test-charm",
@@ -275,11 +289,31 @@ def test_build_charm_build_raises_error_messages(build_dir, install_dir, fake_ru
),
]
+ # Also ensure negative return codes raises error
+ returncode = reactive_plugin.build(
+ charm_name="test-charm",
+ build_dir=build_dir,
+ install_dir=install_dir,
+ charm_build_arguments=["--charm-argument", "--charm-argument-with", "argument"],
+ )
+ assert returncode == -1
+
def test_build_charm_build_raises_warning_messages_does_not_raise(
build_dir, install_dir, fake_run
):
- fake_run.side_effect = [None, CalledProcessError(100, "W: Description is not pretty")]
+ def _run_generator():
+ """Passing an iterable to `side_effect` pivots the mocks return_value,
+ and does not allow us to raise an actual exception.
+
+ Thus we need this helper to accomplish this.
+
+ Ref: https://docs.python.org/3/library/unittest.mock-examples.html#side-effect-functions-and-iterables
+ """
+ yield CompletedProcess(("charm", "proof"), 0)
+ yield CalledProcessError(100, "W: Description is not pretty")
+
+ fake_run.side_effect = _run_generator()
returncode = reactive_plugin.build(
charm_name="test-charm",
| The reactive plugin is not correctly checking if `charm` failed
The `charm` tool used by the reactive plugin has a complex return code specification.
While Charmcraft correctly checks that a return code >= 200 is an error, it fails to verify that a return code < 0 is also an error.
Furthermore, it should log the return code from the execution at debug level (no matter which one it is).
All said above applies for both `charm` calls. | 0.0 | a95394f28ff6c0b974d39baa44f6d34530fd6a5f | [
"tests/test_reactive_plugin.py::test_build_charm_build_raises_error_messages"
]
| [
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_package",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_snaps",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_environment",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_get_build_commands",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_invalid_properties",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_environment",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_environment_with_charm_part",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_missing_charm",
"tests/test_reactive_plugin.py::TestReactivePlugin::test_validate_broken_charm",
"tests/test_reactive_plugin.py::test_build",
"tests/test_reactive_plugin.py::test_build_charm_proof_raises_error_messages",
"tests/test_reactive_plugin.py::test_build_charm_proof_raises_warning_messages_does_not_raise",
"tests/test_reactive_plugin.py::test_build_charm_build_raises_warning_messages_does_not_raise"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-09-16 13:56:09+00:00 | apache-2.0 | 1,471 |
|
canonical__charmcraft-903 | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index 26ab8dd..cbccb21 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -401,6 +401,10 @@ class ListNamesCommand(BaseCommand):
seen by any user, while `private` items are only for you and the
other accounts with permission to collaborate on that specific name.
+ The --include-collaborations option can be included to also list those
+ names you collaborate with; in that case the publisher will be included
+ in the output.
+
Listing names will take you through login if needed.
"""
)
@@ -409,26 +413,36 @@ class ListNamesCommand(BaseCommand):
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
self.include_format_option(parser)
+ parser.add_argument(
+ "--include-collaborations",
+ action="store_true",
+ help="Include the names you are a collaborator of",
+ )
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
- result = store.list_registered_names()
+ with_collab = parsed_args.include_collaborations
+ result = store.list_registered_names(include_collaborations=with_collab)
# build the structure that we need for both human and programmatic output
headers = ["Name", "Type", "Visibility", "Status"]
prog_keys = ["name", "type", "visibility", "status"]
+ if with_collab:
+ headers.append("Publisher")
+ prog_keys.append("publisher")
data = []
for item in result:
visibility = "private" if item.private else "public"
- data.append(
- [
- item.name,
- item.entity_type,
- visibility,
- item.status,
- ]
- )
+ datum = [
+ item.name,
+ item.entity_type,
+ visibility,
+ item.status,
+ ]
+ if with_collab:
+ datum.append(item.publisher_display_name)
+ data.append(datum)
if parsed_args.format:
info = [dict(zip(prog_keys, item)) for item in data]
diff --git a/charmcraft/commands/store/store.py b/charmcraft/commands/store/store.py
index ed887e2..dd6fe74 100644
--- a/charmcraft/commands/store/store.py
+++ b/charmcraft/commands/store/store.py
@@ -33,7 +33,7 @@ from charmcraft.commands.store.client import Client, ALTERNATE_AUTH_ENV_VAR
Account = namedtuple("Account", "name username id")
Package = namedtuple("Package", "id name type")
MacaroonInfo = namedtuple("MacaroonInfo", "account channels packages permissions")
-Entity = namedtuple("Charm", "entity_type name private status")
+Entity = namedtuple("Charm", "entity_type name private status publisher_display_name")
Uploaded = namedtuple("Uploaded", "ok status revision errors")
# XXX Facundo 2020-07-23: Need to do a massive rename to call `revno` to the "revision as
# the number" inside the "revision as the structure", this gets super confusing in the code with
@@ -246,9 +246,12 @@ class Store:
)
@_store_client_wrapper()
- def list_registered_names(self):
+ def list_registered_names(self, include_collaborations):
"""Return names registered by the authenticated user."""
- response = self._client.request_urlpath_json("GET", "/v1/charm")
+ endpoint = "/v1/charm"
+ if include_collaborations:
+ endpoint += "?include-collaborations=true"
+ response = self._client.request_urlpath_json("GET", endpoint)
result = []
for item in response["results"]:
result.append(
@@ -257,6 +260,7 @@ class Store:
private=item["private"],
status=item["status"],
entity_type=item["type"],
+ publisher_display_name=item["publisher"]["display-name"],
)
)
return result
diff --git a/completion.bash b/completion.bash
index baec579..512d0dc 100644
--- a/completion.bash
+++ b/completion.bash
@@ -121,7 +121,7 @@ _charmcraft()
COMPREPLY=( $(compgen -W "${globals[*]} --format" -- "$cur") )
;;
names)
- COMPREPLY=( $(compgen -W "${globals[*]} --format" -- "$cur") )
+ COMPREPLY=( $(compgen -W "${globals[*]} --format --include-collaborations" -- "$cur") )
;;
revisions)
COMPREPLY=( $(compgen -W "${globals[*]} --format" -- "$cur") )
| canonical/charmcraft | 96308c3ebaf7e27e3a5b9ac83b5a96f10b282c2a | diff --git a/tests/commands/test_store_api.py b/tests/commands/test_store_api.py
index 94cc574..c46ac46 100644
--- a/tests/commands/test_store_api.py
+++ b/tests/commands/test_store_api.py
@@ -488,7 +488,7 @@ def test_list_registered_names_empty(client_mock, config):
auth_response = {"results": []}
client_mock.request_urlpath_json.return_value = auth_response
- result = store.list_registered_names()
+ result = store.list_registered_names(include_collaborations=False)
assert client_mock.mock_calls == [call.request_urlpath_json("GET", "/v1/charm")]
assert result == []
@@ -498,15 +498,28 @@ def test_list_registered_names_multiple(client_mock, config):
"""List registered names getting a multiple response."""
store = Store(config.charmhub)
+ publisher = {"display-name": "J. Doe", "other-info": "a lot"}
auth_response = {
"results": [
- {"name": "name1", "type": "charm", "private": False, "status": "status1"},
- {"name": "name2", "type": "bundle", "private": True, "status": "status2"},
+ {
+ "name": "name1",
+ "type": "charm",
+ "private": False,
+ "status": "status1",
+ "publisher": publisher,
+ },
+ {
+ "name": "name2",
+ "type": "bundle",
+ "private": True,
+ "status": "status2",
+ "publisher": publisher,
+ },
]
}
client_mock.request_urlpath_json.return_value = auth_response
- result = store.list_registered_names()
+ result = store.list_registered_names(include_collaborations=False)
assert client_mock.mock_calls == [call.request_urlpath_json("GET", "/v1/charm")]
item1, item2 = result
@@ -514,10 +527,54 @@ def test_list_registered_names_multiple(client_mock, config):
assert item1.entity_type == "charm"
assert not item1.private
assert item1.status == "status1"
+ assert item1.publisher_display_name == "J. Doe"
+ assert item2.name == "name2"
+ assert item2.entity_type == "bundle"
+ assert item2.private
+ assert item2.status == "status2"
+ assert item2.publisher_display_name == "J. Doe"
+
+
+def test_list_registered_names_include_collaborations(client_mock, config):
+ """List registered names including collaborations."""
+ store = Store(config.charmhub)
+
+ auth_response = {
+ "results": [
+ {
+ "name": "name1",
+ "type": "charm",
+ "private": False,
+ "status": "status1",
+ "publisher": {"display-name": "J. Doe", "other-info": "a lot"},
+ },
+ {
+ "name": "name2",
+ "type": "bundle",
+ "private": True,
+ "status": "status2",
+ "publisher": {"display-name": "Anonymous", "other-info": "more"},
+ },
+ ]
+ }
+ client_mock.request_urlpath_json.return_value = auth_response
+
+ result = store.list_registered_names(include_collaborations=True)
+
+ assert client_mock.mock_calls == [
+ call.request_urlpath_json("GET", "/v1/charm?include-collaborations=true")
+ ]
+ item1, item2 = result
+ assert item1.name == "name1"
+ assert item1.entity_type == "charm"
+ assert not item1.private
+ assert item1.status == "status1"
+ assert item1.publisher_display_name == "J. Doe"
assert item2.name == "name2"
assert item2.entity_type == "bundle"
assert item2.private
assert item2.status == "status2"
+ assert item2.publisher_display_name == "Anonymous"
# -- tests for the upload functionality (both for charm/bundles and resources)
diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index 327866f..9c70c9e 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -620,11 +620,11 @@ def test_list_registered_empty(emitter, store_mock, config, formatted):
store_response = []
store_mock.list_registered_names.return_value = store_response
- args = Namespace(format=formatted)
+ args = Namespace(format=formatted, include_collaborations=None)
ListNamesCommand(config).run(args)
assert store_mock.mock_calls == [
- call.list_registered_names(),
+ call.list_registered_names(include_collaborations=None),
]
if formatted:
emitter.assert_json_output([])
@@ -637,15 +637,21 @@ def test_list_registered_empty(emitter, store_mock, config, formatted):
def test_list_registered_one_private(emitter, store_mock, config, formatted):
"""List registered with one private item in the response."""
store_response = [
- Entity(entity_type="charm", name="charm", private=True, status="status"),
+ Entity(
+ entity_type="charm",
+ name="charm",
+ private=True,
+ status="status",
+ publisher_display_name="J. Doe",
+ ),
]
store_mock.list_registered_names.return_value = store_response
- args = Namespace(format=formatted)
+ args = Namespace(format=formatted, include_collaborations=None)
ListNamesCommand(config).run(args)
assert store_mock.mock_calls == [
- call.list_registered_names(),
+ call.list_registered_names(include_collaborations=None),
]
expected = [
"Name Type Visibility Status",
@@ -669,15 +675,21 @@ def test_list_registered_one_private(emitter, store_mock, config, formatted):
def test_list_registered_one_public(emitter, store_mock, config, formatted):
"""List registered with one public item in the response."""
store_response = [
- Entity(entity_type="charm", name="charm", private=False, status="status"),
+ Entity(
+ entity_type="charm",
+ name="charm",
+ private=False,
+ status="status",
+ publisher_display_name="J. Doe",
+ ),
]
store_mock.list_registered_names.return_value = store_response
- args = Namespace(format=formatted)
+ args = Namespace(format=formatted, include_collaborations=None)
ListNamesCommand(config).run(args)
assert store_mock.mock_calls == [
- call.list_registered_names(),
+ call.list_registered_names(include_collaborations=None),
]
expected = [
"Name Type Visibility Status",
@@ -701,23 +713,42 @@ def test_list_registered_one_public(emitter, store_mock, config, formatted):
def test_list_registered_several(emitter, store_mock, config, formatted):
"""List registered with several itemsssssssss in the response."""
store_response = [
- Entity(entity_type="charm", name="charm1", private=True, status="simple status"),
- Entity(entity_type="charm", name="charm2-long-name", private=False, status="other"),
- Entity(entity_type="charm", name="charm3", private=True, status="super long status"),
+ Entity(
+ entity_type="charm",
+ name="charm1",
+ private=True,
+ status="simple status",
+ publisher_display_name="J. Doe",
+ ),
+ Entity(
+ entity_type="charm",
+ name="charm2-long-name",
+ private=False,
+ status="other",
+ publisher_display_name="J. Doe",
+ ),
+ Entity(
+ entity_type="charm",
+ name="charm3",
+ private=True,
+ status="super long status",
+ publisher_display_name="J. Doe",
+ ),
Entity(
entity_type="bundle",
name="somebundle",
private=False,
status="bundle status",
+ publisher_display_name="J. Doe",
),
]
store_mock.list_registered_names.return_value = store_response
- args = Namespace(format=formatted)
+ args = Namespace(format=formatted, include_collaborations=None)
ListNamesCommand(config).run(args)
assert store_mock.mock_calls == [
- call.list_registered_names(),
+ call.list_registered_names(include_collaborations=None),
]
if formatted:
expected = [
@@ -758,6 +789,60 @@ def test_list_registered_several(emitter, store_mock, config, formatted):
emitter.assert_messages(expected)
[email protected]("formatted", [None, JSON_FORMAT])
+def test_list_registered_with_collaborations(emitter, store_mock, config, formatted):
+ """List registered with collaborations flag."""
+ store_response = [
+ Entity(
+ entity_type="charm",
+ name="charm1",
+ private=True,
+ status="simple status",
+ publisher_display_name="J. Doe",
+ ),
+ Entity(
+ entity_type="bundle",
+ name="somebundle",
+ private=False,
+ status="bundle status",
+ publisher_display_name="Ms. Bundle Publisher",
+ ),
+ ]
+ store_mock.list_registered_names.return_value = store_response
+
+ args = Namespace(format=formatted, include_collaborations=True)
+ ListNamesCommand(config).run(args)
+
+ assert store_mock.mock_calls == [
+ call.list_registered_names(include_collaborations=True),
+ ]
+ if formatted:
+ expected = [
+ {
+ "name": "charm1",
+ "type": "charm",
+ "visibility": "private",
+ "status": "simple status",
+ "publisher": "J. Doe",
+ },
+ {
+ "name": "somebundle",
+ "type": "bundle",
+ "visibility": "public",
+ "status": "bundle status",
+ "publisher": "Ms. Bundle Publisher",
+ },
+ ]
+ emitter.assert_json_output(expected)
+ else:
+ expected = [
+ "Name Type Visibility Status Publisher",
+ "charm1 charm private simple status J. Doe",
+ "somebundle bundle public bundle status Ms. Bundle Publisher",
+ ]
+ emitter.assert_messages(expected)
+
+
# -- tests for upload command
| The `names` command should allow listing those "collaborated" charms
Currently it only lists those charms *owned* by the developer.
We could add an `--include-collaborations` option (or something shorter, but remember we have bash autocompletion, so...) to also list the charms where the developer is a collaborator.
When this option is used, a new column will be shown, exposing the Publisher's display name.
Implementation-wise, the Charmhub endpoint needs to be modified this way: `/v1/charm?include-collaborations=true` | 0.0 | 96308c3ebaf7e27e3a5b9ac83b5a96f10b282c2a | [
"tests/commands/test_store_api.py::test_list_registered_names_empty",
"tests/commands/test_store_api.py::test_list_registered_names_multiple",
"tests/commands/test_store_api.py::test_list_registered_names_include_collaborations",
"tests/commands/test_store_commands.py::test_list_registered_empty[None]",
"tests/commands/test_store_commands.py::test_list_registered_empty[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_private[json]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[None]",
"tests/commands/test_store_commands.py::test_list_registered_one_public[json]",
"tests/commands/test_store_commands.py::test_list_registered_several[None]",
"tests/commands/test_store_commands.py::test_list_registered_several[json]",
"tests/commands/test_store_commands.py::test_list_registered_with_collaborations[None]",
"tests/commands/test_store_commands.py::test_list_registered_with_collaborations[json]"
]
| [
"tests/commands/test_store_api.py::test_client_init",
"tests/commands/test_store_api.py::test_client_init_ephemeral",
"tests/commands/test_store_api.py::test_craft_store_error_raises_command_error",
"tests/commands/test_store_api.py::test_not_logged_in_warns_regular_auth",
"tests/commands/test_store_api.py::test_not_logged_in_warns_alternate_auth",
"tests/commands/test_store_api.py::test_not_logged_in_disable_auto_login",
"tests/commands/test_store_api.py::test_not_logged_in_alternate_auth_disable_auto_login",
"tests/commands/test_store_api.py::test_auth_valid_credentials",
"tests/commands/test_store_api.py::test_auth_bad_credentials",
"tests/commands/test_store_api.py::test_no_keyring",
"tests/commands/test_store_api.py::test_login",
"tests/commands/test_store_api.py::test_login_attenuating_ttl",
"tests/commands/test_store_api.py::test_login_attenuating_permissions",
"tests/commands/test_store_api.py::test_login_attenuating_channels",
"tests/commands/test_store_api.py::test_login_attenuating_packages",
"tests/commands/test_store_api.py::test_logout",
"tests/commands/test_store_api.py::test_whoami_simple",
"tests/commands/test_store_api.py::test_whoami_packages",
"tests/commands/test_store_api.py::test_whoami_channels",
"tests/commands/test_store_api.py::test_register_name",
"tests/commands/test_store_api.py::test_upload_straightforward",
"tests/commands/test_store_api.py::test_upload_polls_status_ok",
"tests/commands/test_store_api.py::test_upload_polls_status_timeout",
"tests/commands/test_store_api.py::test_upload_error",
"tests/commands/test_store_api.py::test_upload_charmbundles_endpoint",
"tests/commands/test_store_api.py::test_upload_resources_endpoint",
"tests/commands/test_store_api.py::test_upload_including_extra_parameters",
"tests/commands/test_store_api.py::test_list_revisions_ok",
"tests/commands/test_store_api.py::test_list_revisions_empty",
"tests/commands/test_store_api.py::test_list_revisions_errors",
"tests/commands/test_store_api.py::test_list_revisions_several_mixed",
"tests/commands/test_store_api.py::test_list_revisions_bases_none",
"tests/commands/test_store_api.py::test_release_simple",
"tests/commands/test_store_api.py::test_release_multiple_channels",
"tests/commands/test_store_api.py::test_release_with_resources",
"tests/commands/test_store_api.py::test_status_ok",
"tests/commands/test_store_api.py::test_status_with_resources",
"tests/commands/test_store_api.py::test_status_base_in_None",
"tests/commands/test_store_api.py::test_create_library_id",
"tests/commands/test_store_api.py::test_create_library_revision",
"tests/commands/test_store_api.py::test_get_library",
"tests/commands/test_store_api.py::test_get_tips_simple",
"tests/commands/test_store_api.py::test_get_tips_empty",
"tests/commands/test_store_api.py::test_get_tips_several",
"tests/commands/test_store_api.py::test_get_tips_query_combinations",
"tests/commands/test_store_api.py::test_list_resources_ok",
"tests/commands/test_store_api.py::test_list_resources_empty",
"tests/commands/test_store_api.py::test_list_resources_several",
"tests/commands/test_store_api.py::test_list_resource_revisions_ok",
"tests/commands/test_store_api.py::test_list_resource_revisions_empty",
"tests/commands/test_store_api.py::test_list_resource_revisions_several",
"tests/commands/test_store_api.py::test_get_oci_registry_credentials",
"tests/commands/test_store_api.py::test_get_oci_image_blob",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_ok",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_no_file",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_garbage",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_no_name",
"tests/commands/test_store_commands.py::test_login_simple",
"tests/commands/test_store_commands.py::test_login_exporting",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[charm]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[bundle]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[permission]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[channel]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[ttl]",
"tests/commands/test_store_commands.py::test_login_restricting_ttl",
"tests/commands/test_store_commands.py::test_login_restricting_channels",
"tests/commands/test_store_commands.py::test_login_restricting_permissions",
"tests/commands/test_store_commands.py::test_login_restricting_permission_invalid",
"tests/commands/test_store_commands.py::test_login_restricting_charms",
"tests/commands/test_store_commands.py::test_login_restricting_bundles",
"tests/commands/test_store_commands.py::test_login_restriction_mix",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_logout_but_not_logged_in",
"tests/commands/test_store_commands.py::test_whoami[None]",
"tests/commands/test_store_commands.py::test_whoami[json]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[None]",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in[json]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[None]",
"tests/commands/test_store_commands.py::test_whoami_with_channels[json]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[None]",
"tests/commands/test_store_commands.py::test_whoami_with_charms[json]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[None]",
"tests/commands/test_store_commands.py::test_whoami_with_bundles[json]",
"tests/commands/test_store_commands.py::test_whoami_comprehensive",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok[json]",
"tests/commands/test_store_commands.py::test_upload_call_error[None]",
"tests/commands/test_store_commands.py::test_upload_call_error[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[None]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release[json]",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[None]",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources[json]",
"tests/commands/test_store_commands.py::test_upload_options_resource",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_charm_with_init_template_todo_token",
"tests/commands/test_store_commands.py::test_upload_with_different_name_than_in_metadata",
"tests/commands/test_store_commands.py::test_revisions_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_empty[None]",
"tests/commands/test_store_commands.py::test_revisions_empty[json]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision[json]",
"tests/commands/test_store_commands.py::test_revisions_version_null[None]",
"tests/commands/test_store_commands.py::test_revisions_version_null[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_simple[json]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[None]",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple[json]",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_close_simple_ok",
"tests/commands/test_store_commands.py::test_status_simple_ok[None]",
"tests/commands/test_store_commands.py::test_status_simple_ok[json]",
"tests/commands/test_store_commands.py::test_status_empty[None]",
"tests/commands/test_store_commands.py::test_status_empty[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback[json]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[None]",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback[json]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[None]",
"tests/commands/test_store_commands.py::test_status_multiple_tracks[json]",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch[None]",
"tests/commands/test_store_commands.py::test_status_with_one_branch[json]",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources[None]",
"tests/commands/test_store_commands.py::test_status_with_resources[json]",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[None]",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track[json]",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[None]",
"tests/commands/test_store_commands.py::test_status_with_base_in_none[json]",
"tests/commands/test_store_commands.py::test_status_unreleased_track",
"tests/commands/test_store_commands.py::test_createlib_simple[None]",
"tests/commands/test_store_commands.py::test_createlib_simple[json]",
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_path_already_there",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple[None]",
"tests/commands/test_store_commands.py::test_publishlib_simple[json]",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all[None]",
"tests/commands/test_store_commands.py::test_publishlib_all[json]",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash[json]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[None]",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash[json]",
"tests/commands/test_store_commands.py::test_getlibinfo_success_simple",
"tests/commands/test_store_commands.py::test_getlibinfo_success_content",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.v3.testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[mycharms.testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_not_importable_charm_name",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.html]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[mycharms/testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-three.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.vX.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_name",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_path",
"tests/commands/test_store_commands.py::test_getlibinfo_malformed_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_api_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_api_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_api_patch_both_zero",
"tests/commands/test_store_commands.py::test_getlibinfo_metadata_api_different_path_api",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_string",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_ascii",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_empty",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[None]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded[json]",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all[None]",
"tests/commands/test_store_commands.py::test_fetchlib_all[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash[json]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[None]",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash[json]",
"tests/commands/test_store_commands.py::test_listlib_simple[None]",
"tests/commands/test_store_commands.py::test_listlib_simple[json]",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty[None]",
"tests/commands/test_store_commands.py::test_listlib_empty[json]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[None]",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted[json]",
"tests/commands/test_store_commands.py::test_resources_simple[None]",
"tests/commands/test_store_commands.py::test_resources_simple[json]",
"tests/commands/test_store_commands.py::test_resources_empty[None]",
"tests/commands/test_store_commands.py::test_resources_empty[json]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[None]",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped[json]",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[None]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_already_uploaded[None]",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_already_uploaded[json]",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_id_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_digest_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_image_id_missing",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[None]",
"tests/commands/test_store_commands.py::test_uploadresource_call_error[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty[json]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[None]",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision[json]"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-10-11 17:32:56+00:00 | apache-2.0 | 1,472 |
|
canonical__charmcraft-916 | diff --git a/charmcraft/commands/store/store.py b/charmcraft/commands/store/store.py
index dd6fe74..6dc7f06 100644
--- a/charmcraft/commands/store/store.py
+++ b/charmcraft/commands/store/store.py
@@ -25,6 +25,7 @@ from functools import wraps
import craft_store
from craft_cli import emit, CraftError
from craft_store import attenuations, endpoints
+from craft_store.errors import CredentialsAlreadyAvailable
from dateutil import parser
from charmcraft.commands.store.client import Client, ALTERNATE_AUTH_ENV_VAR
@@ -207,7 +208,15 @@ class Store:
if packages:
kwargs["packages"] = packages
- return self._client.login(**kwargs)
+ try:
+ credentials = self._client.login(**kwargs)
+ except CredentialsAlreadyAvailable as exc:
+ raise CraftError(
+ "Cannot login because credentials were found in your system "
+ "(which may be no longer valid, though).",
+ resolution="Please logout first, then login again.",
+ ) from exc
+ return credentials
def logout(self):
"""Logout from the store.
| canonical/charmcraft | cad2b5739b7c4ba8761554e17097a90a498cf3f5 | diff --git a/tests/commands/test_store_api.py b/tests/commands/test_store_api.py
index c46ac46..00ff878 100644
--- a/tests/commands/test_store_api.py
+++ b/tests/commands/test_store_api.py
@@ -26,7 +26,12 @@ from dateutil import parser
from craft_cli import CraftError
from craft_store import attenuations
from craft_store.endpoints import Package
-from craft_store.errors import NetworkError, CredentialsUnavailable, StoreServerError
+from craft_store.errors import (
+ CredentialsAlreadyAvailable,
+ CredentialsUnavailable,
+ NetworkError,
+ StoreServerError,
+)
from charmcraft.commands.store.client import Client
from charmcraft.utils import ResourceOption
@@ -296,6 +301,24 @@ def test_login(client_mock, config):
assert result == acquired_credentials
+def test_login_having_credentials(client_mock, config):
+ """Login attempt when already having credentials.."""
+ # client raises a specific exception for this case
+ original_exception = CredentialsAlreadyAvailable("app", "host")
+ client_mock.login.side_effect = original_exception
+
+ store = Store(config.charmhub)
+ with pytest.raises(CraftError) as cm:
+ store.login()
+ error = cm.value
+ assert str(error) == (
+ "Cannot login because credentials were found in your system (which may be "
+ "no longer valid, though)."
+ )
+ assert error.resolution == "Please logout first, then login again."
+ assert error.__cause__ is original_exception
+
+
def test_login_attenuating_ttl(client_mock, config):
"""Login with specific TTL restrictions."""
store = Store(config.charmhub)
diff --git a/tests/spread/store/resources/task.yaml b/tests/spread/store/resources/task.yaml
index 881bae3..cbcba78 100644
--- a/tests/spread/store/resources/task.yaml
+++ b/tests/spread/store/resources/task.yaml
@@ -35,7 +35,7 @@ prepare: |
echo "Super complex file resource" > resourcefile.txt
# an oci image resource
- docker pull hellow-world@sha256:18a657d0cc1c7d0678a3fbea8b7eb4918bba25968d3e1b0adebfa71caddbc346
+ docker pull hello-world@sha256:18a657d0cc1c7d0678a3fbea8b7eb4918bba25968d3e1b0adebfa71caddbc346
restore: |
pushd charm
Provide a better error description and suggest a solution for the "double login" case
Craft Store does not allow "login" if it finds credentials in the keyring. Currently, the error received is confusing:
```
craft-store error: Credentials found for 'charmcraft' on 'api.charmhub.io'.
```
Furthermore, when the user reads "credentials found", they move on and issue another store-related command that may also fail because the credentials are expired! So they try to log in again and ... (loop)
We should provide a better error and possible resolution:
```
Cannot login because credentials were found in your system (which may be no longer valid, though)
Recommended resolution: Please logout before login again
```
(and the original error should appear in the logs only)
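The diff above implements exactly this by wrapping the craft-store call; as a minimal standalone sketch of the same pattern (the helper function name here is illustrative, while the exception and error classes are the ones named in the diff):
```python
from craft_cli import CraftError
from craft_store.errors import CredentialsAlreadyAvailable


def login_with_friendly_error(client, **kwargs):
    """Wrap the store login so a 'double login' yields an actionable error."""
    try:
        # craft-store raises if it already finds credentials in the keyring
        return client.login(**kwargs)
    except CredentialsAlreadyAvailable as exc:
        # Re-raise with a clearer message plus a suggested resolution; chaining
        # with `from exc` keeps the original error available for the logs.
        raise CraftError(
            "Cannot login because credentials were found in your system "
            "(which may be no longer valid, though).",
            resolution="Please logout first, then login again.",
        ) from exc
```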
| 0.0 | cad2b5739b7c4ba8761554e17097a90a498cf3f5 | [
"tests/commands/test_store_api.py::test_login_having_credentials"
]
| [
"tests/commands/test_store_api.py::test_client_init",
"tests/commands/test_store_api.py::test_client_init_ephemeral",
"tests/commands/test_store_api.py::test_craft_store_error_raises_command_error",
"tests/commands/test_store_api.py::test_not_logged_in_warns_regular_auth",
"tests/commands/test_store_api.py::test_not_logged_in_warns_alternate_auth",
"tests/commands/test_store_api.py::test_not_logged_in_disable_auto_login",
"tests/commands/test_store_api.py::test_not_logged_in_alternate_auth_disable_auto_login",
"tests/commands/test_store_api.py::test_auth_valid_credentials",
"tests/commands/test_store_api.py::test_auth_bad_credentials",
"tests/commands/test_store_api.py::test_no_keyring",
"tests/commands/test_store_api.py::test_login",
"tests/commands/test_store_api.py::test_login_attenuating_ttl",
"tests/commands/test_store_api.py::test_login_attenuating_permissions",
"tests/commands/test_store_api.py::test_login_attenuating_channels",
"tests/commands/test_store_api.py::test_login_attenuating_packages",
"tests/commands/test_store_api.py::test_logout",
"tests/commands/test_store_api.py::test_whoami_simple",
"tests/commands/test_store_api.py::test_whoami_packages",
"tests/commands/test_store_api.py::test_whoami_channels",
"tests/commands/test_store_api.py::test_register_name",
"tests/commands/test_store_api.py::test_list_registered_names_empty",
"tests/commands/test_store_api.py::test_list_registered_names_multiple",
"tests/commands/test_store_api.py::test_list_registered_names_include_collaborations",
"tests/commands/test_store_api.py::test_upload_straightforward",
"tests/commands/test_store_api.py::test_upload_polls_status_ok",
"tests/commands/test_store_api.py::test_upload_polls_status_timeout",
"tests/commands/test_store_api.py::test_upload_error",
"tests/commands/test_store_api.py::test_upload_charmbundles_endpoint",
"tests/commands/test_store_api.py::test_upload_resources_endpoint",
"tests/commands/test_store_api.py::test_upload_including_extra_parameters",
"tests/commands/test_store_api.py::test_list_revisions_ok",
"tests/commands/test_store_api.py::test_list_revisions_empty",
"tests/commands/test_store_api.py::test_list_revisions_errors",
"tests/commands/test_store_api.py::test_list_revisions_several_mixed",
"tests/commands/test_store_api.py::test_list_revisions_bases_none",
"tests/commands/test_store_api.py::test_release_simple",
"tests/commands/test_store_api.py::test_release_multiple_channels",
"tests/commands/test_store_api.py::test_release_with_resources",
"tests/commands/test_store_api.py::test_status_ok",
"tests/commands/test_store_api.py::test_status_with_resources",
"tests/commands/test_store_api.py::test_status_base_in_None",
"tests/commands/test_store_api.py::test_create_library_id",
"tests/commands/test_store_api.py::test_create_library_revision",
"tests/commands/test_store_api.py::test_get_library",
"tests/commands/test_store_api.py::test_get_tips_simple",
"tests/commands/test_store_api.py::test_get_tips_empty",
"tests/commands/test_store_api.py::test_get_tips_several",
"tests/commands/test_store_api.py::test_get_tips_query_combinations",
"tests/commands/test_store_api.py::test_list_resources_ok",
"tests/commands/test_store_api.py::test_list_resources_empty",
"tests/commands/test_store_api.py::test_list_resources_several",
"tests/commands/test_store_api.py::test_list_resource_revisions_ok",
"tests/commands/test_store_api.py::test_list_resource_revisions_empty",
"tests/commands/test_store_api.py::test_list_resource_revisions_several",
"tests/commands/test_store_api.py::test_get_oci_registry_credentials",
"tests/commands/test_store_api.py::test_get_oci_image_blob"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-10-27 14:33:20+00:00 | apache-2.0 | 1,473 |
|
canonical__charmcraft-917 | diff --git a/charmcraft/main.py b/charmcraft/main.py
index 716cc14..bf24e73 100644
--- a/charmcraft/main.py
+++ b/charmcraft/main.py
@@ -102,16 +102,20 @@ EXTRA_ENVIRONMENT = ("DESKTOP_SESSION", "XDG_CURRENT_DESKTOP")
def _get_system_details():
"""Produce details about the system."""
- # prepare the useful environment variables: all CHARMCRAFT* (except AUTH keys)
+ # prepare the useful environment variables: all CHARMCRAFT* (hiding AUTH keys)
# and desktop/session
- useful_env = {name for name in os.environ if name.startswith("CHARMCRAFT")}
- useful_env.discard(ALTERNATE_AUTH_ENV_VAR)
- useful_env.update(EXTRA_ENVIRONMENT)
-
- os_platform = utils.get_os_platform()
- env_string = ", ".join(f"{k}={v!r}" for k, v in sorted(os.environ.items()) if k in useful_env)
+ useful_env = {
+ name: value
+ for name, value in os.environ.items()
+ if name.startswith("CHARMCRAFT") or name in EXTRA_ENVIRONMENT
+ }
+ if ALTERNATE_AUTH_ENV_VAR in useful_env:
+ useful_env[ALTERNATE_AUTH_ENV_VAR] = "<hidden>"
+ env_string = ", ".join(f"{name}={value!r}" for name, value in sorted(useful_env.items()))
if not env_string:
env_string = "None"
+
+ os_platform = utils.get_os_platform()
return f"System details: {os_platform}; Environment: {env_string}"
| canonical/charmcraft | 754050ef4091ed4f1aeaf58934850deb50d046aa | diff --git a/tests/test_main.py b/tests/test_main.py
index 5c5aa3c..48202da 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -334,8 +334,8 @@ def test_systemdetails_charmcraft_environment():
)
-def test_systemdetails_no_auth():
- """System details specifically excluding secrets."""
+def test_systemdetails_hidden_auth():
+ """System details specifically hiding secrets."""
with patch("os.environ", {ALTERNATE_AUTH_ENV_VAR: "supersecret"}):
with patch("charmcraft.utils.get_os_platform") as platform_mock:
platform_mock.return_value = utils.OSPlatform(
@@ -344,7 +344,7 @@ def test_systemdetails_no_auth():
result = _get_system_details()
assert result == (
"System details: OSPlatform(system='test-system', release='test-release', "
- "machine='test-machine'); Environment: None"
+ f"machine='test-machine'); Environment: {ALTERNATE_AUTH_ENV_VAR}='<hidden>'"
)
| The presence of CHARMCRAFT_AUTH should be logged
Currently we're logging all CHARMCRAFT_* environment variables, but specifically excluding `CHARMCRAFT_AUTH` to not leak secrets.
However, it's very useful, for debugging purposes, to know whether `CHARMCRAFT_AUTH` is present or not, so we should log it with a different value, something like...
```
System details: (...); Environment: DESKTOP_SESSION='plasma', CHARMCRAFT_AUTH='<hidden>', ...
``` | 0.0 | 754050ef4091ed4f1aeaf58934850deb50d046aa | [
"tests/test_main.py::test_systemdetails_hidden_auth"
]
| [
"tests/test_main.py::test_main_ok",
"tests/test_main.py::test_main_managed_instance_init",
"tests/test_main.py::test_main_managed_instance_error[side_effect0]",
"tests/test_main.py::test_main_managed_instance_error[side_effect1]",
"tests/test_main.py::test_main_managed_instance_error[side_effect2]",
"tests/test_main.py::test_main_managed_instance_error[side_effect3]",
"tests/test_main.py::test_main_load_config_ok",
"tests/test_main.py::test_main_load_config_not_present_ok",
"tests/test_main.py::test_main_load_config_not_present_but_needed",
"tests/test_main.py::test_main_no_args",
"tests/test_main.py::test_main_controlled_error",
"tests/test_main.py::test_main_controlled_return_code",
"tests/test_main.py::test_main_crash",
"tests/test_main.py::test_main_interrupted",
"tests/test_main.py::test_main_controlled_arguments_error",
"tests/test_main.py::test_main_providing_help",
"tests/test_main.py::test_main_logs_system_details",
"tests/test_main.py::test_systemdetails_basic",
"tests/test_main.py::test_systemdetails_extra_environment",
"tests/test_main.py::test_systemdetails_charmcraft_environment",
"tests/test_main.py::test_commands[AnalyzeCommand]",
"tests/test_main.py::test_commands[CleanCommand]",
"tests/test_main.py::test_commands[PackCommand]",
"tests/test_main.py::test_commands[InitCommand]",
"tests/test_main.py::test_commands[VersionCommand]",
"tests/test_main.py::test_commands[LoginCommand]",
"tests/test_main.py::test_commands[LogoutCommand]",
"tests/test_main.py::test_commands[WhoamiCommand]",
"tests/test_main.py::test_commands[RegisterCharmNameCommand]",
"tests/test_main.py::test_commands[RegisterBundleNameCommand]",
"tests/test_main.py::test_commands[ListNamesCommand]",
"tests/test_main.py::test_commands[UploadCommand]",
"tests/test_main.py::test_commands[ListRevisionsCommand]",
"tests/test_main.py::test_commands[ReleaseCommand]",
"tests/test_main.py::test_commands[StatusCommand]",
"tests/test_main.py::test_commands[CloseCommand]",
"tests/test_main.py::test_commands[CreateLibCommand]",
"tests/test_main.py::test_commands[PublishLibCommand]",
"tests/test_main.py::test_commands[ListLibCommand]",
"tests/test_main.py::test_commands[FetchLibCommand]",
"tests/test_main.py::test_commands[ListResourcesCommand]",
"tests/test_main.py::test_commands[UploadResourceCommand]",
"tests/test_main.py::test_commands[ListResourceRevisionsCommand]",
"tests/test_main.py::test_aesthetic_help_msg[AnalyzeCommand]",
"tests/test_main.py::test_aesthetic_help_msg[CleanCommand]",
"tests/test_main.py::test_aesthetic_help_msg[PackCommand]",
"tests/test_main.py::test_aesthetic_help_msg[InitCommand]",
"tests/test_main.py::test_aesthetic_help_msg[VersionCommand]",
"tests/test_main.py::test_aesthetic_help_msg[LoginCommand]",
"tests/test_main.py::test_aesthetic_help_msg[LogoutCommand]",
"tests/test_main.py::test_aesthetic_help_msg[WhoamiCommand]",
"tests/test_main.py::test_aesthetic_help_msg[RegisterCharmNameCommand]",
"tests/test_main.py::test_aesthetic_help_msg[RegisterBundleNameCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListNamesCommand]",
"tests/test_main.py::test_aesthetic_help_msg[UploadCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListRevisionsCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ReleaseCommand]",
"tests/test_main.py::test_aesthetic_help_msg[StatusCommand]",
"tests/test_main.py::test_aesthetic_help_msg[CloseCommand]",
"tests/test_main.py::test_aesthetic_help_msg[CreateLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[PublishLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[FetchLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListResourcesCommand]",
"tests/test_main.py::test_aesthetic_help_msg[UploadResourceCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListResourceRevisionsCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[AnalyzeCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[CleanCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[PackCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[InitCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[VersionCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[LoginCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[LogoutCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[WhoamiCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[RegisterCharmNameCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[RegisterBundleNameCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListNamesCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[UploadCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListRevisionsCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ReleaseCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[StatusCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[CloseCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[CreateLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[PublishLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[FetchLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListResourcesCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[UploadResourceCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListResourceRevisionsCommand]",
"tests/test_main.py::test_usage_of_parsed_args[AnalyzeCommand]",
"tests/test_main.py::test_usage_of_parsed_args[CleanCommand]",
"tests/test_main.py::test_usage_of_parsed_args[PackCommand]",
"tests/test_main.py::test_usage_of_parsed_args[InitCommand]",
"tests/test_main.py::test_usage_of_parsed_args[VersionCommand]",
"tests/test_main.py::test_usage_of_parsed_args[LoginCommand]",
"tests/test_main.py::test_usage_of_parsed_args[LogoutCommand]",
"tests/test_main.py::test_usage_of_parsed_args[WhoamiCommand]",
"tests/test_main.py::test_usage_of_parsed_args[RegisterCharmNameCommand]",
"tests/test_main.py::test_usage_of_parsed_args[RegisterBundleNameCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListNamesCommand]",
"tests/test_main.py::test_usage_of_parsed_args[UploadCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListRevisionsCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ReleaseCommand]",
"tests/test_main.py::test_usage_of_parsed_args[StatusCommand]",
"tests/test_main.py::test_usage_of_parsed_args[CloseCommand]",
"tests/test_main.py::test_usage_of_parsed_args[CreateLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[PublishLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[FetchLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListResourcesCommand]",
"tests/test_main.py::test_usage_of_parsed_args[UploadResourceCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListResourceRevisionsCommand]",
"tests/test_main.py::test_basecommand_needs_config_default",
"tests/test_main.py::test_basecommand_include_format_option",
"tests/test_main.py::test_basecommand_format_content_json",
"tests/test_main.py::test_basecommand_format_content_unkown"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-10-27 20:03:13+00:00 | apache-2.0 | 1,474 |
|
canonical__charmcraft-933 | diff --git a/charmcraft/cmdbase.py b/charmcraft/cmdbase.py
index 6106a7f..8009afc 100644
--- a/charmcraft/cmdbase.py
+++ b/charmcraft/cmdbase.py
@@ -21,6 +21,7 @@ import json
import craft_cli
JSON_FORMAT = "json"
+FORMAT_HELP_STR = "Produce the result in the specified format (currently only 'json')"
class BaseCommand(craft_cli.BaseCommand):
@@ -50,5 +51,5 @@ class BaseCommand(craft_cli.BaseCommand):
parser.add_argument(
"--format",
choices=[JSON_FORMAT],
- help="Produce the result formatted as a JSON string",
+ help=FORMAT_HELP_STR,
)
| canonical/charmcraft | c267233a504919a0beaf2de9b51cdf6aa16c31b4 | diff --git a/tests/test_main.py b/tests/test_main.py
index 48202da..feb1304 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -34,7 +34,7 @@ from craft_cli import (
from craft_store.errors import CraftStoreError
from charmcraft import __version__, env, utils
-from charmcraft.cmdbase import BaseCommand, JSON_FORMAT
+from charmcraft.cmdbase import BaseCommand, JSON_FORMAT, FORMAT_HELP_STR
from charmcraft.commands.store.client import ALTERNATE_AUTH_ENV_VAR
from charmcraft.main import COMMAND_GROUPS, main, _get_system_details
@@ -484,7 +484,7 @@ def test_basecommand_include_format_option(config):
assert action.dest == "format"
assert action.default is None
assert action.choices == [JSON_FORMAT]
- assert action.help == "Produce the result formatted as a JSON string"
+ assert action.help == FORMAT_HELP_STR
def test_basecommand_format_content_json(config):
| Clarify `charmcraft status --help` text for `--format`
#784 implements structured output of `charmcraft status`, but the help text on it is a bit misleading:
```
base ❯ time charmcraft status --help
Usage:
charmcraft status [options] <name>
...
Options:
...
--format: Produce the result formatted as a JSON string
```
The above makes it seem like `charmcraft status charmname --format` will export in JSON, but really it wants `--format json`. Easy to fix by changing the option string to say it needs an extra arg, one of `json` or whatever else is available. | 0.0 | c267233a504919a0beaf2de9b51cdf6aa16c31b4 | [
"tests/test_main.py::test_main_ok",
"tests/test_main.py::test_main_managed_instance_init",
"tests/test_main.py::test_main_managed_instance_error[side_effect0]",
"tests/test_main.py::test_main_managed_instance_error[side_effect1]",
"tests/test_main.py::test_main_managed_instance_error[side_effect2]",
"tests/test_main.py::test_main_managed_instance_error[side_effect3]",
"tests/test_main.py::test_main_load_config_ok",
"tests/test_main.py::test_main_load_config_not_present_ok",
"tests/test_main.py::test_main_load_config_not_present_but_needed",
"tests/test_main.py::test_main_no_args",
"tests/test_main.py::test_main_controlled_error",
"tests/test_main.py::test_main_controlled_return_code",
"tests/test_main.py::test_main_crash",
"tests/test_main.py::test_main_interrupted",
"tests/test_main.py::test_main_controlled_arguments_error",
"tests/test_main.py::test_main_providing_help",
"tests/test_main.py::test_main_logs_system_details",
"tests/test_main.py::test_systemdetails_basic",
"tests/test_main.py::test_systemdetails_extra_environment",
"tests/test_main.py::test_systemdetails_charmcraft_environment",
"tests/test_main.py::test_systemdetails_hidden_auth",
"tests/test_main.py::test_commands[AnalyzeCommand]",
"tests/test_main.py::test_commands[CleanCommand]",
"tests/test_main.py::test_commands[PackCommand]",
"tests/test_main.py::test_commands[InitCommand]",
"tests/test_main.py::test_commands[VersionCommand]",
"tests/test_main.py::test_commands[LoginCommand]",
"tests/test_main.py::test_commands[LogoutCommand]",
"tests/test_main.py::test_commands[WhoamiCommand]",
"tests/test_main.py::test_commands[RegisterCharmNameCommand]",
"tests/test_main.py::test_commands[RegisterBundleNameCommand]",
"tests/test_main.py::test_commands[ListNamesCommand]",
"tests/test_main.py::test_commands[UploadCommand]",
"tests/test_main.py::test_commands[ListRevisionsCommand]",
"tests/test_main.py::test_commands[ReleaseCommand]",
"tests/test_main.py::test_commands[StatusCommand]",
"tests/test_main.py::test_commands[CloseCommand]",
"tests/test_main.py::test_commands[CreateLibCommand]",
"tests/test_main.py::test_commands[PublishLibCommand]",
"tests/test_main.py::test_commands[ListLibCommand]",
"tests/test_main.py::test_commands[FetchLibCommand]",
"tests/test_main.py::test_commands[ListResourcesCommand]",
"tests/test_main.py::test_commands[UploadResourceCommand]",
"tests/test_main.py::test_commands[ListResourceRevisionsCommand]",
"tests/test_main.py::test_aesthetic_help_msg[AnalyzeCommand]",
"tests/test_main.py::test_aesthetic_help_msg[CleanCommand]",
"tests/test_main.py::test_aesthetic_help_msg[PackCommand]",
"tests/test_main.py::test_aesthetic_help_msg[InitCommand]",
"tests/test_main.py::test_aesthetic_help_msg[VersionCommand]",
"tests/test_main.py::test_aesthetic_help_msg[LoginCommand]",
"tests/test_main.py::test_aesthetic_help_msg[LogoutCommand]",
"tests/test_main.py::test_aesthetic_help_msg[WhoamiCommand]",
"tests/test_main.py::test_aesthetic_help_msg[RegisterCharmNameCommand]",
"tests/test_main.py::test_aesthetic_help_msg[RegisterBundleNameCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListNamesCommand]",
"tests/test_main.py::test_aesthetic_help_msg[UploadCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListRevisionsCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ReleaseCommand]",
"tests/test_main.py::test_aesthetic_help_msg[StatusCommand]",
"tests/test_main.py::test_aesthetic_help_msg[CloseCommand]",
"tests/test_main.py::test_aesthetic_help_msg[CreateLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[PublishLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[FetchLibCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListResourcesCommand]",
"tests/test_main.py::test_aesthetic_help_msg[UploadResourceCommand]",
"tests/test_main.py::test_aesthetic_help_msg[ListResourceRevisionsCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[AnalyzeCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[CleanCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[PackCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[InitCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[VersionCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[LoginCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[LogoutCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[WhoamiCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[RegisterCharmNameCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[RegisterBundleNameCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListNamesCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[UploadCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListRevisionsCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ReleaseCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[StatusCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[CloseCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[CreateLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[PublishLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[FetchLibCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListResourcesCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[UploadResourceCommand]",
"tests/test_main.py::test_aesthetic_args_options_msg[ListResourceRevisionsCommand]",
"tests/test_main.py::test_usage_of_parsed_args[AnalyzeCommand]",
"tests/test_main.py::test_usage_of_parsed_args[CleanCommand]",
"tests/test_main.py::test_usage_of_parsed_args[PackCommand]",
"tests/test_main.py::test_usage_of_parsed_args[InitCommand]",
"tests/test_main.py::test_usage_of_parsed_args[VersionCommand]",
"tests/test_main.py::test_usage_of_parsed_args[LoginCommand]",
"tests/test_main.py::test_usage_of_parsed_args[LogoutCommand]",
"tests/test_main.py::test_usage_of_parsed_args[WhoamiCommand]",
"tests/test_main.py::test_usage_of_parsed_args[RegisterCharmNameCommand]",
"tests/test_main.py::test_usage_of_parsed_args[RegisterBundleNameCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListNamesCommand]",
"tests/test_main.py::test_usage_of_parsed_args[UploadCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListRevisionsCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ReleaseCommand]",
"tests/test_main.py::test_usage_of_parsed_args[StatusCommand]",
"tests/test_main.py::test_usage_of_parsed_args[CloseCommand]",
"tests/test_main.py::test_usage_of_parsed_args[CreateLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[PublishLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[FetchLibCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListResourcesCommand]",
"tests/test_main.py::test_usage_of_parsed_args[UploadResourceCommand]",
"tests/test_main.py::test_usage_of_parsed_args[ListResourceRevisionsCommand]",
"tests/test_main.py::test_basecommand_needs_config_default",
"tests/test_main.py::test_basecommand_include_format_option",
"tests/test_main.py::test_basecommand_format_content_json",
"tests/test_main.py::test_basecommand_format_content_unkown"
]
| []
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-11-28 10:07:23+00:00 | apache-2.0 | 1,475 |
|
canonical__operator-1000 | diff --git a/ops/model.py b/ops/model.py
index 24f40f5..906b463 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -1181,6 +1181,9 @@ class Secret:
def get_content(self, *, refresh: bool = False) -> Dict[str, str]:
"""Get the secret's content.
+ Returns:
+ A copy of the secret's content dictionary.
+
Args:
refresh: If true, fetch the latest revision's content and tell
Juju to update to tracking that revision. The default is to
@@ -1190,7 +1193,7 @@ class Secret:
if refresh or self._content is None:
self._content = self._backend.secret_get(
id=self.id, label=self.label, refresh=refresh)
- return self._content
+ return self._content.copy()
def peek_content(self) -> Dict[str, str]:
"""Get the content of the latest revision of this secret.
| canonical/operator | c9bba5b00bc99cab1c679dfd0814c1f2d0294b5c | diff --git a/test/test_model.py b/test/test_model.py
index b310e0f..d74dd17 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -3104,6 +3104,18 @@ class TestSecretClass(unittest.TestCase):
self.assertEqual(fake_script_calls(self, clear=True),
[['secret-get', 'secret:z', '--format=json']])
+ def test_get_content_copies_dict(self):
+ fake_script(self, 'secret-get', """echo '{"foo": "bar"}'""")
+
+ secret = self.make_secret(id='z')
+ content = secret.get_content()
+ self.assertEqual(content, {'foo': 'bar'})
+ content['new'] = 'value'
+ self.assertEqual(secret.get_content(), {'foo': 'bar'})
+
+ self.assertEqual(fake_script_calls(self, clear=True),
+ [['secret-get', 'secret:z', '--format=json']])
+
def test_peek_content(self):
fake_script(self, 'secret-get', """echo '{"foo": "peeked"}'""")
| secret.get_content() un-intuitively cached locally
"Screenshot" from a debugger session.
As you can see, I'm fetching the contents of a secret, saving it to a `dict` called `full_content`.
Then I update `full_content`. NOT the secret, but the dictionary holding the output of the `get_content()` call.
Yet, when I call `secret.get_content()` afterwards, I find that it's returning the UPDATED content of the `full_content` dictionary, instead of the real contents of the secret. AGAIN, note that the secret has NOT been updated at this point.
However, as a result, the secret will actually never be updated. The update command silently fails on line 630, and neither the contents nor the revision of the secret gets updated.
[The value of the `content` variable was `{'endpoints': 'host1:port,host2:port,host3:port'}` from the start]
```
625 import pdb; pdb.set_trace()
626 -> full_content = secret.get_content()
627 old_content = copy.deepcopy(full_content)
628 full_content.update(content)
629 if old_content != full_content:
630 secret.set_content(full_content)
631
(Pdb) n
> /var/lib/juju/agents/unit-kafka-0/charm/lib/charms/data_platform_libs/v0/data_interfaces.py(627)update_relation_secret()
-> old_content = copy.deepcopy(full_content)
(Pdb) full_content
{'endpoints': 'host1:port,host2:port'}
(Pdb) n
> /var/lib/juju/agents/unit-kafka-0/charm/lib/charms/data_platform_libs/v0/data_interfaces.py(628)update_relation_secret()
-> full_content.update(content)
(Pdb) n
> /var/lib/juju/agents/unit-kafka-0/charm/lib/charms/data_platform_libs/v0/data_interfaces.py(629)update_relation_secret()
-> if old_content != full_content:
(Pdb) secret.get_content()
{'endpoints': 'host1:port,host2:port,host3:port'}
```
This behavior is highly unintuitive, and should be addressed.
| 0.0 | c9bba5b00bc99cab1c679dfd0814c1f2d0294b5c | [
"test/test_model.py::TestSecretClass::test_get_content_copies_dict"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_run_error",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::test_push_path_relative[case0]",
"test/test_model.py::test_push_path_relative[case1]",
"test/test_model.py::test_push_path_relative[case2]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_can_connect",
"test/test_model.py::TestContainerPebble::test_can_connect_api_error",
"test/test_model.py::TestContainerPebble::test_can_connect_connection_error",
"test/test_model.py::TestContainerPebble::test_can_connect_file_not_found_error",
"test/test_model.py::TestContainerPebble::test_can_connect_simple",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_get_status",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_planned_units",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_model.py::TestSecrets::test_add_secret_errors",
"test/test_model.py::TestSecrets::test_app_add_secret_args",
"test/test_model.py::TestSecrets::test_app_add_secret_simple",
"test/test_model.py::TestSecrets::test_get_secret_id",
"test/test_model.py::TestSecrets::test_get_secret_id_and_label",
"test/test_model.py::TestSecrets::test_get_secret_label",
"test/test_model.py::TestSecrets::test_get_secret_no_args",
"test/test_model.py::TestSecrets::test_get_secret_not_found",
"test/test_model.py::TestSecrets::test_get_secret_other_error",
"test/test_model.py::TestSecrets::test_secret_unique_identifier",
"test/test_model.py::TestSecrets::test_unit_add_secret_args",
"test/test_model.py::TestSecrets::test_unit_add_secret_errors",
"test/test_model.py::TestSecrets::test_unit_add_secret_simple",
"test/test_model.py::TestSecretInfo::test_from_dict",
"test/test_model.py::TestSecretInfo::test_init",
"test/test_model.py::TestSecretClass::test_get_content_cached",
"test/test_model.py::TestSecretClass::test_get_content_refresh",
"test/test_model.py::TestSecretClass::test_get_content_uncached",
"test/test_model.py::TestSecretClass::test_get_info",
"test/test_model.py::TestSecretClass::test_grant",
"test/test_model.py::TestSecretClass::test_id_and_label",
"test/test_model.py::TestSecretClass::test_peek_content",
"test/test_model.py::TestSecretClass::test_remove_all_revisions",
"test/test_model.py::TestSecretClass::test_remove_revision",
"test/test_model.py::TestSecretClass::test_revoke",
"test/test_model.py::TestSecretClass::test_set_content",
"test/test_model.py::TestSecretClass::test_set_info",
"test/test_model.py::TestPorts::test_close_port",
"test/test_model.py::TestPorts::test_close_port_error",
"test/test_model.py::TestPorts::test_open_port",
"test/test_model.py::TestPorts::test_open_port_error",
"test/test_model.py::TestPorts::test_opened_ports",
"test/test_model.py::TestPorts::test_opened_ports_warnings"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2023-08-29 23:15:35+00:00 | apache-2.0 | 1,476 |
|
canonical__operator-1024 | diff --git a/ops/model.py b/ops/model.py
index 1d88eaa..cd1733e 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -2275,6 +2275,9 @@ class Container:
try:
for info in Container._list_recursive(local_list, source_path):
dstpath = self._build_destpath(info.path, source_path, dest_dir)
+ if info.type is pebble.FileType.DIRECTORY:
+ self.make_dir(dstpath, make_parents=True)
+ continue
with open(info.path) as src:
self.push(
dstpath,
@@ -2352,6 +2355,9 @@ class Container:
try:
for info in Container._list_recursive(self.list_files, source_path):
dstpath = self._build_destpath(info.path, source_path, dest_dir)
+ if info.type is pebble.FileType.DIRECTORY:
+ dstpath.mkdir(parents=True, exist_ok=True)
+ continue
dstpath.parent.mkdir(parents=True, exist_ok=True)
with self.pull(info.path, encoding=None) as src:
with dstpath.open(mode='wb') as dst:
@@ -2406,6 +2412,9 @@ class Container:
for info in list_func(path):
if info.type is pebble.FileType.DIRECTORY:
+ # Yield the directory to ensure empty directories are created, then
+ # all of the contained files.
+ yield info
yield from Container._list_recursive(list_func, Path(info.path))
elif info.type in (pebble.FileType.FILE, pebble.FileType.SYMLINK):
yield info
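
Not part of the dataset record, but as a rough illustration of what the patch above enables: a charm that pushes or pulls a directory tree now gets empty subdirectories recreated as well, instead of having them silently skipped. The container name, event wiring, and paths in this sketch are assumptions, not taken from the original repository.

```python
import ops


class BackupCharm(ops.CharmBase):
    """Hypothetical charm sketch illustrating directory-aware push/pull."""

    def __init__(self, framework: ops.Framework):
        super().__init__(framework)
        framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, event: ops.ConfigChangedEvent) -> None:
        container = self.unit.get_container("workload")  # assumed container name
        if not container.can_connect():
            return
        # With the fix above, empty subdirectories under ./files are created
        # in the container rather than being skipped.
        container.push_path("./files", "/srv/app")
        # pull_path likewise recreates empty remote directories locally.
        container.pull_path("/srv/app", "./backup")
```
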
| canonical/operator | c22c807440f2dfc449e99dd29b6f4101b8a0ffd6 | diff --git a/test/test_model.py b/test/test_model.py
index d9ea9d7..365ddff 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -981,14 +981,18 @@ class PushPullCase:
files: typing.List[str],
want: typing.Optional[typing.Set[str]] = None,
dst: typing.Optional[str] = None,
- errors: typing.Optional[typing.Set[str]] = None):
+ errors: typing.Optional[typing.Set[str]] = None,
+ dirs: typing.Optional[typing.Set[str]] = None,
+ want_dirs: typing.Optional[typing.Set[str]] = None):
self.pattern = None
self.dst = dst
self.errors = errors or set()
self.name = name
self.path = path
self.files = files
+ self.dirs = dirs or set()
self.want = want or set()
+ self.want_dirs = want_dirs or set()
recursive_list_cases = [
@@ -996,13 +1000,13 @@ recursive_list_cases = [
name='basic recursive list',
path='/',
files=['/foo/bar.txt', '/baz.txt'],
- want={'/foo/bar.txt', '/baz.txt'},
+ want={'/foo', '/foo/bar.txt', '/baz.txt'},
),
PushPullCase(
name='basic recursive list reverse',
path='/',
files=['/baz.txt', '/foo/bar.txt'],
- want={'/foo/bar.txt', '/baz.txt'},
+ want={'/foo', '/foo/bar.txt', '/baz.txt'},
),
PushPullCase(
name='directly list a (non-directory) file',
@@ -1156,6 +1160,14 @@ recursive_push_pull_cases = [
files=['/foo/bar/baz.txt', '/foo/foobar.txt', '/quux.txt'],
want={'/baz/foobar.txt', '/baz/bar/baz.txt'},
),
+ PushPullCase(
+ name='push/pull an empty directory',
+ path='/foo',
+ dst='/foobar',
+ files=[],
+ dirs={'/foo/baz'},
+ want_dirs={'/foobar/foo/baz'},
+ ),
]
@@ -1179,6 +1191,10 @@ def test_recursive_push_and_pull(case: PushPullCase):
os.makedirs(os.path.dirname(fpath), exist_ok=True)
with open(fpath, 'w') as f:
f.write('hello')
+ if case.dirs:
+ for directory in case.dirs:
+ fpath = os.path.join(push_src.name, directory[1:])
+ os.makedirs(fpath, exist_ok=True)
# test push
if isinstance(case.path, list):
@@ -1204,11 +1220,16 @@ def test_recursive_push_and_pull(case: PushPullCase):
f'push_path gave wrong expected errors: want {case.errors}, got {errors}'
for fpath in case.want:
assert c.exists(fpath), f'push_path failed: file {fpath} missing at destination'
+ for fdir in case.want_dirs:
+ assert c.isdir(fdir), f'push_path failed: dir {fdir} missing at destination'
# create pull test case filesystem structure
pull_dst = tempfile.TemporaryDirectory()
for fpath in case.files:
c.push(fpath, 'hello', make_dirs=True)
+ if case.dirs:
+ for directory in case.dirs:
+ c.make_dir(directory, make_parents=True)
# test pull
errors: typing.Set[str] = set()
@@ -1223,6 +1244,8 @@ def test_recursive_push_and_pull(case: PushPullCase):
f'pull_path gave wrong expected errors: want {case.errors}, got {errors}'
for fpath in case.want:
assert c.exists(fpath), f'pull_path failed: file {fpath} missing at destination'
+ for fdir in case.want_dirs:
+ assert c.isdir(fdir), f'pull_path failed: dir {fdir} missing at destination'
@pytest.mark.parametrize('case', [
diff --git a/test/test_testing.py b/test/test_testing.py
index d49c4a9..e28ef55 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -4574,11 +4574,13 @@ class TestFilesystem(unittest.TestCase, _TestingPebbleClientMixin):
(tempdir / "foo/bar").mkdir(parents=True)
(tempdir / "foo/test").write_text("test")
(tempdir / "foo/bar/foobar").write_text("foobar")
+ (tempdir / "foo/baz").mkdir(parents=True)
self.container.push_path(tempdir / "foo", "/tmp")
self.assertTrue((self.root / "tmp").is_dir())
self.assertTrue((self.root / "tmp/foo").is_dir())
self.assertTrue((self.root / "tmp/foo/bar").is_dir())
+ self.assertTrue((self.root / "tmp/foo/baz").is_dir())
self.assertEqual((self.root / "tmp/foo/test").read_text(), "test")
self.assertEqual((self.root / "tmp/foo/bar/foobar").read_text(), "foobar")
@@ -4595,16 +4597,14 @@ class TestFilesystem(unittest.TestCase, _TestingPebbleClientMixin):
def test_pull_path(self):
(self.root / "foo").mkdir()
(self.root / "foo/bar").write_text("bar")
- # TODO: pull_path doesn't pull empty directories
- # https://github.com/canonical/operator/issues/968
- # (self.root / "foobar").mkdir()
+ (self.root / "foobar").mkdir()
(self.root / "test").write_text("test")
with tempfile.TemporaryDirectory() as temp:
tempdir = pathlib.Path(temp)
self.container.pull_path("/", tempdir)
self.assertTrue((tempdir / "foo").is_dir())
self.assertEqual((tempdir / "foo/bar").read_text(), "bar")
- # self.assertTrue((tempdir / "foobar").is_dir())
+ self.assertTrue((tempdir / "foobar").is_dir())
self.assertEqual((tempdir / "test").read_text(), "test")
def test_list_files(self):
| pull_path doesn't retrieve empty directories
Per [this comment](https://github.com/canonical/operator/pull/960#issuecomment-1628001631), `Container.pull_path` doesn't retrieve empty directories. This seems like a bug, not a feature (Git's behaviour notwithstanding). From a quick look at the code, I think `push_path` will have a similar problem.
We probably need to change the `_list_recursive` helper to also yield directories (before recursing), and the callers test if it's a directory and create it, otherwise copy the file. | 0.0 | c22c807440f2dfc449e99dd29b6f4101b8a0ffd6 | [
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_push_and_pull[case9]",
"test/test_testing.py::TestFilesystem::test_pull_path",
"test/test_testing.py::TestFilesystem::test_push_path"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_run_error",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::test_push_path_relative[case0]",
"test/test_model.py::test_push_path_relative[case1]",
"test/test_model.py::test_push_path_relative[case2]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_can_connect",
"test/test_model.py::TestContainerPebble::test_can_connect_api_error",
"test/test_model.py::TestContainerPebble::test_can_connect_connection_error",
"test/test_model.py::TestContainerPebble::test_can_connect_file_not_found_error",
"test/test_model.py::TestContainerPebble::test_can_connect_simple",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_get_status",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_planned_units",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_model.py::TestSecrets::test_add_secret_errors",
"test/test_model.py::TestSecrets::test_app_add_secret_args",
"test/test_model.py::TestSecrets::test_app_add_secret_simple",
"test/test_model.py::TestSecrets::test_get_secret_id",
"test/test_model.py::TestSecrets::test_get_secret_id_and_label",
"test/test_model.py::TestSecrets::test_get_secret_label",
"test/test_model.py::TestSecrets::test_get_secret_no_args",
"test/test_model.py::TestSecrets::test_get_secret_not_found",
"test/test_model.py::TestSecrets::test_get_secret_other_error",
"test/test_model.py::TestSecrets::test_secret_unique_identifier",
"test/test_model.py::TestSecrets::test_unit_add_secret_args",
"test/test_model.py::TestSecrets::test_unit_add_secret_errors",
"test/test_model.py::TestSecrets::test_unit_add_secret_simple",
"test/test_model.py::TestSecretInfo::test_from_dict",
"test/test_model.py::TestSecretInfo::test_init",
"test/test_model.py::TestSecretClass::test_get_content_cached",
"test/test_model.py::TestSecretClass::test_get_content_copies_dict",
"test/test_model.py::TestSecretClass::test_get_content_refresh",
"test/test_model.py::TestSecretClass::test_get_content_uncached",
"test/test_model.py::TestSecretClass::test_get_info",
"test/test_model.py::TestSecretClass::test_grant",
"test/test_model.py::TestSecretClass::test_id_and_label",
"test/test_model.py::TestSecretClass::test_peek_content",
"test/test_model.py::TestSecretClass::test_remove_all_revisions",
"test/test_model.py::TestSecretClass::test_remove_revision",
"test/test_model.py::TestSecretClass::test_revoke",
"test/test_model.py::TestSecretClass::test_set_content",
"test/test_model.py::TestSecretClass::test_set_content_invalidates_cache",
"test/test_model.py::TestSecretClass::test_set_info",
"test/test_model.py::TestPorts::test_close_port",
"test/test_model.py::TestPorts::test_close_port_error",
"test/test_model.py::TestPorts::test_open_port",
"test/test_model.py::TestPorts::test_open_port_error",
"test/test_model.py::TestPorts::test_opened_ports",
"test/test_model.py::TestPorts::test_opened_ports_warnings",
"test/test_model.py::TestPorts::test_set_ports_all_open",
"test/test_model.py::TestPorts::test_set_ports_close_all",
"test/test_model.py::TestPorts::test_set_ports_mixed",
"test/test_model.py::TestPorts::test_set_ports_noop",
"test/test_model.py::TestPorts::test_set_ports_replace",
"test/test_testing.py::TestHarness::test_actions_from_directory",
"test/test_testing.py::TestHarness::test_actions_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_actions_passed_in",
"test/test_testing.py::TestHarness::test_add_oci_resource_custom",
"test/test_testing.py::TestHarness::test_add_oci_resource_no_image",
"test/test_testing.py::TestHarness::test_add_peer_relation_with_initial_data_leader",
"test/test_testing.py::TestHarness::test_add_relation",
"test/test_testing.py::TestHarness::test_add_relation_and_unit",
"test/test_testing.py::TestHarness::test_add_relation_with_app_data",
"test/test_testing.py::TestHarness::test_add_relation_with_our_initial_data",
"test/test_testing.py::TestHarness::test_add_relation_with_remote_app_data",
"test/test_testing.py::TestHarness::test_add_relation_with_unit_data",
"test/test_testing.py::TestHarness::test_add_resource_but_oci",
"test/test_testing.py::TestHarness::test_add_resource_bytes",
"test/test_testing.py::TestHarness::test_add_resource_string",
"test/test_testing.py::TestHarness::test_add_resource_unknown",
"test/test_testing.py::TestHarness::test_add_resource_unknown_filename",
"test/test_testing.py::TestHarness::test_add_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_not_attached_default",
"test/test_testing.py::TestHarness::test_add_storage_then_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_app_status",
"test/test_testing.py::TestHarness::test_attach_storage",
"test/test_testing.py::TestHarness::test_attach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_bad_config_option_type",
"test/test_testing.py::TestHarness::test_begin_twice",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_install_sets_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_multiple_relation_same_endpoint",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations_not_leader",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_peer_relation_pre_defined",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_relation_charm_with_no_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_unknown_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_application_data",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_multiple_units",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_one_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_peer_relation",
"test/test_testing.py::TestHarness::test_can_connect_begin_with_initial_hooks",
"test/test_testing.py::TestHarness::test_can_connect_default",
"test/test_testing.py::TestHarness::test_config_from_directory",
"test/test_testing.py::TestHarness::test_config_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_container_isdir_and_exists",
"test/test_testing.py::TestHarness::test_container_pebble_ready",
"test/test_testing.py::TestHarness::test_create_harness_twice",
"test/test_testing.py::TestHarness::test_detach_storage",
"test/test_testing.py::TestHarness::test_detach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_empty_config_raises",
"test/test_testing.py::TestHarness::test_evaluate_status",
"test/test_testing.py::TestHarness::test_event_context",
"test/test_testing.py::TestHarness::test_event_context_inverse",
"test/test_testing.py::TestHarness::test_get_backend_calls",
"test/test_testing.py::TestHarness::test_get_backend_calls_with_kwargs",
"test/test_testing.py::TestHarness::test_get_filesystem_root",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan_unknown",
"test/test_testing.py::TestHarness::test_get_pod_spec",
"test/test_testing.py::TestHarness::test_get_relation_data",
"test/test_testing.py::TestHarness::test_harness_leader_misconfig",
"test/test_testing.py::TestHarness::test_hooks_disabled_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_nested_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_noop",
"test/test_testing.py::TestHarness::test_hooks_enabled_and_disabled",
"test/test_testing.py::TestHarness::test_metadata_from_directory",
"test/test_testing.py::TestHarness::test_metadata_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_no_config_option_type",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_populate_oci_resources",
"test/test_testing.py::TestHarness::test_relation_events",
"test/test_testing.py::TestHarness::test_relation_get_when_broken",
"test/test_testing.py::TestHarness::test_relation_set_app_not_leader",
"test/test_testing.py::TestHarness::test_relation_set_deletes",
"test/test_testing.py::TestHarness::test_relation_set_nonstring",
"test/test_testing.py::TestHarness::test_remove_detached_storage",
"test/test_testing.py::TestHarness::test_remove_relation",
"test/test_testing.py::TestHarness::test_remove_relation_unit",
"test/test_testing.py::TestHarness::test_remove_specific_relation_id",
"test/test_testing.py::TestHarness::test_remove_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_removing_invalid_relation_id_raises_exception",
"test/test_testing.py::TestHarness::test_removing_relation_refreshes_charm_model",
"test/test_testing.py::TestHarness::test_removing_relation_removes_remote_app_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_does_not_remove_other_unit_and_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_removes_data_also",
"test/test_testing.py::TestHarness::test_resource_folder_cleanup",
"test/test_testing.py::TestHarness::test_set_leader",
"test/test_testing.py::TestHarness::test_set_model_info_after_begin",
"test/test_testing.py::TestHarness::test_set_model_name",
"test/test_testing.py::TestHarness::test_set_model_name_after_begin",
"test/test_testing.py::TestHarness::test_set_model_uuid_after_begin",
"test/test_testing.py::TestHarness::test_set_workload_version",
"test/test_testing.py::TestHarness::test_storage_with_hyphens_works",
"test/test_testing.py::TestHarness::test_uncastable_config_option_type",
"test/test_testing.py::TestHarness::test_unit_status",
"test/test_testing.py::TestHarness::test_update_config",
"test/test_testing.py::TestHarness::test_update_config_bad_type",
"test/test_testing.py::TestHarness::test_update_config_undefined_option",
"test/test_testing.py::TestHarness::test_update_config_unset_boolean",
"test/test_testing.py::TestHarness::test_update_peer_relation_app_data",
"test/test_testing.py::TestHarness::test_update_peer_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_exposes_new_data",
"test/test_testing.py::TestHarness::test_update_relation_no_local_app_change_event",
"test/test_testing.py::TestHarness::test_update_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_remove_data",
"test/test_testing.py::TestNetwork::test_add_network_all_args",
"test/test_testing.py::TestNetwork::test_add_network_default_fallback",
"test/test_testing.py::TestNetwork::test_add_network_defaults",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_and_relation_id_do_not_correspond",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_fallback",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_not_in_meta",
"test/test_testing.py::TestNetwork::test_add_network_ipv6",
"test/test_testing.py::TestNetwork::test_add_network_relation_id_incorrect",
"test/test_testing.py::TestNetwork::test_add_network_relation_id_set_endpoint_not_set",
"test/test_testing.py::TestNetwork::test_add_network_specific_endpoint",
"test/test_testing.py::TestNetwork::test_add_network_specific_relation",
"test/test_testing.py::TestNetwork::test_network_get_relation_not_found",
"test/test_testing.py::TestTestingModelBackend::test_conforms_to_model_backend",
"test/test_testing.py::TestTestingModelBackend::test_get_pebble_methods",
"test/test_testing.py::TestTestingModelBackend::test_lazy_resource_directory",
"test/test_testing.py::TestTestingModelBackend::test_model_uuid_is_uuid_v4",
"test/test_testing.py::TestTestingModelBackend::test_relation_get_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_ids_unknown_relation",
"test/test_testing.py::TestTestingModelBackend::test_relation_list_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_remote_app_name",
"test/test_testing.py::TestTestingModelBackend::test_resource_get_no_resource",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_app",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_unit",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_no_override",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_replace",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_not_combined",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_three_services",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_autostart",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_bad_request",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_none",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_not_started",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_start_stop",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_subset",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_invalid_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_methods_match_pebble_client",
"test/test_testing.py::TestTestingPebbleClient::test_mixed_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_send_signal",
"test/test_testing.py::TestTestingPebbleClient::test_start_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_start_started_service",
"test/test_testing.py::TestTestingPebbleClient::test_stop_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_stop_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_stop_stopped_service",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_container_storage_mounts",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_directory_object_itself",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_not_found_raises",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_recursively",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_subdir_of_file_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_pull_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_pull_not_found",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_list_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_bytes",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_larger_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_non_utf8_data",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_as_child_of_file_raises_error",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_bytes_ignore_encoding",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_bytesio_ignore_encoding",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_file_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list_by_pattern",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_to_non_existent_subdir",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_remove_path",
"test/test_testing.py::TestFilesystem::test_list_files",
"test/test_testing.py::TestFilesystem::test_make_dir",
"test/test_testing.py::TestFilesystem::test_pull",
"test/test_testing.py::TestFilesystem::test_push",
"test/test_testing.py::TestFilesystem::test_push_create_parent",
"test/test_testing.py::TestFilesystem::test_storage_mount",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_app_instance",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_app_name_str",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_unit_instance",
"test/test_testing.py::TestSecrets::test_add_model_secret_invalid_content",
"test/test_testing.py::TestSecrets::test_get_secret_grants",
"test/test_testing.py::TestSecrets::test_grant_secret_and_revoke_secret",
"test/test_testing.py::TestSecrets::test_grant_secret_no_relation",
"test/test_testing.py::TestSecrets::test_grant_secret_wrong_app",
"test/test_testing.py::TestSecrets::test_grant_secret_wrong_unit",
"test/test_testing.py::TestSecrets::test_set_secret_content",
"test/test_testing.py::TestSecrets::test_set_secret_content_invalid_content",
"test/test_testing.py::TestSecrets::test_set_secret_content_invalid_secret_id",
"test/test_testing.py::TestSecrets::test_set_secret_content_wrong_owner",
"test/test_testing.py::TestSecrets::test_trigger_secret_expiration",
"test/test_testing.py::TestSecrets::test_trigger_secret_removal",
"test/test_testing.py::TestSecrets::test_trigger_secret_rotation",
"test/test_testing.py::TestPorts::test_errors",
"test/test_testing.py::TestPorts::test_ports",
"test/test_testing.py::TestHandleExec::test_combined_error",
"test/test_testing.py::TestHandleExec::test_exec_service_context",
"test/test_testing.py::TestHandleExec::test_exec_stdin",
"test/test_testing.py::TestHandleExec::test_exec_stdout_stderr",
"test/test_testing.py::TestHandleExec::test_exec_timeout",
"test/test_testing.py::TestHandleExec::test_re_register_handler",
"test/test_testing.py::TestHandleExec::test_register_handler",
"test/test_testing.py::TestHandleExec::test_register_match_all_prefix",
"test/test_testing.py::TestHandleExec::test_register_with_handler",
"test/test_testing.py::TestHandleExec::test_register_with_result"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2023-10-02 20:27:08+00:00 | apache-2.0 | 1,477 |
|
canonical__operator-1049 | diff --git a/ops/pebble.py b/ops/pebble.py
index 3654185..80ce8f6 100644
--- a/ops/pebble.py
+++ b/ops/pebble.py
@@ -1623,6 +1623,10 @@ class Client:
message = f'{type(e2).__name__} - {e2}'
raise APIError(body, code, status, message)
except urllib.error.URLError as e:
+ if e.args and isinstance(e.args[0], FileNotFoundError):
+ raise ConnectionError(
+ f"Could not connect to Pebble: socket not found at {self.socket_path!r} "
+ "(container restarted?)") from None
raise ConnectionError(e.reason)
return response
| canonical/operator | 959009081a4c6a1b0628b98d758796d47a64cd55 | diff --git a/test/test_pebble.py b/test/test_pebble.py
index 47e7388..af4937d 100644
--- a/test/test_pebble.py
+++ b/test/test_pebble.py
@@ -2579,6 +2579,7 @@ class TestSocketClient(unittest.TestCase):
with self.assertRaises(pebble.ConnectionError) as cm:
client.get_system_info()
self.assertIsInstance(cm.exception, pebble.Error)
+ self.assertIn("Could not connect to Pebble", str(cm.exception))
def test_real_client(self):
shutdown, socket_path = fake_pebble.start_server()
| Better Error Handling if the pebble socket goes missing
On a Microk8s cloud there were some issues causing containers to get restarted. Because containers were being rescheduled, a charm hook that was firing at the time ultimately failed to stay connected to pebble. The error message that you end up getting is "NoSuchFile", which is hard to interpret as "pebble went away".
The specific traceback was:
```
Traceback (most recent call last):
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/pebble.py", line 1331, in _request_raw
response = self.opener.open(request, timeout=self.timeout)
File "/usr/lib/python3.8/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/lib/python3.8/urllib/request.py", line 542, in _open
result = self._call_chain(self.handle_open, protocol, protocol +
File "/usr/lib/python3.8/urllib/request.py", line 502, in _call_chain
result = func(*args)
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/pebble.py", line 84, in http_open
return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)
File "/usr/lib/python3.8/urllib/request.py", line 1357, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [Errno 2] No such file or directory>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./src/charm.py", line 777, in <module>
main(PostgresqlOperatorCharm, use_juju_for_storage=True)
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/main.py", line 429, in main
framework.reemit()
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/framework.py", line 794, in reemit
self._reemit()
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/framework.py", line 857, in _reemit
custom_handler(event)
File "./src/charm.py", line 381, in _on_postgresql_pebble_ready
current_layer = container.get_plan()
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/model.py", line 1367, in get_plan
return self._pebble.get_plan()
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/pebble.py", line 1615, in get_plan
resp = self._request('GET', '/v1/plan', {'format': 'yaml'})
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/pebble.py", line 1297, in _request
response = self._request_raw(method, path, query, headers, data)
File "/var/lib/juju/agents/unit-postgresql-k8s-0/charm/venv/ops/pebble.py", line 1344, in _request_raw
raise ConnectionError(e.reason)
ops.pebble.ConnectionError: [Errno 2] No such file or directory
```
There is a 'pebble.ConnectionError' in there, but the feedback from others is that it really wasn't clear why this was failing.
It would be nice if we could more clearly articulate this as something like "could not connect to pebble, socket not found at /...". Or even better if we could tell that we *did* have a connection earlier in the process, and could report that we *lost* the connection to pebble.
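
As a rough, non-authoritative sketch (not from the original issue) of how charm code can cope with this failure mode: guard Pebble calls with `can_connect()` and treat `ops.pebble.ConnectionError` as "Pebble is unavailable right now". The helper name and the return-None convention are assumptions for illustration only.

```python
from typing import Optional

import ops
from ops import pebble


def safe_get_plan(container: ops.Container) -> Optional[pebble.Plan]:
    """Return the container's Pebble plan, or None if Pebble is unreachable.

    Defensive sketch for the situation described above: the socket can
    disappear mid-hook if the container is restarted, surfacing as a
    pebble.ConnectionError.
    """
    if not container.can_connect():
        return None
    try:
        return container.get_plan()
    except pebble.ConnectionError:
        # Pebble went away between the check and the call; treat it like
        # "not yet connectable" and let a later event retry.
        return None
```
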
| 0.0 | 959009081a4c6a1b0628b98d758796d47a64cd55 | [
"test/test_pebble.py::TestSocketClient::test_socket_not_found"
]
| [
"test/test_pebble.py::TestTypes::test_api_error",
"test/test_pebble.py::TestTypes::test_change_error",
"test/test_pebble.py::TestTypes::test_change_error_with_task_logs",
"test/test_pebble.py::TestTypes::test_change_from_dict",
"test/test_pebble.py::TestTypes::test_change_id",
"test/test_pebble.py::TestTypes::test_change_init",
"test/test_pebble.py::TestTypes::test_change_state",
"test/test_pebble.py::TestTypes::test_connection_error",
"test/test_pebble.py::TestTypes::test_error",
"test/test_pebble.py::TestTypes::test_file_info_from_dict",
"test/test_pebble.py::TestTypes::test_file_info_init",
"test/test_pebble.py::TestTypes::test_file_type",
"test/test_pebble.py::TestTypes::test_path_error",
"test/test_pebble.py::TestTypes::test_protocol_error",
"test/test_pebble.py::TestTypes::test_system_info_from_dict",
"test/test_pebble.py::TestTypes::test_system_info_init",
"test/test_pebble.py::TestTypes::test_task_from_dict",
"test/test_pebble.py::TestTypes::test_task_id",
"test/test_pebble.py::TestTypes::test_task_init",
"test/test_pebble.py::TestTypes::test_task_progress_from_dict",
"test/test_pebble.py::TestTypes::test_task_progress_init",
"test/test_pebble.py::TestTypes::test_timeout_error",
"test/test_pebble.py::TestTypes::test_warning_from_dict",
"test/test_pebble.py::TestTypes::test_warning_init",
"test/test_pebble.py::TestTypes::test_warning_state",
"test/test_pebble.py::TestPlan::test_checks",
"test/test_pebble.py::TestPlan::test_no_args",
"test/test_pebble.py::TestPlan::test_service_equality",
"test/test_pebble.py::TestPlan::test_services",
"test/test_pebble.py::TestPlan::test_yaml",
"test/test_pebble.py::TestLayer::test_dict",
"test/test_pebble.py::TestLayer::test_layer_equality",
"test/test_pebble.py::TestLayer::test_layer_service_equality",
"test/test_pebble.py::TestLayer::test_no_args",
"test/test_pebble.py::TestLayer::test_yaml",
"test/test_pebble.py::TestService::test_dict",
"test/test_pebble.py::TestService::test_equality",
"test/test_pebble.py::TestService::test_name_only",
"test/test_pebble.py::TestCheck::test_dict",
"test/test_pebble.py::TestCheck::test_equality",
"test/test_pebble.py::TestCheck::test_level_raw",
"test/test_pebble.py::TestCheck::test_name_only",
"test/test_pebble.py::TestServiceInfo::test_is_running",
"test/test_pebble.py::TestServiceInfo::test_service_info",
"test/test_pebble.py::TestServiceInfo::test_service_startup",
"test/test_pebble.py::TestServiceInfo::test_service_status",
"test/test_pebble.py::TestCheckInfo::test_check_info",
"test/test_pebble.py::TestCheckInfo::test_check_level",
"test/test_pebble.py::TestCheckInfo::test_check_status",
"test/test_pebble.py::TestMultipartParser::test_multipart_parser",
"test/test_pebble.py::TestClient::test_abort_change",
"test/test_pebble.py::TestClient::test_ack_warnings",
"test/test_pebble.py::TestClient::test_add_layer",
"test/test_pebble.py::TestClient::test_add_layer_invalid_type",
"test/test_pebble.py::TestClient::test_autostart_services",
"test/test_pebble.py::TestClient::test_autostart_services_async",
"test/test_pebble.py::TestClient::test_change_error",
"test/test_pebble.py::TestClient::test_checklevel_conversion",
"test/test_pebble.py::TestClient::test_client_init",
"test/test_pebble.py::TestClient::test_get_change",
"test/test_pebble.py::TestClient::test_get_change_str",
"test/test_pebble.py::TestClient::test_get_changes",
"test/test_pebble.py::TestClient::test_get_checks_all",
"test/test_pebble.py::TestClient::test_get_checks_filters",
"test/test_pebble.py::TestClient::test_get_plan",
"test/test_pebble.py::TestClient::test_get_services_all",
"test/test_pebble.py::TestClient::test_get_services_names",
"test/test_pebble.py::TestClient::test_get_system_info",
"test/test_pebble.py::TestClient::test_get_warnings",
"test/test_pebble.py::TestClient::test_list_files_itself",
"test/test_pebble.py::TestClient::test_list_files_path",
"test/test_pebble.py::TestClient::test_list_files_pattern",
"test/test_pebble.py::TestClient::test_make_dir_all_options",
"test/test_pebble.py::TestClient::test_make_dir_basic",
"test/test_pebble.py::TestClient::test_make_dir_error",
"test/test_pebble.py::TestClient::test_pull_binary",
"test/test_pebble.py::TestClient::test_pull_boundary_spanning_chunk",
"test/test_pebble.py::TestClient::test_pull_path_error",
"test/test_pebble.py::TestClient::test_pull_protocol_errors",
"test/test_pebble.py::TestClient::test_pull_text",
"test/test_pebble.py::TestClient::test_push_all_options",
"test/test_pebble.py::TestClient::test_push_binary",
"test/test_pebble.py::TestClient::test_push_bytes",
"test/test_pebble.py::TestClient::test_push_path_error",
"test/test_pebble.py::TestClient::test_push_str",
"test/test_pebble.py::TestClient::test_push_text",
"test/test_pebble.py::TestClient::test_push_uid_gid",
"test/test_pebble.py::TestClient::test_remove_path_basic",
"test/test_pebble.py::TestClient::test_remove_path_error",
"test/test_pebble.py::TestClient::test_remove_path_recursive",
"test/test_pebble.py::TestClient::test_replan_services",
"test/test_pebble.py::TestClient::test_replan_services_async",
"test/test_pebble.py::TestClient::test_restart_services",
"test/test_pebble.py::TestClient::test_restart_services_async",
"test/test_pebble.py::TestClient::test_send_signal_name",
"test/test_pebble.py::TestClient::test_send_signal_number",
"test/test_pebble.py::TestClient::test_send_signal_type_error",
"test/test_pebble.py::TestClient::test_start_services",
"test/test_pebble.py::TestClient::test_start_services_async",
"test/test_pebble.py::TestClient::test_stop_services",
"test/test_pebble.py::TestClient::test_stop_services_async",
"test/test_pebble.py::TestClient::test_wait_change_error",
"test/test_pebble.py::TestClient::test_wait_change_success",
"test/test_pebble.py::TestClient::test_wait_change_success_multiple_calls",
"test/test_pebble.py::TestClient::test_wait_change_success_polled",
"test/test_pebble.py::TestClient::test_wait_change_success_polled_timeout_none",
"test/test_pebble.py::TestClient::test_wait_change_success_timeout_none",
"test/test_pebble.py::TestClient::test_wait_change_timeout",
"test/test_pebble.py::TestClient::test_wait_change_timeout_polled",
"test/test_pebble.py::TestSocketClient::test_real_client",
"test/test_pebble.py::TestExecError::test_init",
"test/test_pebble.py::TestExecError::test_str",
"test/test_pebble.py::TestExecError::test_str_truncated",
"test/test_pebble.py::TestExec::test_arg_errors",
"test/test_pebble.py::TestExec::test_connect_websocket_error",
"test/test_pebble.py::TestExec::test_no_wait_call",
"test/test_pebble.py::TestExec::test_send_signal",
"test/test_pebble.py::TestExec::test_wait_change_error",
"test/test_pebble.py::TestExec::test_wait_exit_nonzero",
"test/test_pebble.py::TestExec::test_wait_exit_zero",
"test/test_pebble.py::TestExec::test_wait_file_io",
"test/test_pebble.py::TestExec::test_wait_other_args",
"test/test_pebble.py::TestExec::test_wait_output",
"test/test_pebble.py::TestExec::test_wait_output_bad_command",
"test/test_pebble.py::TestExec::test_wait_output_bytes",
"test/test_pebble.py::TestExec::test_wait_output_combine_stderr",
"test/test_pebble.py::TestExec::test_wait_output_exit_nonzero",
"test/test_pebble.py::TestExec::test_wait_output_exit_nonzero_combine_stderr",
"test/test_pebble.py::TestExec::test_wait_output_no_stdout",
"test/test_pebble.py::TestExec::test_wait_output_send_stdin",
"test/test_pebble.py::TestExec::test_wait_output_send_stdin_bytes",
"test/test_pebble.py::TestExec::test_wait_passed_output",
"test/test_pebble.py::TestExec::test_wait_passed_output_bad_command",
"test/test_pebble.py::TestExec::test_wait_passed_output_bytes",
"test/test_pebble.py::TestExec::test_wait_passed_output_combine_stderr",
"test/test_pebble.py::TestExec::test_wait_returned_io",
"test/test_pebble.py::TestExec::test_wait_returned_io_bytes",
"test/test_pebble.py::TestExec::test_wait_timeout",
"test/test_pebble.py::TestExec::test_websocket_recv_raises",
"test/test_pebble.py::TestExec::test_websocket_send_raises"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2023-10-16 06:42:25+00:00 | apache-2.0 | 1,478 |
|
canonical__operator-1057 | diff --git a/ops/storage.py b/ops/storage.py
index 5dda21d..9f259f4 100644
--- a/ops/storage.py
+++ b/ops/storage.py
@@ -18,6 +18,7 @@ import os
import pickle
import shutil
import sqlite3
+import stat
import subprocess
from datetime import timedelta
from pathlib import Path
@@ -59,11 +60,29 @@ class SQLiteStorage:
# sqlite3.connect creates the file silently if it does not exist
logger.debug(f"Initializing SQLite local storage: {filename}.")
+ if filename != ":memory:":
+ self._ensure_db_permissions(str(filename))
self._db = sqlite3.connect(str(filename),
isolation_level=None,
timeout=self.DB_LOCK_TIMEOUT.total_seconds())
self._setup()
+ def _ensure_db_permissions(self, filename: str):
+ """Make sure that the DB file has appropriately secure permissions."""
+ mode = stat.S_IRUSR | stat.S_IWUSR
+ if os.path.exists(filename):
+ try:
+ os.chmod(filename, mode)
+ except OSError as e:
+ raise RuntimeError(f"Unable to adjust access permission of {filename!r}") from e
+ return
+
+ try:
+ fd = os.open(filename, os.O_CREAT | os.O_EXCL, mode=mode)
+ except OSError as e:
+ raise RuntimeError(f"Unable to adjust access permission of {filename!r}") from e
+ os.close(fd)
+
def _setup(self):
"""Make the database ready to be used as storage."""
# Make sure that the database is locked until the connection is closed,
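
A stand-alone sketch (not part of the record) of the owner-only-permissions pattern the patch above applies. The helper name and the example path are assumptions; the stat and os calls mirror those used in the patch.

```python
import os
import stat


def ensure_owner_only(path: str) -> None:
    """Create *path* with mode 0o600, or tighten an existing file to 0o600.

    Mirrors the approach in the patch above: chmod the file if it already
    exists, otherwise create it with O_CREAT | O_EXCL so the restrictive
    mode applies from the very first write.
    """
    mode = stat.S_IRUSR | stat.S_IWUSR  # owner read/write only (0o600)
    if os.path.exists(path):
        os.chmod(path, mode)
        return
    fd = os.open(path, os.O_CREAT | os.O_EXCL, mode=mode)
    os.close(fd)


# Hypothetical usage for a charm's local state file:
# ensure_owner_only("/var/lib/juju/agents/unit-app-0/charm/.unit-state.db")
```
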
| canonical/operator | d1941e136e7e28d6d4ce1b69e374803acb89b1b7 | diff --git a/test/test_storage.py b/test/test_storage.py
index 2d58b75..b8467c8 100644
--- a/test/test_storage.py
+++ b/test/test_storage.py
@@ -17,10 +17,12 @@ import gc
import io
import os
import pathlib
+import stat
import sys
import tempfile
import typing
import unittest
+import unittest.mock
from test.test_helpers import BaseTestCase, fake_script, fake_script_calls
from textwrap import dedent
@@ -218,6 +220,41 @@ class TestSQLiteStorage(StoragePermutations, BaseTestCase):
def create_storage(self):
return ops.storage.SQLiteStorage(':memory:')
+ def test_permissions_new(self):
+ with tempfile.TemporaryDirectory() as temp_dir:
+ filename = os.path.join(temp_dir, ".unit-state.db")
+ storage = ops.storage.SQLiteStorage(filename)
+ self.assertEqual(stat.S_IMODE(os.stat(filename).st_mode), stat.S_IRUSR | stat.S_IWUSR)
+ storage.close()
+
+ def test_permissions_existing(self):
+ with tempfile.TemporaryDirectory() as temp_dir:
+ filename = os.path.join(temp_dir, ".unit-state.db")
+ ops.storage.SQLiteStorage(filename).close()
+ # Set the file to access that will need fixing for user, group, and other.
+ os.chmod(filename, 0o744)
+ storage = ops.storage.SQLiteStorage(filename)
+ self.assertEqual(stat.S_IMODE(os.stat(filename).st_mode), stat.S_IRUSR | stat.S_IWUSR)
+ storage.close()
+
+ @unittest.mock.patch("os.path.exists")
+ def test_permissions_race(self, exists: unittest.mock.MagicMock):
+ exists.return_value = False
+ with tempfile.TemporaryDirectory() as temp_dir:
+ filename = os.path.join(temp_dir, ".unit-state.db")
+ # Create an existing file, but the mock will simulate a race condition saying that it
+ # does not exist.
+ open(filename, "w").close()
+ self.assertRaises(RuntimeError, ops.storage.SQLiteStorage, filename)
+
+ @unittest.mock.patch("os.chmod")
+ def test_permissions_failure(self, chmod: unittest.mock.MagicMock):
+ chmod.side_effect = OSError
+ with tempfile.TemporaryDirectory() as temp_dir:
+ filename = os.path.join(temp_dir, ".unit-state.db")
+ open(filename, "w").close()
+ self.assertRaises(RuntimeError, ops.storage.SQLiteStorage, filename)
+
def setup_juju_backend(test_case: unittest.TestCase, state_file: pathlib.Path):
"""Create fake scripts for pretending to be state-set and state-get."""
| .unit-state.db is world-readable
.unit-state.db created by an operator framework charm is visible to any user on the system. The permissions should be tightened up.
Here is one example from a charm that saved a private key for a TLS certificate:
```
$ sudo -u nobody strings /var/lib/juju/agents/unit-ceph-dashboard-1/charm/.unit-state.db | grep PRIVATE
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----
``` | 0.0 | d1941e136e7e28d6d4ce1b69e374803acb89b1b7 | [
"test/test_storage.py::TestSQLiteStorage::test_permissions_existing",
"test/test_storage.py::TestSQLiteStorage::test_permissions_failure",
"test/test_storage.py::TestSQLiteStorage::test_permissions_new",
"test/test_storage.py::TestSQLiteStorage::test_permissions_race"
]
| [
"test/test_storage.py::TestSQLiteStorage::test_all_notices",
"test/test_storage.py::TestSQLiteStorage::test_drop_snapshot",
"test/test_storage.py::TestSQLiteStorage::test_emit_event",
"test/test_storage.py::TestSQLiteStorage::test_load_notices",
"test/test_storage.py::TestSQLiteStorage::test_save_and_load_snapshot",
"test/test_storage.py::TestSQLiteStorage::test_save_and_overwrite_snapshot",
"test/test_storage.py::TestSQLiteStorage::test_save_load_drop_load_notices",
"test/test_storage.py::TestSQLiteStorage::test_save_notice",
"test/test_storage.py::TestSQLiteStorage::test_save_one_load_another_notice",
"test/test_storage.py::TestSQLiteStorage::test_save_snapshot_empty_string",
"test/test_storage.py::TestSQLiteStorage::test_save_snapshot_none",
"test/test_storage.py::TestSQLiteStorage::test_save_snapshot_zero",
"test/test_storage.py::TestJujuStorage::test_all_notices",
"test/test_storage.py::TestJujuStorage::test_drop_snapshot",
"test/test_storage.py::TestJujuStorage::test_emit_event",
"test/test_storage.py::TestJujuStorage::test_load_notices",
"test/test_storage.py::TestJujuStorage::test_save_and_load_snapshot",
"test/test_storage.py::TestJujuStorage::test_save_and_overwrite_snapshot",
"test/test_storage.py::TestJujuStorage::test_save_load_drop_load_notices",
"test/test_storage.py::TestJujuStorage::test_save_notice",
"test/test_storage.py::TestJujuStorage::test_save_one_load_another_notice",
"test/test_storage.py::TestJujuStorage::test_save_snapshot_empty_string",
"test/test_storage.py::TestJujuStorage::test_save_snapshot_none",
"test/test_storage.py::TestJujuStorage::test_save_snapshot_zero",
"test/test_storage.py::TestSimpleLoader::test_forbids_some_types",
"test/test_storage.py::TestSimpleLoader::test_handles_tuples",
"test/test_storage.py::TestSimpleLoader::test_is_c_dumper",
"test/test_storage.py::TestSimpleLoader::test_is_c_loader",
"test/test_storage.py::TestJujuStateBackend::test_get",
"test/test_storage.py::TestJujuStateBackend::test_is_available",
"test/test_storage.py::TestJujuStateBackend::test_is_not_available",
"test/test_storage.py::TestJujuStateBackend::test_set_and_get_complex_value",
"test/test_storage.py::TestJujuStateBackend::test_set_encodes_args"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2023-11-07 13:35:03+00:00 | apache-2.0 | 1,479 |
|
canonical__operator-1124 | diff --git a/CHANGES.md b/CHANGES.md
index c0049d3..07774d9 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,7 @@
+# 2.11.0
+
+* Added `ActionEvent.id`, exposing the JUJU_ACTION_UUID environment variable.
+
# 2.10.0
* Added support for Pebble Notices (`PebbleCustomNoticeEvent`, `get_notices`, and so on)
diff --git a/ops/charm.py b/ops/charm.py
index 4496383..cb77f59 100644
--- a/ops/charm.py
+++ b/ops/charm.py
@@ -125,9 +125,16 @@ class ActionEvent(EventBase):
:meth:`log`.
"""
+ id: str = ""
+ """The Juju ID of the action invocation."""
+
params: Dict[str, Any]
"""The parameters passed to the action."""
+ def __init__(self, handle: 'Handle', id: Optional[str] = None):
+ super().__init__(handle)
+ self.id = id # type: ignore (for backwards compatibility)
+
def defer(self) -> NoReturn:
"""Action events are not deferrable like other events.
@@ -144,10 +151,18 @@ class ActionEvent(EventBase):
Not meant to be called directly by charm code.
"""
+ self.id = cast(str, snapshot['id'])
# Params are loaded at restore rather than __init__ because
# the model is not available in __init__.
self.params = self.framework.model._backend.action_get()
+ def snapshot(self) -> Dict[str, Any]:
+ """Used by the framework to serialize the event to disk.
+
+ Not meant to be called by charm code.
+ """
+ return {'id': self.id}
+
def set_results(self, results: Dict[str, Any]):
"""Report the result of the action.
diff --git a/ops/framework.py b/ops/framework.py
index 717c74c..eb3b1c3 100644
--- a/ops/framework.py
+++ b/ops/framework.py
@@ -1057,10 +1057,10 @@ class BoundStoredState:
if TYPE_CHECKING:
# to help the type checker and IDEs:
@property
- def _data(self) -> StoredStateData: ... # noqa, type: ignore
+ def _data(self) -> StoredStateData: ... # type: ignore
@property
- def _attr_name(self) -> str: ... # noqa, type: ignore
+ def _attr_name(self) -> str: ... # type: ignore
def __init__(self, parent: Object, attr_name: str):
parent.framework.register_type(StoredStateData, parent)
diff --git a/ops/main.py b/ops/main.py
index 04c756c..6f7b893 100644
--- a/ops/main.py
+++ b/ops/main.py
@@ -191,6 +191,9 @@ def _get_event_args(charm: 'ops.charm.CharmBase',
storage = cast(Union[ops.storage.JujuStorage, ops.storage.SQLiteStorage], storage)
storage.location = storage_location # type: ignore
return [storage], {}
+ elif issubclass(event_type, ops.charm.ActionEvent):
+ args: List[Any] = [os.environ['JUJU_ACTION_UUID']]
+ return args, {}
elif issubclass(event_type, ops.charm.RelationEvent):
relation_name = os.environ['JUJU_RELATION']
relation_id = _get_juju_relation_id()
| canonical/operator | 2f304d3af048edd758bee8b4b4d59f474b33a286 | diff --git a/ops/testing.py b/ops/testing.py
index 0d782a6..5d509cb 100644
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -246,6 +246,7 @@ class Harness(Generic[CharmType]):
self._unit_name: str = f"{self._meta.name}/0"
self._hooks_enabled: bool = True
self._relation_id_counter: int = 0
+ self._action_id_counter: int = 0
config_ = self._get_config(config)
self._backend = _TestingModelBackend(self._unit_name, self._meta, config_)
self._model = model.Model(self._meta, self._backend)
@@ -1883,7 +1884,8 @@ class Harness(Generic[CharmType]):
action_under_test = _RunningAction(action_name, ActionOutput([], {}), params)
handler = getattr(self.charm.on, f"{action_name.replace('-', '_')}_action")
self._backend._running_action = action_under_test
- handler.emit()
+ self._action_id_counter += 1
+ handler.emit(str(self._action_id_counter))
self._backend._running_action = None
if action_under_test.failure_message is not None:
raise ActionFailed(
diff --git a/test/test_charm.py b/test/test_charm.py
index ac14784..627d2d5 100644
--- a/test/test_charm.py
+++ b/test/test_charm.py
@@ -463,7 +463,7 @@ start:
def _on_foo_bar_action(self, event: ops.ActionEvent):
self.seen_action_params = event.params
event.log('test-log')
- event.set_results({'res': 'val with spaces'})
+ event.set_results({'res': 'val with spaces', 'id': event.id})
event.fail('test-fail')
def _on_start_action(self, event: ops.ActionEvent):
@@ -477,12 +477,13 @@ start:
self.assertIn('foo_bar_action', events)
self.assertIn('start_action', events)
- charm.on.foo_bar_action.emit()
+ action_id = "1234"
+ charm.on.foo_bar_action.emit(id=action_id)
self.assertEqual(charm.seen_action_params, {"foo-name": "name", "silent": True})
self.assertEqual(fake_script_calls(self), [
['action-get', '--format=json'],
['action-log', "test-log"],
- ['action-set', "res=val with spaces"],
+ ['action-set', "res=val with spaces", f"id={action_id}"],
['action-fail', "test-fail"],
])
@@ -511,7 +512,7 @@ start:
charm.res = bad_res
with self.assertRaises(ValueError):
- charm.on.foo_bar_action.emit()
+ charm.on.foo_bar_action.emit(id='1')
def _test_action_event_defer_fails(self, cmd_type: str):
@@ -532,7 +533,7 @@ start:
charm = MyCharm(framework)
with self.assertRaises(RuntimeError):
- charm.on.start_action.emit()
+ charm.on.start_action.emit(id='2')
def test_action_event_defer_fails(self):
self._test_action_event_defer_fails('action')
diff --git a/test/test_framework.py b/test/test_framework.py
index e7c7a20..7a2a778 100644
--- a/test/test_framework.py
+++ b/test/test_framework.py
@@ -1813,7 +1813,7 @@ class DebugHookTests(BaseTestCase):
with patch('sys.stderr', new_callable=io.StringIO):
with patch('pdb.runcall') as mock:
- publisher.foobar_action.emit()
+ publisher.foobar_action.emit(id='1')
self.assertEqual(mock.call_count, 1)
self.assertFalse(observer.called)
@@ -1833,7 +1833,7 @@ class DebugHookTests(BaseTestCase):
with patch('sys.stderr', new_callable=io.StringIO):
with patch('pdb.runcall') as mock:
- publisher.foobar_action.emit()
+ publisher.foobar_action.emit(id='2')
self.assertEqual(mock.call_count, 1)
self.assertFalse(observer.called)
diff --git a/test/test_main.py b/test/test_main.py
index 7c18495..3c9d5ec 100644
--- a/test/test_main.py
+++ b/test/test_main.py
@@ -586,11 +586,13 @@ class _TestMain(abc.ABC):
'departing_unit': 'remote/42'},
), (
EventSpec(ops.ActionEvent, 'start_action',
- env_var='JUJU_ACTION_NAME'),
+ env_var='JUJU_ACTION_NAME',
+ set_in_env={'JUJU_ACTION_UUID': '1'}),
{},
), (
EventSpec(ops.ActionEvent, 'foo_bar_action',
- env_var='JUJU_ACTION_NAME'),
+ env_var='JUJU_ACTION_NAME',
+ set_in_env={'JUJU_ACTION_UUID': '2'}),
{},
), (
EventSpec(ops.PebbleReadyEvent, 'test_pebble_ready',
@@ -726,19 +728,20 @@ class _TestMain(abc.ABC):
fake_script(typing.cast(unittest.TestCase, self), 'action-get', "echo '{}'")
test_cases = [(
- EventSpec(ops.ActionEvent, 'log_critical_action', env_var='JUJU_ACTION_NAME'),
+ EventSpec(ops.ActionEvent, 'log_critical_action', env_var='JUJU_ACTION_NAME',
+ set_in_env={'JUJU_ACTION_UUID': '1'}),
['juju-log', '--log-level', 'CRITICAL', '--', 'super critical'],
), (
EventSpec(ops.ActionEvent, 'log_error_action',
- env_var='JUJU_ACTION_NAME'),
+ env_var='JUJU_ACTION_NAME', set_in_env={'JUJU_ACTION_UUID': '2'}),
['juju-log', '--log-level', 'ERROR', '--', 'grave error'],
), (
EventSpec(ops.ActionEvent, 'log_warning_action',
- env_var='JUJU_ACTION_NAME'),
+ env_var='JUJU_ACTION_NAME', set_in_env={'JUJU_ACTION_UUID': '3'}),
['juju-log', '--log-level', 'WARNING', '--', 'wise warning'],
), (
EventSpec(ops.ActionEvent, 'log_info_action',
- env_var='JUJU_ACTION_NAME'),
+ env_var='JUJU_ACTION_NAME', set_in_env={'JUJU_ACTION_UUID': '4'}),
['juju-log', '--log-level', 'INFO', '--', 'useful info'],
)]
@@ -779,7 +782,8 @@ class _TestMain(abc.ABC):
state = self._simulate_event(EventSpec(
ops.ActionEvent, 'get_model_name_action',
env_var='JUJU_ACTION_NAME',
- model_name='test-model-name'))
+ model_name='test-model-name',
+ set_in_env={'JUJU_ACTION_UUID': '1'}))
assert isinstance(state, ops.BoundStoredState)
self.assertEqual(state._on_get_model_name_action, ['test-model-name'])
@@ -791,7 +795,8 @@ class _TestMain(abc.ABC):
"""echo '{"status": "unknown", "message": ""}'""")
state = self._simulate_event(EventSpec(
ops.ActionEvent, 'get_status_action',
- env_var='JUJU_ACTION_NAME'))
+ env_var='JUJU_ACTION_NAME',
+ set_in_env={'JUJU_ACTION_UUID': '1'}))
assert isinstance(state, ops.BoundStoredState)
self.assertEqual(state.status_name, 'unknown')
self.assertEqual(state.status_message, '')
@@ -801,7 +806,8 @@ class _TestMain(abc.ABC):
"""echo '{"status": "blocked", "message": "help meeee"}'""")
state = self._simulate_event(EventSpec(
ops.ActionEvent, 'get_status_action',
- env_var='JUJU_ACTION_NAME'))
+ env_var='JUJU_ACTION_NAME',
+ set_in_env={'JUJU_ACTION_UUID': '1'}))
assert isinstance(state, ops.BoundStoredState)
self.assertEqual(state.status_name, 'blocked')
self.assertEqual(state.status_message, 'help meeee')
@@ -1169,7 +1175,8 @@ class TestMainWithDispatch(_TestMainWithDispatch, unittest.TestCase):
with self.assertRaises(subprocess.CalledProcessError):
self._simulate_event(EventSpec(
ops.ActionEvent, 'keyerror_action',
- env_var='JUJU_ACTION_NAME'))
+ env_var='JUJU_ACTION_NAME',
+ set_in_env={'JUJU_ACTION_UUID': '1'}))
self.stderr.seek(0)
stderr = self.stderr.read()
self.assertIn('KeyError', stderr)
diff --git a/test/test_testing.py b/test/test_testing.py
index e937265..a2d0cfd 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -5445,6 +5445,7 @@ class TestActions(unittest.TestCase):
def _on_simple_action(self, event: ops.ActionEvent):
"""An action that doesn't generate logs, have any results, or fail."""
self.simple_was_called = True
+ assert isinstance(event.id, str)
def _on_fail_action(self, event: ops.ActionEvent):
event.fail("this will be ignored")
| expose JUJU_ACTION_UUID envvar
Some old hooks-based charms use the JUJU_ACTION_UUID envvar.
When porting them to use ops, it would be nice if there were an ops-native way to access its value.
How about adding an `ActionEvent.uuid -> int` property to fetch it?
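For reference, the workaround available today is to read the variable straight from the hook environment inside the action handler. A minimal sketch follows; the charm class and the `do-thing` action are made up for illustration:
```python
import os

import ops


class MyCharm(ops.CharmBase):
    def __init__(self, framework: ops.Framework):
        super().__init__(framework)
        framework.observe(self.on.do_thing_action, self._on_do_thing)

    def _on_do_thing(self, event: ops.ActionEvent):
        # Juju sets JUJU_ACTION_UUID in the hook environment for action hooks.
        action_uuid = os.environ.get("JUJU_ACTION_UUID", "")
        event.log(f"handling action invocation {action_uuid}")
```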
| 0.0 | 2f304d3af048edd758bee8b4b4d59f474b33a286 | [
"test/test_charm.py::TestCharm::test_action_event_defer_fails",
"test/test_charm.py::TestCharm::test_action_events",
"test/test_charm.py::TestCharm::test_invalid_action_results",
"test/test_framework.py::DebugHookTests::test_actions_are_interrupted",
"test/test_framework.py::DebugHookTests::test_interruption_enabled_with_all",
"test/test_testing.py::TestActions::test_additional_params",
"test/test_testing.py::TestActions::test_bad_results",
"test/test_testing.py::TestActions::test_fail_action",
"test/test_testing.py::TestActions::test_logs_and_results",
"test/test_testing.py::TestActions::test_required_param",
"test/test_testing.py::TestActions::test_run_action"
]
| [
"test/test_charm.py::TestCharm::test_add_status_type_error",
"test/test_charm.py::TestCharm::test_basic",
"test/test_charm.py::TestCharm::test_collect_app_and_unit_status",
"test/test_charm.py::TestCharm::test_collect_app_status_leader",
"test/test_charm.py::TestCharm::test_collect_app_status_no_statuses",
"test/test_charm.py::TestCharm::test_collect_app_status_non_leader",
"test/test_charm.py::TestCharm::test_collect_status_priority",
"test/test_charm.py::TestCharm::test_collect_unit_status",
"test/test_charm.py::TestCharm::test_collect_unit_status_no_statuses",
"test/test_charm.py::TestCharm::test_containers",
"test/test_charm.py::TestCharm::test_containers_storage",
"test/test_charm.py::TestCharm::test_containers_storage_multiple_mounts",
"test/test_charm.py::TestCharm::test_empty_action",
"test/test_charm.py::TestCharm::test_helper_properties",
"test/test_charm.py::TestCharm::test_observe_decorated_method",
"test/test_charm.py::TestCharm::test_relation_events",
"test/test_charm.py::TestCharm::test_relations_meta",
"test/test_charm.py::TestCharm::test_relations_meta_limit_type_validation",
"test/test_charm.py::TestCharm::test_relations_meta_scope_type_validation",
"test/test_charm.py::TestCharm::test_secret_events",
"test/test_charm.py::TestCharm::test_storage_events",
"test/test_charm.py::TestCharm::test_workload_events",
"test/test_charm.py::TestCharmMeta::test_assumes",
"test/test_charm.py::TestCharmMeta::test_links",
"test/test_charm.py::TestCharmMeta::test_links_charmcraft_yaml",
"test/test_framework.py::TestFramework::test_auto_register_event_types",
"test/test_framework.py::TestFramework::test_bad_sig_observer",
"test/test_framework.py::TestFramework::test_ban_concurrent_frameworks",
"test/test_framework.py::TestFramework::test_conflicting_event_attributes",
"test/test_framework.py::TestFramework::test_custom_event_data",
"test/test_framework.py::TestFramework::test_defer_and_reemit",
"test/test_framework.py::TestFramework::test_deprecated_init",
"test/test_framework.py::TestFramework::test_dynamic_event_types",
"test/test_framework.py::TestFramework::test_event_key_roundtrip",
"test/test_framework.py::TestFramework::test_event_regex",
"test/test_framework.py::TestFramework::test_events_base",
"test/test_framework.py::TestFramework::test_forget_and_multiple_objects",
"test/test_framework.py::TestFramework::test_forget_and_multiple_objects_with_load_snapshot",
"test/test_framework.py::TestFramework::test_handle_attrs_readonly",
"test/test_framework.py::TestFramework::test_handle_path",
"test/test_framework.py::TestFramework::test_helper_properties",
"test/test_framework.py::TestFramework::test_on_pre_commit_emitted",
"test/test_framework.py::TestFramework::test_reemit_ignores_unknown_event_type",
"test/test_framework.py::TestFramework::test_remove_unreferenced_events",
"test/test_framework.py::TestFramework::test_restore_unknown",
"test/test_framework.py::TestFramework::test_simple_event_observer",
"test/test_framework.py::TestFramework::test_snapshot_roundtrip",
"test/test_framework.py::TestFramework::test_snapshot_saving_restricted_to_simple_types",
"test/test_framework.py::TestFramework::test_unobserved_events_dont_leave_cruft",
"test/test_framework.py::TestFramework::test_weak_observer",
"test/test_framework.py::TestStoredState::test_basic_state_storage",
"test/test_framework.py::TestStoredState::test_comparison_operations",
"test/test_framework.py::TestStoredState::test_mutable_types",
"test/test_framework.py::TestStoredState::test_mutable_types_invalid",
"test/test_framework.py::TestStoredState::test_same_name_two_classes",
"test/test_framework.py::TestStoredState::test_set_default",
"test/test_framework.py::TestStoredState::test_set_operations",
"test/test_framework.py::TestStoredState::test_stored_dict_repr",
"test/test_framework.py::TestStoredState::test_stored_list_repr",
"test/test_framework.py::TestStoredState::test_stored_set_repr",
"test/test_framework.py::TestStoredState::test_straight_sub_subclass",
"test/test_framework.py::TestStoredState::test_straight_subclass",
"test/test_framework.py::TestStoredState::test_the_crazy_thing",
"test/test_framework.py::TestStoredState::test_two_names_one_state",
"test/test_framework.py::TestStoredState::test_two_subclasses",
"test/test_framework.py::TestStoredState::test_two_subclasses_no_conflicts",
"test/test_framework.py::BreakpointTests::test_breakpoint_builtin_sanity",
"test/test_framework.py::BreakpointTests::test_breakpoint_builtin_unset",
"test/test_framework.py::BreakpointTests::test_breakpoint_names",
"test/test_framework.py::BreakpointTests::test_builtin_breakpoint_hooked",
"test/test_framework.py::BreakpointTests::test_ignored",
"test/test_framework.py::BreakpointTests::test_named_indicated_all",
"test/test_framework.py::BreakpointTests::test_named_indicated_hook",
"test/test_framework.py::BreakpointTests::test_named_indicated_ingroup",
"test/test_framework.py::BreakpointTests::test_named_indicated_somethingelse",
"test/test_framework.py::BreakpointTests::test_named_indicated_specifically",
"test/test_framework.py::BreakpointTests::test_named_indicated_unnamed",
"test/test_framework.py::BreakpointTests::test_pdb_properly_called",
"test/test_framework.py::BreakpointTests::test_unnamed_indicated_all",
"test/test_framework.py::BreakpointTests::test_unnamed_indicated_hook",
"test/test_framework.py::BreakpointTests::test_welcome_message",
"test/test_framework.py::BreakpointTests::test_welcome_message_not_multiple",
"test/test_framework.py::DebugHookTests::test_basic_interruption_enabled",
"test/test_framework.py::DebugHookTests::test_envvar_missing",
"test/test_framework.py::DebugHookTests::test_envvar_mixed",
"test/test_framework.py::DebugHookTests::test_envvar_nohook",
"test/test_framework.py::DebugHookTests::test_envvar_parsing_empty",
"test/test_framework.py::DebugHookTests::test_envvar_parsing_missing",
"test/test_framework.py::DebugHookTests::test_envvar_parsing_multiple",
"test/test_framework.py::DebugHookTests::test_envvar_parsing_simple",
"test/test_framework.py::DebugHookTests::test_internal_events_not_interrupted",
"test/test_framework.py::DebugHookTests::test_no_registered_method",
"test/test_framework.py::DebugHookTests::test_welcome_message_not_multiple",
"test/test_main.py::CharmInitTestCase::test_breakpoint",
"test/test_main.py::CharmInitTestCase::test_controller_storage_deprecated",
"test/test_main.py::CharmInitTestCase::test_init_signature_old_key_argument",
"test/test_main.py::CharmInitTestCase::test_init_signature_only_framework",
"test/test_main.py::CharmInitTestCase::test_init_signature_passthrough",
"test/test_main.py::CharmInitTestCase::test_no_debug_breakpoint",
"test/test_main.py::CharmInitTestCase::test_storage_no_storage",
"test/test_main.py::CharmInitTestCase::test_storage_with_storage",
"test/test_main.py::TestDispatch::test_most_legacy",
"test/test_main.py::TestDispatch::test_with_dispatch",
"test/test_main.py::TestDispatch::test_with_dispatch_path_but_no_dispatch",
"test/test_main.py::TestMainWithNoDispatch::test_collect_metrics",
"test/test_main.py::TestMainWithNoDispatch::test_custom_event",
"test/test_main.py::TestMainWithNoDispatch::test_empty_actions",
"test/test_main.py::TestMainWithNoDispatch::test_event_not_implemented",
"test/test_main.py::TestMainWithNoDispatch::test_event_reemitted",
"test/test_main.py::TestMainWithNoDispatch::test_excepthook",
"test/test_main.py::TestMainWithNoDispatch::test_has_valid_status",
"test/test_main.py::TestMainWithNoDispatch::test_logger",
"test/test_main.py::TestMainWithNoDispatch::test_multiple_events_handled",
"test/test_main.py::TestMainWithNoDispatch::test_no_actions",
"test/test_main.py::TestMainWithNoDispatch::test_no_reemission_on_collect_metrics",
"test/test_main.py::TestMainWithNoDispatch::test_sets_model_name",
"test/test_main.py::TestMainWithNoDispatch::test_setup_action_links",
"test/test_main.py::TestMainWithNoDispatch::test_setup_event_links",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_collect_metrics",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_custom_event",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_empty_actions",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_event_not_implemented",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_event_reemitted",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_excepthook",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_has_valid_status",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_logger",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_multiple_events_handled",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_no_actions",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_no_reemission_on_collect_metrics",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_sets_model_name",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_setup_action_links",
"test/test_main.py::TestMainWithNoDispatchButJujuIsDispatchAware::test_setup_event_links",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_collect_metrics",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_custom_event",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_empty_actions",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_event_not_implemented",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_event_reemitted",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_excepthook",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_has_valid_status",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_logger",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_multiple_events_handled",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_no_actions",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_no_reemission_on_collect_metrics",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_sets_model_name",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_setup_action_links",
"test/test_main.py::TestMainWithNoDispatchButDispatchPathIsSet::test_setup_event_links",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_collect_metrics",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_custom_event",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_empty_actions",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_event_not_implemented",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_event_reemitted",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_excepthook",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_has_valid_status",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_logger",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_multiple_events_handled",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_no_actions",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_no_reemission_on_collect_metrics",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_sets_model_name",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_setup_action_links",
"test/test_main.py::TestMainWithNoDispatchButScriptsAreCopies::test_setup_event_links",
"test/test_main.py::TestMainWithDispatch::test_collect_metrics",
"test/test_main.py::TestMainWithDispatch::test_crash_action",
"test/test_main.py::TestMainWithDispatch::test_custom_event",
"test/test_main.py::TestMainWithDispatch::test_empty_actions",
"test/test_main.py::TestMainWithDispatch::test_event_not_implemented",
"test/test_main.py::TestMainWithDispatch::test_event_reemitted",
"test/test_main.py::TestMainWithDispatch::test_excepthook",
"test/test_main.py::TestMainWithDispatch::test_has_valid_status",
"test/test_main.py::TestMainWithDispatch::test_hook_and_dispatch",
"test/test_main.py::TestMainWithDispatch::test_hook_and_dispatch_but_hook_is_dispatch",
"test/test_main.py::TestMainWithDispatch::test_hook_and_dispatch_but_hook_is_dispatch_copy",
"test/test_main.py::TestMainWithDispatch::test_hook_and_dispatch_with_failing_hook",
"test/test_main.py::TestMainWithDispatch::test_logger",
"test/test_main.py::TestMainWithDispatch::test_multiple_events_handled",
"test/test_main.py::TestMainWithDispatch::test_no_actions",
"test/test_main.py::TestMainWithDispatch::test_no_reemission_on_collect_metrics",
"test/test_main.py::TestMainWithDispatch::test_non_executable_hook_and_dispatch",
"test/test_main.py::TestMainWithDispatch::test_sets_model_name",
"test/test_main.py::TestMainWithDispatch::test_setup_event_links",
"test/test_main.py::TestMainWithDispatchAsScript::test_collect_metrics",
"test/test_main.py::TestMainWithDispatchAsScript::test_custom_event",
"test/test_main.py::TestMainWithDispatchAsScript::test_empty_actions",
"test/test_main.py::TestMainWithDispatchAsScript::test_event_not_implemented",
"test/test_main.py::TestMainWithDispatchAsScript::test_event_reemitted",
"test/test_main.py::TestMainWithDispatchAsScript::test_excepthook",
"test/test_main.py::TestMainWithDispatchAsScript::test_has_valid_status",
"test/test_main.py::TestMainWithDispatchAsScript::test_hook_and_dispatch",
"test/test_main.py::TestMainWithDispatchAsScript::test_hook_and_dispatch_but_hook_is_dispatch",
"test/test_main.py::TestMainWithDispatchAsScript::test_hook_and_dispatch_but_hook_is_dispatch_copy",
"test/test_main.py::TestMainWithDispatchAsScript::test_hook_and_dispatch_with_failing_hook",
"test/test_main.py::TestMainWithDispatchAsScript::test_logger",
"test/test_main.py::TestMainWithDispatchAsScript::test_multiple_events_handled",
"test/test_main.py::TestMainWithDispatchAsScript::test_no_actions",
"test/test_main.py::TestMainWithDispatchAsScript::test_no_reemission_on_collect_metrics",
"test/test_main.py::TestMainWithDispatchAsScript::test_non_executable_hook_and_dispatch",
"test/test_main.py::TestMainWithDispatchAsScript::test_sets_model_name",
"test/test_main.py::TestMainWithDispatchAsScript::test_setup_event_links",
"test/test_main.py::TestStorageHeuristics::test_fallback_to_current_juju_version__new_enough",
"test/test_main.py::TestStorageHeuristics::test_fallback_to_current_juju_version__too_old",
"test/test_main.py::TestStorageHeuristics::test_not_if_already_local",
"test/test_main.py::TestStorageHeuristics::test_not_if_not_in_k8s",
"test/test_testing.py::TestHarness::test_actions_from_directory",
"test/test_testing.py::TestHarness::test_actions_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_actions_passed_in",
"test/test_testing.py::TestHarness::test_add_layer_with_log_targets_to_plan",
"test/test_testing.py::TestHarness::test_add_oci_resource_custom",
"test/test_testing.py::TestHarness::test_add_oci_resource_no_image",
"test/test_testing.py::TestHarness::test_add_peer_relation_with_initial_data_leader",
"test/test_testing.py::TestHarness::test_add_relation",
"test/test_testing.py::TestHarness::test_add_relation_and_unit",
"test/test_testing.py::TestHarness::test_add_relation_with_app_data",
"test/test_testing.py::TestHarness::test_add_relation_with_our_initial_data",
"test/test_testing.py::TestHarness::test_add_relation_with_remote_app_data",
"test/test_testing.py::TestHarness::test_add_relation_with_unit_data",
"test/test_testing.py::TestHarness::test_add_resource_but_oci",
"test/test_testing.py::TestHarness::test_add_resource_bytes",
"test/test_testing.py::TestHarness::test_add_resource_string",
"test/test_testing.py::TestHarness::test_add_resource_unknown",
"test/test_testing.py::TestHarness::test_add_resource_unknown_filename",
"test/test_testing.py::TestHarness::test_add_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_not_attached_default",
"test/test_testing.py::TestHarness::test_add_storage_then_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_app_status",
"test/test_testing.py::TestHarness::test_attach_storage",
"test/test_testing.py::TestHarness::test_attach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_bad_config_option_type",
"test/test_testing.py::TestHarness::test_begin_twice",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_install_sets_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_multiple_relation_same_endpoint",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations_not_leader",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_peer_relation_pre_defined",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_relation_charm_with_no_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_unknown_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_application_data",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_multiple_units",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_one_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_peer_relation",
"test/test_testing.py::TestHarness::test_can_connect_begin_with_initial_hooks",
"test/test_testing.py::TestHarness::test_can_connect_default",
"test/test_testing.py::TestHarness::test_config_from_directory",
"test/test_testing.py::TestHarness::test_config_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_container_isdir_and_exists",
"test/test_testing.py::TestHarness::test_container_pebble_ready",
"test/test_testing.py::TestHarness::test_create_harness_twice",
"test/test_testing.py::TestHarness::test_detach_storage",
"test/test_testing.py::TestHarness::test_detach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_empty_config_raises",
"test/test_testing.py::TestHarness::test_evaluate_status",
"test/test_testing.py::TestHarness::test_event_context",
"test/test_testing.py::TestHarness::test_event_context_inverse",
"test/test_testing.py::TestHarness::test_get_backend_calls",
"test/test_testing.py::TestHarness::test_get_backend_calls_with_kwargs",
"test/test_testing.py::TestHarness::test_get_filesystem_root",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan_unknown",
"test/test_testing.py::TestHarness::test_get_pod_spec",
"test/test_testing.py::TestHarness::test_get_relation_data",
"test/test_testing.py::TestHarness::test_harness_leader_misconfig",
"test/test_testing.py::TestHarness::test_hooks_disabled_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_nested_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_noop",
"test/test_testing.py::TestHarness::test_hooks_enabled_and_disabled",
"test/test_testing.py::TestHarness::test_invalid_status_set",
"test/test_testing.py::TestHarness::test_metadata_from_directory",
"test/test_testing.py::TestHarness::test_metadata_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_no_config_option_type",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_populate_oci_resources",
"test/test_testing.py::TestHarness::test_relation_events",
"test/test_testing.py::TestHarness::test_relation_get_when_broken",
"test/test_testing.py::TestHarness::test_relation_set_app_not_leader",
"test/test_testing.py::TestHarness::test_relation_set_deletes",
"test/test_testing.py::TestHarness::test_relation_set_nonstring",
"test/test_testing.py::TestHarness::test_remove_detached_storage",
"test/test_testing.py::TestHarness::test_remove_relation",
"test/test_testing.py::TestHarness::test_remove_relation_marks_relation_as_inactive",
"test/test_testing.py::TestHarness::test_remove_relation_unit",
"test/test_testing.py::TestHarness::test_remove_specific_relation_id",
"test/test_testing.py::TestHarness::test_remove_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_removing_invalid_relation_id_raises_exception",
"test/test_testing.py::TestHarness::test_removing_relation_refreshes_charm_model",
"test/test_testing.py::TestHarness::test_removing_relation_removes_remote_app_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_does_not_remove_other_unit_and_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_removes_data_also",
"test/test_testing.py::TestHarness::test_resource_folder_cleanup",
"test/test_testing.py::TestHarness::test_set_leader",
"test/test_testing.py::TestHarness::test_set_model_info_after_begin",
"test/test_testing.py::TestHarness::test_set_model_name",
"test/test_testing.py::TestHarness::test_set_model_name_after_begin",
"test/test_testing.py::TestHarness::test_set_model_uuid_after_begin",
"test/test_testing.py::TestHarness::test_set_workload_version",
"test/test_testing.py::TestHarness::test_storage_with_hyphens_works",
"test/test_testing.py::TestHarness::test_uncastable_config_option_type",
"test/test_testing.py::TestHarness::test_unit_status",
"test/test_testing.py::TestHarness::test_update_config",
"test/test_testing.py::TestHarness::test_update_config_bad_type",
"test/test_testing.py::TestHarness::test_update_config_undefined_option",
"test/test_testing.py::TestHarness::test_update_config_unset_boolean",
"test/test_testing.py::TestHarness::test_update_peer_relation_app_data",
"test/test_testing.py::TestHarness::test_update_peer_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_exposes_new_data",
"test/test_testing.py::TestHarness::test_update_relation_no_local_app_change_event",
"test/test_testing.py::TestHarness::test_update_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_remove_data",
"test/test_testing.py::TestNetwork::test_add_network_all_args",
"test/test_testing.py::TestNetwork::test_add_network_default_fallback",
"test/test_testing.py::TestNetwork::test_add_network_defaults",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_and_relation_id_do_not_correspond",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_fallback",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_not_in_meta",
"test/test_testing.py::TestNetwork::test_add_network_ipv6",
"test/test_testing.py::TestNetwork::test_add_network_relation_id_incorrect",
"test/test_testing.py::TestNetwork::test_add_network_relation_id_set_endpoint_not_set",
"test/test_testing.py::TestNetwork::test_add_network_specific_endpoint",
"test/test_testing.py::TestNetwork::test_add_network_specific_relation",
"test/test_testing.py::TestNetwork::test_network_get_relation_not_found",
"test/test_testing.py::TestTestingModelBackend::test_conforms_to_model_backend",
"test/test_testing.py::TestTestingModelBackend::test_get_pebble_methods",
"test/test_testing.py::TestTestingModelBackend::test_lazy_resource_directory",
"test/test_testing.py::TestTestingModelBackend::test_model_uuid_is_uuid_v4",
"test/test_testing.py::TestTestingModelBackend::test_reboot",
"test/test_testing.py::TestTestingModelBackend::test_relation_get_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_ids_unknown_relation",
"test/test_testing.py::TestTestingModelBackend::test_relation_list_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_remote_app_name",
"test/test_testing.py::TestTestingModelBackend::test_resource_get_no_resource",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_app",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_unit",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_no_override",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_replace",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_not_combined",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_three_services",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_autostart",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_bad_request",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_none",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_not_started",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_start_stop",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_subset",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_invalid_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_methods_match_pebble_client",
"test/test_testing.py::TestTestingPebbleClient::test_mixed_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_send_signal",
"test/test_testing.py::TestTestingPebbleClient::test_start_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_start_started_service",
"test/test_testing.py::TestTestingPebbleClient::test_stop_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_stop_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_stop_stopped_service",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_container_storage_mounts",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_directory_object_itself",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_not_found_raises",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_unnamed",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_recursively",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_subdir_of_file_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_pull_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_pull_not_found",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_list_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_bytes",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_larger_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_non_utf8_data",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_as_child_of_file_raises_error",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_bytes_ignore_encoding",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_bytesio_ignore_encoding",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_file_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list_by_pattern",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_to_non_existent_subdir",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_remove_path",
"test/test_testing.py::TestFilesystem::test_list_files",
"test/test_testing.py::TestFilesystem::test_make_dir",
"test/test_testing.py::TestFilesystem::test_pull",
"test/test_testing.py::TestFilesystem::test_pull_path",
"test/test_testing.py::TestFilesystem::test_push",
"test/test_testing.py::TestFilesystem::test_push_create_parent",
"test/test_testing.py::TestFilesystem::test_push_path",
"test/test_testing.py::TestFilesystem::test_storage_mount",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_app_instance",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_app_name_str",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_unit_instance",
"test/test_testing.py::TestSecrets::test_add_model_secret_invalid_content",
"test/test_testing.py::TestSecrets::test_get_secret_and_refresh",
"test/test_testing.py::TestSecrets::test_get_secret_as_owner",
"test/test_testing.py::TestSecrets::test_get_secret_by_label",
"test/test_testing.py::TestSecrets::test_get_secret_grants",
"test/test_testing.py::TestSecrets::test_get_secret_removed",
"test/test_testing.py::TestSecrets::test_grant_secret_and_revoke_secret",
"test/test_testing.py::TestSecrets::test_grant_secret_no_relation",
"test/test_testing.py::TestSecrets::test_grant_secret_wrong_app",
"test/test_testing.py::TestSecrets::test_grant_secret_wrong_unit",
"test/test_testing.py::TestSecrets::test_secret_permissions_leader",
"test/test_testing.py::TestSecrets::test_secret_permissions_nonleader",
"test/test_testing.py::TestSecrets::test_secret_permissions_unit",
"test/test_testing.py::TestSecrets::test_set_secret_content",
"test/test_testing.py::TestSecrets::test_set_secret_content_invalid_content",
"test/test_testing.py::TestSecrets::test_set_secret_content_invalid_secret_id",
"test/test_testing.py::TestSecrets::test_set_secret_content_wrong_owner",
"test/test_testing.py::TestSecrets::test_trigger_secret_expiration",
"test/test_testing.py::TestSecrets::test_trigger_secret_removal",
"test/test_testing.py::TestSecrets::test_trigger_secret_rotation",
"test/test_testing.py::TestPorts::test_errors",
"test/test_testing.py::TestPorts::test_ports",
"test/test_testing.py::TestHandleExec::test_combined_error",
"test/test_testing.py::TestHandleExec::test_exec_service_context",
"test/test_testing.py::TestHandleExec::test_exec_stdin",
"test/test_testing.py::TestHandleExec::test_exec_stdout_stderr",
"test/test_testing.py::TestHandleExec::test_exec_timeout",
"test/test_testing.py::TestHandleExec::test_re_register_handler",
"test/test_testing.py::TestHandleExec::test_register_handler",
"test/test_testing.py::TestHandleExec::test_register_match_all_prefix",
"test/test_testing.py::TestHandleExec::test_register_with_handler",
"test/test_testing.py::TestHandleExec::test_register_with_result",
"test/test_testing.py::TestActions::test_before_begin",
"test/test_testing.py::TestActions::test_invalid_action",
"test/test_testing.py::TestNotify::test_notify_basics",
"test/test_testing.py::TestNotify::test_notify_no_begin",
"test/test_testing.py::TestNotify::test_notify_no_repeat",
"test/test_testing.py::TestNotices::test_get_notice_by_id",
"test/test_testing.py::TestNotices::test_get_notices"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2024-02-07 06:58:59+00:00 | apache-2.0 | 1,480 |
|
canonical__operator-1150 | diff --git a/ops/charm.py b/ops/charm.py
index 59ad4c5..eae71a2 100644
--- a/ops/charm.py
+++ b/ops/charm.py
@@ -1540,7 +1540,9 @@ class StorageMeta:
self.multiple_range = None
if 'multiple' in raw:
range = raw['multiple']['range']
- if '-' not in range:
+ if range[-1] == '+':
+ self.multiple_range = (int(range[:-1]), None)
+ elif '-' not in range:
self.multiple_range = (int(range), int(range))
else:
range = range.split('-')
| canonical/operator | 5804652253926fea5c2aae5952d3032cea12ca5f | diff --git a/ops/testing.py b/ops/testing.py
index e3c9556..cb395be 100644
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -405,7 +405,11 @@ class Harness(Generic[CharmType]):
for storage_name in self._meta.storages:
for storage_index in self._backend.storage_list(storage_name, include_detached=True):
s = model.Storage(storage_name, storage_index, self._backend)
- self.attach_storage(s.full_id)
+ if self._backend._storage_is_attached(storage_name, storage_index):
+ # Attaching was done already, but we still need the event to be emitted.
+ self.charm.on[storage_name].storage_attached.emit(s)
+ else:
+ self.attach_storage(s.full_id)
# Storage done, emit install event
charm.on.install.emit()
@@ -690,8 +694,8 @@ class Harness(Generic[CharmType]):
Args:
storage_name: The storage backend name on the Charm
count: Number of disks being added
- attach: True to also attach the storage mount and emit storage-attached if
- harness.begin() has been called.
+ attach: True to also attach the storage mount; if :meth:`begin`
+ has been called a True value will also emit storage-attached
Return:
A list of storage IDs, e.g. ["my-storage/1", "my-storage/2"].
@@ -739,12 +743,12 @@ class Harness(Generic[CharmType]):
"""Attach a storage device.
The intent of this function is to simulate a ``juju attach-storage`` call.
- It will trigger a storage-attached hook if the storage unit in question exists
+ If called after :meth:`begin` and hooks are not disabled, it will trigger
+ a storage-attached hook if the storage unit in question exists
and is presently marked as detached.
The test harness uses symbolic links to imitate storage mounts, which may lead to some
- inconsistencies compared to the actual charm. Users should be cognizant of
- this potential discrepancy.
+ inconsistencies compared to the actual charm.
Args:
storage_id: The full storage ID of the storage unit being attached, including the
@@ -2339,7 +2343,17 @@ class _TestingModelBackend:
mounting_dir.parent.mkdir(parents=True, exist_ok=True)
target_dir = pathlib.Path(store["location"])
target_dir.mkdir(parents=True, exist_ok=True)
- mounting_dir.symlink_to(target_dir)
+ try:
+ mounting_dir.symlink_to(target_dir, target_is_directory=True)
+ except FileExistsError:
+ # If the symlink is already the one we want, then we
+ # don't need to do anything here.
+ # NOTE: In Python 3.9, this can use `mounting_dir.readlink()`
+ if (
+ not mounting_dir.is_symlink()
+ or os.readlink(mounting_dir) != str(target_dir)
+ ):
+ raise
index = int(index)
if not self._storage_is_attached(name, index):
diff --git a/test/test_charm.py b/test/test_charm.py
index 1a9ac52..9767588 100644
--- a/test/test_charm.py
+++ b/test/test_charm.py
@@ -282,6 +282,10 @@ storage:
multiple:
range: 2-
type: filesystem
+ stor-plus:
+ multiple:
+ range: 10+
+ type: filesystem
''')
fake_script(
@@ -329,6 +333,7 @@ storage:
self.assertEqual(self.meta.storages['stor2'].multiple_range, (2, 2))
self.assertEqual(self.meta.storages['stor3'].multiple_range, (2, None))
self.assertEqual(self.meta.storages['stor-4'].multiple_range, (2, 4))
+ self.assertEqual(self.meta.storages['stor-plus'].multiple_range, (10, None))
charm = MyCharm(self.create_framework())
diff --git a/test/test_testing.py b/test/test_testing.py
index fcb3369..d531e08 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -4749,6 +4749,96 @@ class TestFilesystem(unittest.TestCase, _TestingPebbleClientMixin):
self.harness.attach_storage(storage_id)
self.assertTrue((self.root / "mounts/foo/bar").read_text(), "foobar")
+ def _make_storage_attach_harness(self, meta: typing.Optional[str] = None):
+ class MyCharm(ops.CharmBase):
+ def __init__(self, framework: ops.Framework):
+ super().__init__(framework)
+ self.attached: typing.List[str] = []
+ self.locations: typing.List[pathlib.Path] = []
+ framework.observe(self.on['test-storage'].storage_attached, self._on_attach)
+
+ def _on_attach(self, event: ops.StorageAttachedEvent):
+ self.attached.append(event.storage.full_id)
+ self.locations.append(event.storage.location)
+
+ if meta is None:
+ meta = '''
+ name: test
+ containers:
+ test-container:
+ mounts:
+ - storage: test-storage
+ location: /mounts/foo
+ storage:
+ test-storage:
+ type: filesystem
+ '''
+ harness = ops.testing.Harness(MyCharm, meta=meta)
+ self.addCleanup(harness.cleanup)
+ return harness
+
+ def test_storage_attach_begin_no_emit(self):
+ """If `begin()` hasn't been called, `attach` does not emit storage-attached."""
+ harness = self._make_storage_attach_harness()
+ harness.add_storage('test-storage', attach=True)
+ harness.begin()
+ self.assertNotIn('test-storage/0', harness.charm.attached)
+
+ def test_storage_attach_begin_with_hooks_emits(self):
+ """`attach` doesn't emit storage-attached before `begin_with_initial_hooks`."""
+ harness = self._make_storage_attach_harness()
+ harness.add_storage('test-storage', attach=True)
+ harness.begin_with_initial_hooks()
+ self.assertIn('test-storage/0', harness.charm.attached)
+ self.assertTrue(harness.charm.locations[0])
+
+ def test_storage_add_with_later_attach(self):
+ harness = self._make_storage_attach_harness()
+ harness.begin()
+ storage_ids = harness.add_storage('test-storage', attach=False)
+ self.assertNotIn('test-storage/0', harness.charm.attached)
+ for s_id in storage_ids:
+ harness.attach_storage(s_id)
+ # It's safe to call `attach_storage` more than once, and this will
+ # only trigger the event once - this is the same as executing
+ # `juju attach-storage <unit> <storage>` more than once.
+ harness.attach_storage(s_id)
+ self.assertEqual(harness.charm.attached.count('test-storage/0'), 1)
+
+ def test_storage_machine_charm_metadata(self):
+ meta = '''
+ name: test
+ storage:
+ test-storage:
+ type: filesystem
+ mount: /mounts/foo
+ '''
+ harness = self._make_storage_attach_harness(meta)
+ harness.begin()
+ harness.add_storage('test-storage', attach=True)
+ self.assertIn('test-storage/0', harness.charm.attached)
+
+ def test_storage_multiple_storage_instances(self):
+ meta = '''
+ name: test
+ storage:
+ test-storage:
+ type: filesystem
+ mount: /mounts/foo
+ multiple:
+ range: 2-4
+ '''
+ harness = self._make_storage_attach_harness(meta)
+ harness.begin()
+ harness.add_storage('test-storage', 2, attach=True)
+ self.assertEqual(harness.charm.attached, ['test-storage/0', 'test-storage/1'])
+ self.assertNotEqual(harness.charm.locations[0], harness.charm.locations[1])
+ harness.add_storage('test-storage', 2, attach=True)
+ self.assertEqual(
+ harness.charm.attached, [
+ 'test-storage/0', 'test-storage/1', 'test-storage/2', 'test-storage/3'])
+ self.assertEqual(len(set(harness.charm.locations)), 4)
+
class TestSecrets(unittest.TestCase):
def test_add_model_secret_by_app_name_str(self):
| Using Harness.add_storage(..., attach=True) before begin_with_initial_hooks gives confusing error
For example:
```python
import ops
import ops.testing
class MyCharm(ops.CharmBase):
pass
meta = """
containers:
test-container:
mounts:
- storage: test-storage
location: /mounts/foo
storage:
test-storage:
type: filesystem
"""
h = ops.testing.Harness(MyCharm, meta=meta)
try:
h.add_storage("test-storage", attach=True)
h.begin_with_initial_hooks()
finally:
h.cleanup()
```
If `begin()` is used instead of `begin_with_initial_hooks`, everything works, because `begin_with_initial_hooks` does the attaching (in order to then emit the `StorageAttached` event) and `begin` does not. Similarly, if `attach=True` is not used, everything works.
However, when run as above, an error like this is generated:
```
Traceback (most recent call last):
File "/tmp/storagetest/test.py", line 41, in <module>
h.begin_with_initial_hooks()
File "/tmp/storagetest/.venv/lib/python3.11/site-packages/ops/testing.py", line 407, in begin_with_initial_hooks
self.attach_storage(s.full_id)
File "/tmp/storagetest/.venv/lib/python3.11/site-packages/ops/testing.py", line 752, in attach_storage
if not self._backend._storage_attach(storage_id):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/storagetest/.venv/lib/python3.11/site-packages/ops/testing.py", line 2348, in _storage_attach
mounting_dir.symlink_to(target_dir)
File "/usr/lib/python3.11/pathlib.py", line 1199, in symlink_to
os.symlink(target, self, target_is_directory)
FileExistsError: [Errno 17] File exists: '/tmp/ops-harness-z2tflymi/storages/test-storage/0' -> '/tmp/ops-harness-z2tflymi/containers/test-container/mounts/foo'
```
This happens because the `add_storage` call mounts the storage, and `begin_with_initial_hooks` then attempts to mount it again; since the mount already exists, the symlink creation fails.
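For completeness, here is a sketch of an ordering that avoids the error today, reusing the `MyCharm` and `meta` from the snippet above (treating this as the intended usage is an assumption):
```python
# Add the storage without attaching, and let begin_with_initial_hooks()
# perform the attach and emit storage-attached itself.
h = ops.testing.Harness(MyCharm, meta=meta)
try:
    h.add_storage("test-storage")  # attach defaults to False
    h.begin_with_initial_hooks()
finally:
    h.cleanup()
```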
We should:
* [ ] Adjust the API documentation so that it explicitly says that `attach` does nothing if `begin` hasn't yet been called
* [ ] Verify that `begin` has been called (i.e. `._charm is not None`) before trying to mount the storage | 0.0 | 5804652253926fea5c2aae5952d3032cea12ca5f | [
"test/test_charm.py::TestCharm::test_storage_events"
]
| [
"test/test_charm.py::TestCharm::test_action_event_defer_fails",
"test/test_charm.py::TestCharm::test_action_events",
"test/test_charm.py::TestCharm::test_add_status_type_error",
"test/test_charm.py::TestCharm::test_basic",
"test/test_charm.py::TestCharm::test_collect_app_and_unit_status",
"test/test_charm.py::TestCharm::test_collect_app_status_leader",
"test/test_charm.py::TestCharm::test_collect_app_status_no_statuses",
"test/test_charm.py::TestCharm::test_collect_app_status_non_leader",
"test/test_charm.py::TestCharm::test_collect_status_priority",
"test/test_charm.py::TestCharm::test_collect_unit_status",
"test/test_charm.py::TestCharm::test_collect_unit_status_no_statuses",
"test/test_charm.py::TestCharm::test_containers",
"test/test_charm.py::TestCharm::test_containers_storage",
"test/test_charm.py::TestCharm::test_containers_storage_multiple_mounts",
"test/test_charm.py::TestCharm::test_empty_action",
"test/test_charm.py::TestCharm::test_helper_properties",
"test/test_charm.py::TestCharm::test_invalid_action_results",
"test/test_charm.py::TestCharm::test_observe_decorated_method",
"test/test_charm.py::TestCharm::test_observer_not_referenced_warning",
"test/test_charm.py::TestCharm::test_relation_events",
"test/test_charm.py::TestCharm::test_relations_meta",
"test/test_charm.py::TestCharm::test_relations_meta_limit_type_validation",
"test/test_charm.py::TestCharm::test_relations_meta_scope_type_validation",
"test/test_charm.py::TestCharm::test_secret_events",
"test/test_charm.py::TestCharm::test_workload_events",
"test/test_charm.py::TestCharmMeta::test_assumes",
"test/test_charm.py::TestCharmMeta::test_links",
"test/test_charm.py::TestCharmMeta::test_links_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_actions_from_directory",
"test/test_testing.py::TestHarness::test_actions_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_actions_passed_in",
"test/test_testing.py::TestHarness::test_add_layer_with_log_targets_to_plan",
"test/test_testing.py::TestHarness::test_add_oci_resource_custom",
"test/test_testing.py::TestHarness::test_add_oci_resource_no_image",
"test/test_testing.py::TestHarness::test_add_peer_relation_with_initial_data_leader",
"test/test_testing.py::TestHarness::test_add_relation",
"test/test_testing.py::TestHarness::test_add_relation_and_unit",
"test/test_testing.py::TestHarness::test_add_relation_no_meta_fails",
"test/test_testing.py::TestHarness::test_add_relation_with_app_data",
"test/test_testing.py::TestHarness::test_add_relation_with_our_initial_data",
"test/test_testing.py::TestHarness::test_add_relation_with_remote_app_data",
"test/test_testing.py::TestHarness::test_add_relation_with_unit_data",
"test/test_testing.py::TestHarness::test_add_resource_but_oci",
"test/test_testing.py::TestHarness::test_add_resource_bytes",
"test/test_testing.py::TestHarness::test_add_resource_string",
"test/test_testing.py::TestHarness::test_add_resource_unknown",
"test/test_testing.py::TestHarness::test_add_resource_unknown_filename",
"test/test_testing.py::TestHarness::test_add_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_not_attached_default",
"test/test_testing.py::TestHarness::test_add_storage_then_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_app_status",
"test/test_testing.py::TestHarness::test_attach_storage",
"test/test_testing.py::TestHarness::test_attach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_bad_config_option_type",
"test/test_testing.py::TestHarness::test_begin_twice",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_install_sets_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_multiple_relation_same_endpoint",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations_not_leader",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_peer_relation_pre_defined",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_relation_charm_with_no_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_unknown_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_application_data",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_multiple_units",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_one_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_peer_relation",
"test/test_testing.py::TestHarness::test_can_connect_begin_with_initial_hooks",
"test/test_testing.py::TestHarness::test_can_connect_default",
"test/test_testing.py::TestHarness::test_config_from_directory",
"test/test_testing.py::TestHarness::test_config_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_container_isdir_and_exists",
"test/test_testing.py::TestHarness::test_container_pebble_ready",
"test/test_testing.py::TestHarness::test_create_harness_twice",
"test/test_testing.py::TestHarness::test_detach_storage",
"test/test_testing.py::TestHarness::test_detach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_empty_config_raises",
"test/test_testing.py::TestHarness::test_evaluate_status",
"test/test_testing.py::TestHarness::test_event_context",
"test/test_testing.py::TestHarness::test_event_context_inverse",
"test/test_testing.py::TestHarness::test_get_backend_calls",
"test/test_testing.py::TestHarness::test_get_backend_calls_with_kwargs",
"test/test_testing.py::TestHarness::test_get_filesystem_root",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan_unknown",
"test/test_testing.py::TestHarness::test_get_pod_spec",
"test/test_testing.py::TestHarness::test_get_relation_data",
"test/test_testing.py::TestHarness::test_harness_leader_misconfig",
"test/test_testing.py::TestHarness::test_hooks_disabled_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_nested_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_noop",
"test/test_testing.py::TestHarness::test_hooks_enabled_and_disabled",
"test/test_testing.py::TestHarness::test_invalid_status_set",
"test/test_testing.py::TestHarness::test_metadata_from_directory",
"test/test_testing.py::TestHarness::test_metadata_from_directory_charmcraft_yaml",
"test/test_testing.py::TestHarness::test_no_config_option_type",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_populate_oci_resources",
"test/test_testing.py::TestHarness::test_relation_events",
"test/test_testing.py::TestHarness::test_relation_set_app_not_leader",
"test/test_testing.py::TestHarness::test_relation_set_deletes",
"test/test_testing.py::TestHarness::test_relation_set_nonstring",
"test/test_testing.py::TestHarness::test_remove_detached_storage",
"test/test_testing.py::TestHarness::test_remove_relation",
"test/test_testing.py::TestHarness::test_remove_relation_marks_relation_as_inactive",
"test/test_testing.py::TestHarness::test_remove_relation_unit",
"test/test_testing.py::TestHarness::test_remove_specific_relation_id",
"test/test_testing.py::TestHarness::test_remove_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_removing_invalid_relation_id_raises_exception",
"test/test_testing.py::TestHarness::test_removing_relation_refreshes_charm_model",
"test/test_testing.py::TestHarness::test_removing_relation_removes_remote_app_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_does_not_remove_other_unit_and_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_removes_data_also",
"test/test_testing.py::TestHarness::test_resource_folder_cleanup",
"test/test_testing.py::TestHarness::test_set_leader",
"test/test_testing.py::TestHarness::test_set_model_info_after_begin",
"test/test_testing.py::TestHarness::test_set_model_name",
"test/test_testing.py::TestHarness::test_set_model_name_after_begin",
"test/test_testing.py::TestHarness::test_set_model_uuid_after_begin",
"test/test_testing.py::TestHarness::test_set_workload_version",
"test/test_testing.py::TestHarness::test_storage_with_hyphens_works",
"test/test_testing.py::TestHarness::test_uncastable_config_option_type",
"test/test_testing.py::TestHarness::test_unit_status",
"test/test_testing.py::TestHarness::test_update_config",
"test/test_testing.py::TestHarness::test_update_config_bad_type",
"test/test_testing.py::TestHarness::test_update_config_undefined_option",
"test/test_testing.py::TestHarness::test_update_config_unset_boolean",
"test/test_testing.py::TestHarness::test_update_peer_relation_app_data",
"test/test_testing.py::TestHarness::test_update_peer_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_exposes_new_data",
"test/test_testing.py::TestHarness::test_update_relation_no_local_app_change_event",
"test/test_testing.py::TestHarness::test_update_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_remove_data",
"test/test_testing.py::TestNetwork::test_add_network_all_args",
"test/test_testing.py::TestNetwork::test_add_network_default_fallback",
"test/test_testing.py::TestNetwork::test_add_network_defaults",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_and_relation_id_do_not_correspond",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_fallback",
"test/test_testing.py::TestNetwork::test_add_network_endpoint_not_in_meta",
"test/test_testing.py::TestNetwork::test_add_network_ipv6",
"test/test_testing.py::TestNetwork::test_add_network_relation_id_incorrect",
"test/test_testing.py::TestNetwork::test_add_network_relation_id_set_endpoint_not_set",
"test/test_testing.py::TestNetwork::test_add_network_specific_endpoint",
"test/test_testing.py::TestNetwork::test_add_network_specific_relation",
"test/test_testing.py::TestNetwork::test_add_relation_network_get",
"test/test_testing.py::TestNetwork::test_network_get_relation_not_found",
"test/test_testing.py::TestTestingModelBackend::test_conforms_to_model_backend",
"test/test_testing.py::TestTestingModelBackend::test_get_pebble_methods",
"test/test_testing.py::TestTestingModelBackend::test_lazy_resource_directory",
"test/test_testing.py::TestTestingModelBackend::test_model_uuid_is_uuid_v4",
"test/test_testing.py::TestTestingModelBackend::test_reboot",
"test/test_testing.py::TestTestingModelBackend::test_relation_get_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_ids_unknown_relation",
"test/test_testing.py::TestTestingModelBackend::test_relation_list_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_remote_app_name",
"test/test_testing.py::TestTestingModelBackend::test_resource_get_no_resource",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_app",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_unit",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_no_override",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_replace",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_not_combined",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_three_services",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_autostart",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_bad_request",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_none",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_not_started",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_start_stop",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_subset",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_invalid_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_methods_match_pebble_client",
"test/test_testing.py::TestTestingPebbleClient::test_mixed_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_send_signal",
"test/test_testing.py::TestTestingPebbleClient::test_start_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_start_started_service",
"test/test_testing.py::TestTestingPebbleClient::test_stop_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_stop_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_stop_stopped_service",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_container_storage_mounts",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_directory_object_itself",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_not_found_raises",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_unnamed",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_recursively",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_subdir_of_file_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_pull_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_pull_not_found",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_list_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_bytes",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_larger_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_non_utf8_data",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_as_child_of_file_raises_error",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_bytes_ignore_encoding",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_bytesio_ignore_encoding",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_file_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list_by_pattern",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_to_non_existent_subdir",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_remove_path",
"test/test_testing.py::TestFilesystem::test_list_files",
"test/test_testing.py::TestFilesystem::test_make_dir",
"test/test_testing.py::TestFilesystem::test_pull",
"test/test_testing.py::TestFilesystem::test_pull_path",
"test/test_testing.py::TestFilesystem::test_push",
"test/test_testing.py::TestFilesystem::test_push_create_parent",
"test/test_testing.py::TestFilesystem::test_push_path",
"test/test_testing.py::TestFilesystem::test_storage_add_with_later_attach",
"test/test_testing.py::TestFilesystem::test_storage_attach_begin_no_emit",
"test/test_testing.py::TestFilesystem::test_storage_attach_begin_with_hooks_emits",
"test/test_testing.py::TestFilesystem::test_storage_machine_charm_metadata",
"test/test_testing.py::TestFilesystem::test_storage_mount",
"test/test_testing.py::TestFilesystem::test_storage_multiple_storage_instances",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_app_instance",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_app_name_str",
"test/test_testing.py::TestSecrets::test_add_model_secret_by_unit_instance",
"test/test_testing.py::TestSecrets::test_add_model_secret_invalid_content",
"test/test_testing.py::TestSecrets::test_get_secret_and_refresh",
"test/test_testing.py::TestSecrets::test_get_secret_as_owner",
"test/test_testing.py::TestSecrets::test_get_secret_by_label",
"test/test_testing.py::TestSecrets::test_get_secret_grants",
"test/test_testing.py::TestSecrets::test_get_secret_removed",
"test/test_testing.py::TestSecrets::test_grant_secret_and_revoke_secret",
"test/test_testing.py::TestSecrets::test_grant_secret_no_relation",
"test/test_testing.py::TestSecrets::test_grant_secret_wrong_app",
"test/test_testing.py::TestSecrets::test_grant_secret_wrong_unit",
"test/test_testing.py::TestSecrets::test_secret_permissions_leader",
"test/test_testing.py::TestSecrets::test_secret_permissions_nonleader",
"test/test_testing.py::TestSecrets::test_secret_permissions_unit",
"test/test_testing.py::TestSecrets::test_set_secret_content",
"test/test_testing.py::TestSecrets::test_set_secret_content_invalid_content",
"test/test_testing.py::TestSecrets::test_set_secret_content_invalid_secret_id",
"test/test_testing.py::TestSecrets::test_set_secret_content_wrong_owner",
"test/test_testing.py::TestSecrets::test_trigger_secret_expiration",
"test/test_testing.py::TestSecrets::test_trigger_secret_removal",
"test/test_testing.py::TestSecrets::test_trigger_secret_rotation",
"test/test_testing.py::TestPorts::test_errors",
"test/test_testing.py::TestPorts::test_ports",
"test/test_testing.py::TestHandleExec::test_combined_error",
"test/test_testing.py::TestHandleExec::test_exec_service_context",
"test/test_testing.py::TestHandleExec::test_exec_stdin",
"test/test_testing.py::TestHandleExec::test_exec_stdout_stderr",
"test/test_testing.py::TestHandleExec::test_exec_timeout",
"test/test_testing.py::TestHandleExec::test_re_register_handler",
"test/test_testing.py::TestHandleExec::test_register_handler",
"test/test_testing.py::TestHandleExec::test_register_match_all_prefix",
"test/test_testing.py::TestHandleExec::test_register_with_handler",
"test/test_testing.py::TestHandleExec::test_register_with_result",
"test/test_testing.py::TestActions::test_additional_params",
"test/test_testing.py::TestActions::test_bad_results",
"test/test_testing.py::TestActions::test_before_begin",
"test/test_testing.py::TestActions::test_fail_action",
"test/test_testing.py::TestActions::test_invalid_action",
"test/test_testing.py::TestActions::test_logs_and_results",
"test/test_testing.py::TestActions::test_required_param",
"test/test_testing.py::TestActions::test_run_action",
"test/test_testing.py::TestNotify::test_notify_basics",
"test/test_testing.py::TestNotify::test_notify_no_begin",
"test/test_testing.py::TestNotify::test_notify_no_repeat",
"test/test_testing.py::TestNotices::test_get_notice_by_id",
"test/test_testing.py::TestNotices::test_get_notices"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2024-03-13 07:35:54+00:00 | apache-2.0 | 1,481 |
|
canonical__operator-195 | diff --git a/ops/framework.py b/ops/framework.py
index c8df98b..edfe802 100755
--- a/ops/framework.py
+++ b/ops/framework.py
@@ -558,8 +558,18 @@ class Framework(Object):
raise RuntimeError(
'cannot save {} values before registering that type'.format(type(value).__name__))
data = value.snapshot()
- # Use marshal as a validator, enforcing the use of simple types.
- marshal.dumps(data)
+
+ # Use marshal as a validator, enforcing the use of simple types, as we later the
+ # information is really pickled, which is too error prone for future evolution of the
+ # stored data (e.g. if the developer stores a custom object and later changes its
+ # class name; when unpickling the original class will not be there and event
+ # data loading will fail).
+ try:
+ marshal.dumps(data)
+ except ValueError:
+ msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+ raise ValueError(msg.format(value.__class__.__name__, data))
+
# Use pickle for serialization, so the value remains portable.
raw_data = pickle.dumps(data)
self._storage.save_snapshot(value.handle.path, raw_data)
| canonical/operator | 02fe304edae9bce56f13cef32a2c48bdd150eb00 | diff --git a/test/test_framework.py b/test/test_framework.py
index 1977c1e..fc64566 100644
--- a/test/test_framework.py
+++ b/test/test_framework.py
@@ -761,6 +761,26 @@ class TestFramework(unittest.TestCase):
self.assertIn('database is locked', str(cm.exception))
f.close()
+ def test_snapshot_saving_restricted_to_simple_types(self):
+ # this can not be saved, as it has not simple types!
+ to_be_saved = {"bar": TestFramework}
+
+ class FooEvent(EventSetBase):
+ def snapshot(self):
+ return to_be_saved
+
+ handle = Handle(None, "a_foo", "some_key")
+ event = FooEvent()
+
+ framework = self.create_framework()
+ framework.register_type(FooEvent, None, handle.kind)
+ with self.assertRaises(ValueError) as cm:
+ framework.save_snapshot(event)
+ expected = (
+ "unable to save the data for FooEvent, it must contain only simple types: "
+ "{'bar': <class 'test.test_framework.TestFramework'>}")
+ self.assertEqual(str(cm.exception), expected)
+
class TestStoredState(unittest.TestCase):
 | save_snapshot validation failures unhelpful
The following validation code is in save_snapshot() in framework.py:
```python
# Use marshal as a validator, enforcing the use of simple types.
marshal.dumps(data)
```
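For context, `marshal` accepts only simple built-in types, so any `snapshot()` that returns something else trips this check. A minimal stand-alone illustration (the class name below is made up, not taken from the failing charm):
```python
import marshal

# Simple built-in types marshal fine.
marshal.dumps({"started": True, "count": 3})

class SomethingCustom:
    pass

# Anything else fails with a bare "unmarshallable object"; there is no hint
# about which snapshot, key, or value was responsible.
try:
    marshal.dumps({"bar": SomethingCustom})
except ValueError as exc:
    print(exc)  # unmarshallable object
```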
Unfortunately, when validation fails, the charm developer is left with nothing but this unhelpful traceback:
```
Traceback (most recent call last):
File "/var/lib/juju/agents/unit-plinth-4/charm/hooks/db-relation-changed", line 499, in <module>
ops.main.main(PlinthCharm)
File "lib/ops/main.py", line 197, in main
framework.commit()
File "lib/ops/framework.py", line 496, in commit
self.on.commit.emit()
File "lib/ops/framework.py", line 199, in emit
framework._emit(event)
File "lib/ops/framework.py", line 633, in _emit
self._reemit(event_path)
File "lib/ops/framework.py", line 668, in _reemit
custom_handler(event)
File "lib/ops/framework.py", line 717, in on_commit
self.framework.save_snapshot(self)
File "lib/ops/framework.py", line 526, in save_snapshot
marshal.dumps(data)
ValueError: unmarshallable object
```
As you can see, there is no context about what was being snapshotted, nor information about what bad data the object contained.
The unmarshallable exception needs to be caught, and a new one raised with more context so the developer can continue without hacking framework.py. | 0.0 | 02fe304edae9bce56f13cef32a2c48bdd150eb00 | [
"test/test_framework.py::TestFramework::test_snapshot_saving_restricted_to_simple_types"
]
| [
"test/test_framework.py::TestFramework::test_auto_register_event_types",
"test/test_framework.py::TestFramework::test_bad_sig_observer",
"test/test_framework.py::TestFramework::test_ban_concurrent_frameworks",
"test/test_framework.py::TestFramework::test_conflicting_event_attributes",
"test/test_framework.py::TestFramework::test_custom_event_data",
"test/test_framework.py::TestFramework::test_defer_and_reemit",
"test/test_framework.py::TestFramework::test_dynamic_event_types",
"test/test_framework.py::TestFramework::test_event_key_roundtrip",
"test/test_framework.py::TestFramework::test_events_base",
"test/test_framework.py::TestFramework::test_forget_and_multiple_objects",
"test/test_framework.py::TestFramework::test_forget_and_multiple_objects_with_load_snapshot",
"test/test_framework.py::TestFramework::test_handle_attrs_readonly",
"test/test_framework.py::TestFramework::test_handle_path",
"test/test_framework.py::TestFramework::test_helper_properties",
"test/test_framework.py::TestFramework::test_on_pre_commit_emitted",
"test/test_framework.py::TestFramework::test_reemit_ignores_unknown_event_type",
"test/test_framework.py::TestFramework::test_restore_unknown",
"test/test_framework.py::TestFramework::test_simple_event_observer",
"test/test_framework.py::TestFramework::test_snapshot_roundtrip",
"test/test_framework.py::TestFramework::test_weak_observer",
"test/test_framework.py::TestStoredState::test_basic_state_storage",
"test/test_framework.py::TestStoredState::test_comparison_operations",
"test/test_framework.py::TestStoredState::test_mutable_types",
"test/test_framework.py::TestStoredState::test_mutable_types_invalid",
"test/test_framework.py::TestStoredState::test_same_name_two_classes",
"test/test_framework.py::TestStoredState::test_set_default",
"test/test_framework.py::TestStoredState::test_set_operations",
"test/test_framework.py::TestStoredState::test_straight_sub_subclass",
"test/test_framework.py::TestStoredState::test_straight_subclass",
"test/test_framework.py::TestStoredState::test_the_crazy_thing",
"test/test_framework.py::TestStoredState::test_two_names_one_state",
"test/test_framework.py::TestStoredState::test_two_subclasses",
"test/test_framework.py::TestStoredState::test_two_subclasses_no_conflicts"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2020-03-30 14:44:57+00:00 | apache-2.0 | 1,482 |
|
canonical__operator-199 | diff --git a/ops/framework.py b/ops/framework.py
index abdd1fe..c8df98b 100755
--- a/ops/framework.py
+++ b/ops/framework.py
@@ -812,39 +812,78 @@ class BoundStoredState:
class StoredState:
+ """A class used to store data the charm needs persisted across invocations.
+
+ Example::
+
+ class MyClass(Object):
+ _stored = StoredState()
+
+ Instances of `MyClass` can transparently save state between invocations by
+ setting attributes on `stored`. Initial state should be set with
+ `set_default` on the bound object, that is::
+
+ class MyClass(Object):
+ _stored = StoredState()
+
+ def __init__(self, parent, key):
+ super().__init__(parent, key)
+ self._stored.set_default(seen=set())
+ self.framework.observe(self.on.seen, self._on_seen)
+
+ def _on_seen(self, event):
+ self._stored.seen.add(event.uuid)
+
+ """
def __init__(self):
self.parent_type = None
self.attr_name = None
def __get__(self, parent, parent_type=None):
- if self.parent_type is None:
- self.parent_type = parent_type
- elif self.parent_type is not parent_type:
+ if self.parent_type is not None and self.parent_type not in parent_type.mro():
+ # the StoredState instance is being shared between two unrelated classes
+ # -> unclear what is exepcted of us -> bail out
raise RuntimeError(
'StoredState shared by {} and {}'.format(
self.parent_type.__name__, parent_type.__name__))
if parent is None:
+ # accessing via the class directly (e.g. MyClass.stored)
return self
- bound = parent.__dict__.get(self.attr_name)
- if bound is None:
- for attr_name, attr_value in parent_type.__dict__.items():
- if attr_value is self:
- if self.attr_name and attr_name != self.attr_name:
- parent_tname = parent_type.__name__
- raise RuntimeError("StoredState shared by {}.{} and {}.{}".format(
- parent_tname, self.attr_name, parent_tname, attr_name))
- self.attr_name = attr_name
- bound = BoundStoredState(parent, attr_name)
- parent.__dict__[attr_name] = bound
- break
- else:
- raise RuntimeError(
- 'cannot find StoredVariable attribute in type {}'.format(parent_type.__name__))
-
- return bound
+ bound = None
+ if self.attr_name is not None:
+ bound = parent.__dict__.get(self.attr_name)
+ if bound is not None:
+ # we already have the thing from a previous pass, huzzah
+ return bound
+
+ # need to find ourselves amongst the parent's bases
+ for cls in parent_type.mro():
+ for attr_name, attr_value in cls.__dict__.items():
+ if attr_value is not self:
+ continue
+ # we've found ourselves! is it the first time?
+ if bound is not None:
+ # the StoredState instance is being stored in two different
+ # attributes -> unclear what is expected of us -> bail out
+ raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format(
+ cls.__name__, self.attr_name, attr_name))
+ # we've found ourselves for the first time; save where, and bind the object
+ self.attr_name = attr_name
+ self.parent_type = cls
+ bound = BoundStoredState(parent, attr_name)
+
+ if bound is not None:
+ # cache the bound object to avoid the expensive lookup the next time
+ # (don't use setattr, to keep things symmetric with the fast-path lookup above)
+ parent.__dict__[self.attr_name] = bound
+ return bound
+
+ raise AttributeError(
+ 'cannot find {} attribute in type {}'.format(
+ self.__class__.__name__, parent_type.__name__))
def _wrap_stored(parent_data, value):
| canonical/operator | d259e0919fc19075b1e3636a5dd3c94ab81fd416 | diff --git a/test/test_framework.py b/test/test_framework.py
index 7671075..1977c1e 100644
--- a/test/test_framework.py
+++ b/test/test_framework.py
@@ -774,12 +774,63 @@ class TestStoredState(unittest.TestCase):
return framework
def test_basic_state_storage(self):
- framework = self.create_framework()
+ class SomeObject(Object):
+ state = StoredState()
+
+ self._stored_state_tests(SomeObject)
+
+ def test_straight_subclass(self):
+ class SomeObject(Object):
+ state = StoredState()
+
+ class Sub(SomeObject):
+ pass
+ self._stored_state_tests(Sub)
+
+ def test_straight_sub_subclass(self):
class SomeObject(Object):
state = StoredState()
- obj = SomeObject(framework, "1")
+ class Sub(SomeObject):
+ pass
+
+ class SubSub(SomeObject):
+ pass
+
+ self._stored_state_tests(SubSub)
+
+ def test_two_subclasses(self):
+ class SomeObject(Object):
+ state = StoredState()
+
+ class SubA(SomeObject):
+ pass
+
+ class SubB(SomeObject):
+ pass
+
+ self._stored_state_tests(SubA)
+ self._stored_state_tests(SubB)
+
+ def test_the_crazy_thing(self):
+ class NoState(Object):
+ pass
+
+ class StatedObject(NoState):
+ state = StoredState()
+
+ class Sibling(NoState):
+ pass
+
+ class FinalChild(StatedObject, Sibling):
+ pass
+
+ self._stored_state_tests(FinalChild)
+
+ def _stored_state_tests(self, cls):
+ framework = self.create_framework()
+ obj = cls(framework, "1")
try:
obj.state.foo
@@ -812,12 +863,88 @@ class TestStoredState(unittest.TestCase):
# Since this has the same absolute object handle, it will get its state back.
framework_copy = self.create_framework()
- obj_copy = SomeObject(framework_copy, "1")
+ obj_copy = cls(framework_copy, "1")
self.assertEqual(obj_copy.state.foo, 42)
self.assertEqual(obj_copy.state.bar, "s")
self.assertEqual(obj_copy.state.baz, 4.2)
self.assertEqual(obj_copy.state.bing, True)
+ framework_copy.close()
+
+ def test_two_subclasses_no_conflicts(self):
+ class Base(Object):
+ state = StoredState()
+
+ class SubA(Base):
+ pass
+
+ class SubB(Base):
+ pass
+
+ framework = self.create_framework()
+ a = SubA(framework, None)
+ b = SubB(framework, None)
+ z = Base(framework, None)
+
+ a.state.foo = 42
+ b.state.foo = "hello"
+ z.state.foo = {1}
+
+ framework.commit()
+ framework.close()
+
+ framework2 = self.create_framework()
+ a2 = SubA(framework2, None)
+ b2 = SubB(framework2, None)
+ z2 = Base(framework2, None)
+
+ self.assertEqual(a2.state.foo, 42)
+ self.assertEqual(b2.state.foo, "hello")
+ self.assertEqual(z2.state.foo, {1})
+
+ def test_two_names_one_state(self):
+ class Mine(Object):
+ state = StoredState()
+ stored = state
+
+ framework = self.create_framework()
+ obj = Mine(framework, None)
+
+ with self.assertRaises(RuntimeError):
+ obj.state.foo = 42
+
+ framework.close()
+
+ # make sure we're not changing the object on failure
+ self.assertNotIn("stored", obj.__dict__)
+ self.assertNotIn("state", obj.__dict__)
+
+ def test_same_name_two_classes(self):
+ class Base(Object):
+ pass
+
+ class A(Base):
+ stored = StoredState()
+
+ class B(Base):
+ stored = A.stored
+
+ framework = self.create_framework()
+ a = A(framework, None)
+ b = B(framework, None)
+
+ # NOTE it's the second one that actually triggers the
+ # exception, but that's an implementation detail
+ a.stored.foo = 42
+
+ with self.assertRaises(RuntimeError):
+ b.stored.foo = "xyzzy"
+
+ framework.close()
+
+ # make sure we're not changing the object on failure
+ self.assertNotIn("stored", b.__dict__)
+
def test_mutable_types_invalid(self):
framework = self.create_framework()
| Testing harness: cannot find StoredVariable attribute in type <CharmClass>
When the testing harness is used, having a state attribute on a charm [currently](https://github.com/canonical/operator/commit/44dff930667aa8e9b179c11fa87ceb8c9b85ec5a) raises an exception.
This happens because `Harness.begin` creates a new type by inheriting from the real charm class provided by the test developer, so when `__dict__` is used on it in `StoredState.__get__`, the attribute lookup fails:
https://github.com/canonical/operator/blob/44dff930667aa8e9b179c11fa87ceb8c9b85ec5a/ops/testing.py#L95-L96
The lookup in `parent_type.__bases__[0].__dict__.items()` would succeed.
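In other words, an attribute defined on a base class never shows up in the derived class's own `__dict__`; it is only reachable by walking the MRO. A tiny stand-alone sketch (hypothetical names) of why the current lookup misses it:
```python
class Base:
    stored = object()   # stands in for the StoredState descriptor

class Derived(Base):    # roughly what Harness.begin generates from the charm class
    pass

print('stored' in Derived.__dict__)   # False: the current lookup stops here
print('stored' in Base.__dict__)      # True
print(any('stored' in cls.__dict__ for cls in Derived.mro()))  # True: walking the MRO finds it
```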
Example:
https://github.com/dshcherb/charm-haproxy/blob/ba9bae5aae3b89772361bfd4a973bb61d5abca83/test/test_charm.py#L21
```
python3 -m unittest
[6] > /home/ubuntu/src/canonical/juju/charm-haproxy/lib/ops/framework.py(834)__get__()
-> if bound is None:
13 frames hidden (try 'help hidden_frames')
(Pdb++) l
829 return self
830
831 bound = parent.__dict__.get(self.attr_name)
832 import pdb
833 pdb.set_trace()
834 -> if bound is None:
835 for attr_name, attr_value in parent_type.__dict__.items():
836 if attr_value is self:
837 if self.attr_name and attr_name != self.attr_name:
838 parent_tname = parent_type.__name__
839 raise RuntimeError("StoredState shared by {}.{} and {}.{}".format(
(Pdb++) parent_type.__dict__.items()
dict_items([('__module__', 'ops.testing'), ('on', <ops.testing.Harness.begin.<locals>.TestEvents object at 0x7f935be611f0>), ('__doc__', None)])
(Pdb++) parent_type.__bases__[0].__dict__.items()
dict_items([('__module__', 'src.charm'), ('state', <ops.framework.StoredState object at 0x7f935c486400>), ('HAPROXY_ENV_FILE', PosixPath('/etc/default/haproxy')), ('__init__', <function HaproxyCharm.__init__ at 0x7f935c06faf0>), ('on_install', <function HaproxyCharm.on_install at 0x7f935befc160>), ('on_start', <function HaproxyCharm.on_start at 0x7f935befc1f0>), ('on_stop', <function HaproxyCharm.on_stop at 0x7f935befc280>), ('on_config_changed', <function HaproxyCharm.on_config_changed at 0x7f935befc310>), ('on_backends_changed', <function HaproxyCharm.on_backends_changed at 0x7f935befc3a0>), ('__doc__', None)])
(Pdb++) c
E
======================================================================
ERROR: test_install (test.test_charm.TestTestingModelBackend)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python3.8/unittest/mock.py", line 1348, in patched
return func(*newargs, **newkeywargs)
File "/home/ubuntu/src/canonical/juju/charm-haproxy/test/test_charm.py", line 21, in test_install
harness.begin()
File "lib/ops/testing.py", line 101, in begin
self._charm = TestCharm(self._framework, self._framework.meta.name)
File "/home/ubuntu/src/canonical/juju/charm-haproxy/src/charm.py", line 41, in __init__
self.state.set_default(started=False)
File "lib/ops/framework.py", line 834, in __get__
if bound is None:
RuntimeError: cannot find StoredVariable attribute in type HaproxyCharm
----------------------------------------------------------------------
Ran 1 test in 30.287s
FAILED (errors=1)
``` | 0.0 | d259e0919fc19075b1e3636a5dd3c94ab81fd416 | [
"test/test_framework.py::TestStoredState::test_straight_sub_subclass",
"test/test_framework.py::TestStoredState::test_straight_subclass",
"test/test_framework.py::TestStoredState::test_the_crazy_thing",
"test/test_framework.py::TestStoredState::test_two_names_one_state",
"test/test_framework.py::TestStoredState::test_two_subclasses",
"test/test_framework.py::TestStoredState::test_two_subclasses_no_conflicts"
]
| [
"test/test_framework.py::TestFramework::test_auto_register_event_types",
"test/test_framework.py::TestFramework::test_bad_sig_observer",
"test/test_framework.py::TestFramework::test_ban_concurrent_frameworks",
"test/test_framework.py::TestFramework::test_conflicting_event_attributes",
"test/test_framework.py::TestFramework::test_custom_event_data",
"test/test_framework.py::TestFramework::test_defer_and_reemit",
"test/test_framework.py::TestFramework::test_dynamic_event_types",
"test/test_framework.py::TestFramework::test_event_key_roundtrip",
"test/test_framework.py::TestFramework::test_events_base",
"test/test_framework.py::TestFramework::test_forget_and_multiple_objects",
"test/test_framework.py::TestFramework::test_forget_and_multiple_objects_with_load_snapshot",
"test/test_framework.py::TestFramework::test_handle_attrs_readonly",
"test/test_framework.py::TestFramework::test_handle_path",
"test/test_framework.py::TestFramework::test_helper_properties",
"test/test_framework.py::TestFramework::test_on_pre_commit_emitted",
"test/test_framework.py::TestFramework::test_reemit_ignores_unknown_event_type",
"test/test_framework.py::TestFramework::test_restore_unknown",
"test/test_framework.py::TestFramework::test_simple_event_observer",
"test/test_framework.py::TestFramework::test_snapshot_roundtrip",
"test/test_framework.py::TestFramework::test_weak_observer",
"test/test_framework.py::TestStoredState::test_basic_state_storage",
"test/test_framework.py::TestStoredState::test_comparison_operations",
"test/test_framework.py::TestStoredState::test_mutable_types",
"test/test_framework.py::TestStoredState::test_mutable_types_invalid",
"test/test_framework.py::TestStoredState::test_same_name_two_classes",
"test/test_framework.py::TestStoredState::test_set_default",
"test/test_framework.py::TestStoredState::test_set_operations"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2020-03-31 20:08:59+00:00 | apache-2.0 | 1,483 |
|
canonical__operator-475 | diff --git a/ops/model.py b/ops/model.py
index 9efec7d..d03a387 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -652,18 +652,28 @@ class Relation:
self.app = None
self.units = set()
- # For peer relations, both the remote and the local app are the same.
if is_peer:
+ # For peer relations, both the remote and the local app are the same.
self.app = our_unit.app
+
try:
for unit_name in backend.relation_list(self.id):
unit = cache.get(Unit, unit_name)
self.units.add(unit)
if self.app is None:
+ # Use the app of one of the units if available.
self.app = unit.app
except RelationNotFoundError:
# If the relation is dead, just treat it as if it has no remote units.
pass
+
+ # If we didn't get the remote app via our_unit.app or the units list,
+ # look it up via JUJU_REMOTE_APP or "relation-list --app".
+ if self.app is None:
+ app_name = backend.relation_remote_app_name(relation_id)
+ if app_name is not None:
+ self.app = cache.get(Application, app_name)
+
self.data = RelationData(self, our_unit, backend)
def __repr__(self):
@@ -1151,6 +1161,10 @@ class _ModelBackend:
else:
return text
+ @staticmethod
+ def _is_relation_not_found(model_error):
+ return 'relation not found' in str(model_error)
+
def relation_ids(self, relation_name):
relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]
@@ -1160,10 +1174,32 @@ class _ModelBackend:
return self._run('relation-list', '-r', str(relation_id),
return_output=True, use_json=True)
except ModelError as e:
- if 'relation not found' in str(e):
+ if self._is_relation_not_found(e):
raise RelationNotFoundError() from e
raise
+ def relation_remote_app_name(self, relation_id: int) -> typing.Optional[str]:
+ """Return remote app name for given relation ID, or None if not known."""
+ if 'JUJU_RELATION_ID' in os.environ and 'JUJU_REMOTE_APP' in os.environ:
+ event_relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+ if relation_id == event_relation_id:
+ # JUJU_RELATION_ID is this relation, use JUJU_REMOTE_APP.
+ return os.environ['JUJU_REMOTE_APP']
+
+ # If caller is asking for information about another relation, use
+ # "relation-list --app" to get it.
+ try:
+ return self._run('relation-list', '-r', str(relation_id), '--app',
+ return_output=True, use_json=True)
+ except ModelError as e:
+ if self._is_relation_not_found(e):
+ return None
+ if 'option provided but not defined: --app' in str(e):
+ # "--app" was introduced to relation-list in Juju 2.8.1, so
+ # handle previous verions of Juju gracefully
+ return None
+ raise
+
def relation_get(self, relation_id, member_name, is_app):
if not isinstance(is_app, bool):
raise TypeError('is_app parameter to relation_get must be a boolean')
@@ -1181,7 +1217,7 @@ class _ModelBackend:
try:
return self._run(*args, return_output=True, use_json=True)
except ModelError as e:
- if 'relation not found' in str(e):
+ if self._is_relation_not_found(e):
raise RelationNotFoundError() from e
raise
@@ -1202,7 +1238,7 @@ class _ModelBackend:
try:
return self._run(*args)
except ModelError as e:
- if 'relation not found' in str(e):
+ if self._is_relation_not_found(e):
raise RelationNotFoundError() from e
raise
@@ -1337,7 +1373,7 @@ class _ModelBackend:
try:
return self._run(*cmd, return_output=True, use_json=True)
except ModelError as e:
- if 'relation not found' in str(e):
+ if self._is_relation_not_found(e):
raise RelationNotFoundError() from e
raise
| canonical/operator | 38a82715626e124404402c6cf9dd7e3bea147652 | diff --git a/ops/testing.py b/ops/testing.py
index 0bce767..d7e6c2f 100755
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -736,6 +736,12 @@ class _TestingModelBackend:
except KeyError as e:
raise model.RelationNotFoundError from e
+ def relation_remote_app_name(self, relation_id: int) -> typing.Optional[str]:
+ if relation_id not in self._relation_app_and_units:
+ # Non-existent or dead relation
+ return None
+ return self._relation_app_and_units[relation_id]['app']
+
def relation_get(self, relation_id, member_name, is_app):
if is_app and '/' in member_name:
member_name = member_name.split('/')[0]
diff --git a/test/test_model.py b/test/test_model.py
index 6c2bde5..afe18d6 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -120,6 +120,7 @@ class TestModel(unittest.TestCase):
self.assertEqual(dead_rel.data[self.model.unit], {})
self.assertBackendCalls([
('relation_list', 7),
+ ('relation_remote_app_name', 7),
('relation_get', 7, 'myapp/0', False),
])
@@ -134,7 +135,9 @@ class TestModel(unittest.TestCase):
self.assertBackendCalls([
('relation_ids', 'db0'),
('relation_list', self.relation_id_db0),
+ ('relation_remote_app_name', 0),
('relation_list', relation_id_db0_b),
+ ('relation_remote_app_name', 2),
])
def test_peer_relation_app(self):
@@ -403,6 +406,17 @@ class TestModel(unittest.TestCase):
('relation_get', relation_id, 'myapp/0', False),
])
+ def test_relation_no_units(self):
+ self.harness.add_relation('db1', 'remoteapp1')
+ rel = self.model.get_relation('db1')
+ self.assertEqual(rel.units, set())
+ self.assertIs(rel.app, self.model.get_app('remoteapp1'))
+ self.assertBackendCalls([
+ ('relation_ids', 'db1'),
+ ('relation_list', 1),
+ ('relation_remote_app_name', 1),
+ ])
+
def test_config(self):
self.harness.update_config({'foo': 'foo', 'bar': 1, 'qux': True})
self.assertEqual(self.model.config, {
@@ -1567,6 +1581,61 @@ class TestModelBackend(unittest.TestCase):
with self.assertRaises(ops.model.ModelError):
self.backend.add_metrics(metrics, labels)
+ def test_relation_remote_app_name_env(self):
+ self.addCleanup(os.environ.pop, 'JUJU_RELATION_ID', None)
+ self.addCleanup(os.environ.pop, 'JUJU_REMOTE_APP', None)
+
+ os.environ['JUJU_RELATION_ID'] = 'x:5'
+ os.environ['JUJU_REMOTE_APP'] = 'remoteapp1'
+ self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp1')
+ os.environ['JUJU_RELATION_ID'] = '5'
+ self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp1')
+
+ def test_relation_remote_app_name_script_success(self):
+ self.addCleanup(os.environ.pop, 'JUJU_RELATION_ID', None)
+ self.addCleanup(os.environ.pop, 'JUJU_REMOTE_APP', None)
+
+ # JUJU_RELATION_ID and JUJU_REMOTE_APP both unset
+ fake_script(self, 'relation-list', r"""
+echo '"remoteapp2"'
+""")
+ self.assertEqual(self.backend.relation_remote_app_name(1), 'remoteapp2')
+ self.assertEqual(fake_script_calls(self, clear=True), [
+ ['relation-list', '-r', '1', '--app', '--format=json'],
+ ])
+
+ # JUJU_RELATION_ID set but JUJU_REMOTE_APP unset
+ os.environ['JUJU_RELATION_ID'] = 'x:5'
+ self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp2')
+
+ # JUJU_RELATION_ID unset but JUJU_REMOTE_APP set
+ del os.environ['JUJU_RELATION_ID']
+ os.environ['JUJU_REMOTE_APP'] = 'remoteapp1'
+ self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp2')
+
+ # Both set, but JUJU_RELATION_ID a different relation
+ os.environ['JUJU_RELATION_ID'] = 'x:6'
+ self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp2')
+
+ def test_relation_remote_app_name_script_errors(self):
+ fake_script(self, 'relation-list', r"""
+echo "ERROR invalid value \"6\" for option -r: relation not found" >&2 # NOQA
+exit 2
+""")
+ self.assertIs(self.backend.relation_remote_app_name(6), None)
+ self.assertEqual(fake_script_calls(self, clear=True), [
+ ['relation-list', '-r', '6', '--app', '--format=json'],
+ ])
+
+ fake_script(self, 'relation-list', r"""
+echo "ERROR option provided but not defined: --app" >&2
+exit 2
+""")
+ self.assertIs(self.backend.relation_remote_app_name(6), None)
+ self.assertEqual(fake_script_calls(self, clear=True), [
+ ['relation-list', '-r', '6', '--app', '--format=json'],
+ ])
+
class TestLazyMapping(unittest.TestCase):
diff --git a/test/test_testing.py b/test/test_testing.py
index 119cbed..be87ed7 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -763,6 +763,7 @@ class TestHarness(unittest.TestCase):
harness._get_backend_calls(reset=True), [
('relation_ids', 'db'),
('relation_list', rel_id),
+ ('relation_remote_app_name', 0),
])
# add_relation_unit resets the relation_list, but doesn't trigger backend calls
harness.add_relation_unit(rel_id, 'postgresql/0')
@@ -1591,3 +1592,20 @@ class TestTestingModelBackend(unittest.TestCase):
self.assertIn(
"units/unit-test-app-0/resources/foo: resource#test-app/foo not found",
str(cm.exception))
+
+ def test_relation_remote_app_name(self):
+ harness = Harness(CharmBase, meta='''
+ name: test-charm
+ ''')
+ self.addCleanup(harness.cleanup)
+ backend = harness._backend
+
+ self.assertIs(backend.relation_remote_app_name(1), None)
+
+ rel_id = harness.add_relation('db', 'postgresql')
+ self.assertEqual(backend.relation_remote_app_name(rel_id), 'postgresql')
+ harness.add_relation_unit(rel_id, 'postgresql/0')
+ harness.add_relation_unit(rel_id, 'postgresql/1')
+ self.assertEqual(backend.relation_remote_app_name(rel_id), 'postgresql')
+
+ self.assertIs(backend.relation_remote_app_name(7), None)
| Remote app is not added to RelationMapping for relations in some cases
- Two apps, one primary, one subordinate;
- Both have two units;
- A leader subordinate writes app relation data to a container-scoped relation for all primaries to read;
- The first primary observes the leader subordinate (relation-joined) which then writes some data to the app relation data bag;
- The second primary comes up but gets a relation-changed event for the app relation data before it observes (relation-joined) its own subordinate - `relation-list` returns no units.
The reason it fails is that we currently retrieve the remote app from a remote unit and special-case the peer relation, but we do not do anything for subordinates we have not yet observed.
https://github.com/canonical/operator/blob/8ef8bd7276a5bacf33d6559db7a381b24a7c8544/ops/model.py#L335-L342
It might be that Juju needs to make sure a subordinate `-joined` event fires first for a primary and only then `-changed` events are fired (created https://bugs.launchpad.net/juju/+bug/1866828).
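On the library side, one possible fallback when `relation-list` returns no units is to ask Juju for the app directly; a rough sketch along the lines of the patch in this entry (`run_tool` is a hypothetical helper that runs a hook tool and returns its JSON-decoded output):
```python
import os

def relation_remote_app_name(relation_id, run_tool):
    """Resolve the remote app name even when relation-list shows no units."""
    # If the current hook fired for this very relation, Juju already told us.
    if 'JUJU_RELATION_ID' in os.environ and 'JUJU_REMOTE_APP' in os.environ:
        event_relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
        if event_relation_id == relation_id:
            return os.environ['JUJU_REMOTE_APP']
    # Otherwise ask Juju directly ("relation-list --app" exists from Juju 2.8.1 on).
    return run_tool('relation-list', '-r', str(relation_id), '--app')
```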
Additional info:
```
juju status
Model Controller Cloud/Region Version SLA Timestamp
default localhost-localhost localhost/localhost 2.7.4.1 unsupported 14:09:00+03:00
App Version Status Scale Charm Store Rev OS Notes
apache-httpd error 2 apache-httpd local 0 ubuntu
dummy-vhost waiting 1/2 dummy-vhost local 0 ubuntu
Unit Workload Agent Machine Public address Ports Message
apache-httpd/0* active idle 0 10.209.240.137
dummy-vhost/0* active idle 10.209.240.137
apache-httpd/1 error idle 1 10.209.240.253 hook failed: "vhost-config-relation-changed"
dummy-vhost/1 waiting allocating 10.209.240.253 agent initializing
```
The unit for which everything is OK:
```
juju show-status-log apache-httpd/0
Time Type Status Message
10 Mar 2020 14:07:41+03:00 juju-unit allocating
10 Mar 2020 14:07:41+03:00 workload waiting waiting for machine
10 Mar 2020 14:08:19+03:00 workload waiting installing agent
10 Mar 2020 14:08:21+03:00 workload waiting agent initializing
10 Mar 2020 14:08:22+03:00 workload maintenance installing charm software
10 Mar 2020 14:08:22+03:00 juju-unit executing running install hook
10 Mar 2020 14:08:32+03:00 juju-unit executing running leader-elected hook
10 Mar 2020 14:08:33+03:00 juju-unit executing running config-changed hook
10 Mar 2020 14:08:33+03:00 workload active
10 Mar 2020 14:08:33+03:00 juju-unit executing running start hook
10 Mar 2020 14:08:34+03:00 juju-unit idle
10 Mar 2020 14:08:40+03:00 juju-unit executing running vhost-config-relation-joined hook
10 Mar 2020 14:08:41+03:00 juju-unit executing running vhost-config-relation-changed hook
10 Mar 2020 14:08:42+03:00 juju-unit idle
10 Mar 2020 14:08:57+03:00 juju-unit executing running httpd-peer-relation-joined hook
10 Mar 2020 14:08:58+03:00 juju-unit executing running httpd-peer-relation-changed hook
10 Mar 2020 14:08:58+03:00 juju-unit idle
```
The failing unit:
```
juju show-status-log apache-httpd/1
Time Type Status Message
10 Mar 2020 14:08:44+03:00 workload waiting agent initializing
10 Mar 2020 14:08:44+03:00 workload maintenance installing charm software
10 Mar 2020 14:08:44+03:00 juju-unit executing running install hook
10 Mar 2020 14:08:56+03:00 juju-unit executing running leader-settings-changed hook
10 Mar 2020 14:08:56+03:00 juju-unit executing running config-changed hook
10 Mar 2020 14:08:56+03:00 workload active
10 Mar 2020 14:08:56+03:00 juju-unit executing running start hook
10 Mar 2020 14:08:57+03:00 juju-unit executing running httpd-peer-relation-joined hook
10 Mar 2020 14:08:58+03:00 juju-unit executing running httpd-peer-relation-changed hook
10 Mar 2020 14:08:59+03:00 juju-unit executing running vhost-config-relation-changed hook
10 Mar 2020 14:08:59+03:00 juju-unit error hook failed: "vhost-config-relation-changed"
```
```
./hooks/vhost-config-relation-changed
> /var/lib/juju/agents/unit-apache-httpd-1/charm/hooks/vhost-config-relation-changed(212)on_vhost_config_relation_changed()
-> vhosts_serialized = event.relation.data[event.app].get('vhosts')
(Pdb) c
Traceback (most recent call last):
File "./hooks/vhost-config-relation-changed", line 272, in <module>
main(Charm)
File "lib/ops/main.py", line 195, in main
_emit_charm_event(charm, juju_event_name)
File "lib/ops/main.py", line 120, in _emit_charm_event
event_to_emit.emit(*args, **kwargs)
File "lib/ops/framework.py", line 199, in emit
framework._emit(event)
File "lib/ops/framework.py", line 633, in _emit
self._reemit(event_path)
File "lib/ops/framework.py", line 668, in _reemit
custom_handler(event)
File "./hooks/vhost-config-relation-changed", line 212, in on_vhost_config_relation_changed
vhosts_serialized = event.relation.data[event.app].get('vhosts')
File "lib/ops/model.py", line 372, in __getitem__
return self._data[key]
KeyError: <ops.model.Application
```
```
(Pdb) dict(event.relation.data)
{<ops.model.Unit apache-httpd/1>: <ops.model.RelationDataContent object at 0x7f5d38f4ab00>, <ops.model.Application apache-httpd>: <ops.model.RelationDataContent object at 0x7f5d38f4aa58>}
```
```
[1]+ Stopped ./hooks/vhost-config-relation-changed
root@juju-df5eba-1:/var/lib/juju/agents/unit-apache-httpd-1/charm# relation-ids
vhost-config:1
root@juju-df5eba-1:/var/lib/juju/agents/unit-apache-httpd-1/charm# relation-list
root@juju-df5eba-1:/var/lib/juju/agents/unit-apache-httpd-1/charm# relation-list ; echo $?
0
root@juju-df5eba-1:/var/lib/juju/agents/unit-apache-httpd-1/charm# env | grep JUJU_REMOTE
JUJU_REMOTE_UNIT=
JUJU_REMOTE_APP=dummy-vhost
root@juju-df5eba-1:/var/lib/juju/agents/unit-apache-httpd-1/charm# relation-get --app
vhosts: '- {port: "80", template: PFZpcnR1YWxIb3N0ICo6ODA+CglTZXJ2ZXJBZG1pbiB3ZWJtYXN0ZXJAbG9jYWxob3N0CglEb2N1bWVudFJvb3QgL3Zhci93d3cvZHVtbXktdmhvc3QKCUVycm9yTG9nICR7QVBBQ0hFX0xPR19ESVJ9L2Vycm9yLmxvZwoJQ3VzdG9tTG9nICR7QVBBQ0hFX0xPR19ESVJ9L2FjY2Vzcy5sb2cgY29tYmluZWQKPC9WaXJ0dWFsSG9zdD4K}'
```
```
cat metadata.yaml
name: apache-httpd
# ...
requires:
vhost-config:
interface: apache-vhost-config
scope: container
``` | 0.0 | 38a82715626e124404402c6cf9dd7e3bea147652 | [
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_testing.py::TestHarness::test_get_backend_calls"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_get_layer",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_testing.py::TestHarness::test_actions_from_directory",
"test/test_testing.py::TestHarness::test_actions_passed_in",
"test/test_testing.py::TestHarness::test_add_oci_resource_custom",
"test/test_testing.py::TestHarness::test_add_oci_resource_no_image",
"test/test_testing.py::TestHarness::test_add_peer_relation_with_initial_data_leader",
"test/test_testing.py::TestHarness::test_add_relation",
"test/test_testing.py::TestHarness::test_add_relation_and_unit",
"test/test_testing.py::TestHarness::test_add_relation_with_our_initial_data",
"test/test_testing.py::TestHarness::test_add_relation_with_remote_app_data",
"test/test_testing.py::TestHarness::test_add_resource_but_oci",
"test/test_testing.py::TestHarness::test_add_resource_bytes",
"test/test_testing.py::TestHarness::test_add_resource_string",
"test/test_testing.py::TestHarness::test_add_resource_unknown",
"test/test_testing.py::TestHarness::test_add_resource_unknown_filename",
"test/test_testing.py::TestHarness::test_app_status",
"test/test_testing.py::TestHarness::test_begin_twice",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_multiple_relation_same_endpoint",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations_not_leader",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_peer_relation_pre_defined",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_relation_charm_with_no_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_application_data",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_multiple_units",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_one_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_peer_relation",
"test/test_testing.py::TestHarness::test_config_from_directory",
"test/test_testing.py::TestHarness::test_create_harness_twice",
"test/test_testing.py::TestHarness::test_get_backend_calls_with_kwargs",
"test/test_testing.py::TestHarness::test_get_pod_spec",
"test/test_testing.py::TestHarness::test_get_relation_data",
"test/test_testing.py::TestHarness::test_hooks_disabled_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_nested_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_noop",
"test/test_testing.py::TestHarness::test_hooks_enabled_and_disabled",
"test/test_testing.py::TestHarness::test_metadata_from_directory",
"test/test_testing.py::TestHarness::test_populate_oci_resources",
"test/test_testing.py::TestHarness::test_relation_events",
"test/test_testing.py::TestHarness::test_relation_set_app_not_leader",
"test/test_testing.py::TestHarness::test_relation_set_deletes",
"test/test_testing.py::TestHarness::test_resource_folder_cleanup",
"test/test_testing.py::TestHarness::test_set_leader",
"test/test_testing.py::TestHarness::test_set_model_name",
"test/test_testing.py::TestHarness::test_set_model_name_after_begin",
"test/test_testing.py::TestHarness::test_set_workload_version",
"test/test_testing.py::TestHarness::test_unit_status",
"test/test_testing.py::TestHarness::test_update_config",
"test/test_testing.py::TestHarness::test_update_peer_relation_app_data",
"test/test_testing.py::TestHarness::test_update_peer_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_exposes_new_data",
"test/test_testing.py::TestHarness::test_update_relation_no_local_app_change_event",
"test/test_testing.py::TestHarness::test_update_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_remove_data",
"test/test_testing.py::TestTestingModelBackend::test_lazy_resource_directory",
"test/test_testing.py::TestTestingModelBackend::test_relation_get_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_ids_unknown_relation",
"test/test_testing.py::TestTestingModelBackend::test_relation_list_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_remote_app_name",
"test/test_testing.py::TestTestingModelBackend::test_resource_get_no_resource",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_app",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_unit"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-02-10 04:42:55+00:00 | apache-2.0 | 1,484 |
|
canonical__operator-492 | diff --git a/ops/pebble.py b/ops/pebble.py
index b95c969..2f78aa3 100644
--- a/ops/pebble.py
+++ b/ops/pebble.py
@@ -447,7 +447,12 @@ class Layer:
class Service:
- """Represents a service description in a Pebble configuration layer."""
+ """Represents a service description in a Pebble configuration layer.
+
+ The "environment" attribute is parsed as a list of (key, value) tuples,
+ because that seems most natural for ordered keys and values in Python.
+ In the YAML, however, it's a list of 1-item {key: value} objects.
+ """
def __init__(self, name: str, raw: Dict = None):
self.name = name
@@ -460,7 +465,18 @@ class Service:
self.after = list(raw.get('after', []))
self.before = list(raw.get('before', []))
self.requires = list(raw.get('requires', []))
- self.environment = dict(raw.get('environment') or {})
+ self.environment = self._dicts_to_tuples(raw.get('environment', []))
+
+ @staticmethod
+ def _dicts_to_tuples(dicts):
+ """Convert list of 1-item {k: v} dicts to list of (k, v) tuples."""
+ tuples = []
+ for d in dicts:
+ if len(d) != 1:
+ raise ValueError('expected 1-item dict, got {!r}'.format(d))
+ kv = list(d.items())[0]
+ tuples.append(kv)
+ return tuples
def to_dict(self) -> Dict:
"""Convert this service object to its dict representation."""
@@ -473,7 +489,7 @@ class Service:
('after', self.after),
('before', self.before),
('requires', self.requires),
- ('environment', self.environment),
+ ('environment', [{k: v} for k, v in self.environment]),
]
return {name: value for name, value in fields if value}
| canonical/operator | 3d5170ee2c490bf96642d9f011424ff9a4eb187c | diff --git a/test/pebble_cli.py b/test/pebble_cli.py
index 706ce51..ae3e198 100644
--- a/test/pebble_cli.py
+++ b/test/pebble_cli.py
@@ -109,7 +109,7 @@ def main():
result = client.get_changes(select=pebble.ChangeState(args.select),
service=args.service)
elif args.command == 'plan':
- result = client.get_plan().raw_yaml
+ result = client.get_plan().to_yaml()
elif args.command == 'start':
result = client.start_services(args.service)
elif args.command == 'stop':
diff --git a/test/test_pebble.py b/test/test_pebble.py
index 6539381..7b63bff 100644
--- a/test/test_pebble.py
+++ b/test/test_pebble.py
@@ -418,6 +418,9 @@ class TestLayer(unittest.TestCase):
services:
bar:
command: echo bar
+ environment:
+ - ENV1: value1
+ - ENV2: value2
summary: Bar
foo:
command: echo foo
@@ -433,6 +436,8 @@ summary: Sum Mary
self.assertEqual(s.services['bar'].name, 'bar')
self.assertEqual(s.services['bar'].summary, 'Bar')
self.assertEqual(s.services['bar'].command, 'echo bar')
+ self.assertEqual(s.services['bar'].environment,
+ [('ENV1', 'value1'), ('ENV2', 'value2')])
self.assertEqual(s.to_yaml(), yaml)
self.assertEqual(str(s), yaml)
@@ -449,7 +454,7 @@ class TestService(unittest.TestCase):
self.assertEqual(service.after, [])
self.assertEqual(service.before, [])
self.assertEqual(service.requires, [])
- self.assertEqual(service.environment, {})
+ self.assertEqual(service.environment, [])
self.assertEqual(service.to_dict(), {})
def test_name_only(self):
@@ -469,7 +474,7 @@ class TestService(unittest.TestCase):
'after': ['a1', 'a2'],
'before': ['b1', 'b2'],
'requires': ['r1', 'r2'],
- 'environment': {'k1': 'v1', 'k2': 'v2'},
+ 'environment': [{'k1': 'v1'}, {'k2': 'v2'}],
}
s = pebble.Service('Name 2', d)
self.assertEqual(s.name, 'Name 2')
@@ -480,7 +485,7 @@ class TestService(unittest.TestCase):
self.assertEqual(s.after, ['a1', 'a2'])
self.assertEqual(s.before, ['b1', 'b2'])
self.assertEqual(s.requires, ['r1', 'r2'])
- self.assertEqual(s.environment, {'k1': 'v1', 'k2': 'v2'})
+ self.assertEqual(s.environment, [('k1', 'v1'), ('k2', 'v2')])
self.assertEqual(s.to_dict(), d)
@@ -488,15 +493,15 @@ class TestService(unittest.TestCase):
s.after.append('a3')
s.before.append('b3')
s.requires.append('r3')
- s.environment['k3'] = 'v3'
+ s.environment.append(('k3', 'v3'))
self.assertEqual(s.after, ['a1', 'a2', 'a3'])
self.assertEqual(s.before, ['b1', 'b2', 'b3'])
self.assertEqual(s.requires, ['r1', 'r2', 'r3'])
- self.assertEqual(s.environment, {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
+ self.assertEqual(s.environment, [('k1', 'v1'), ('k2', 'v2'), ('k3', 'v3')])
self.assertEqual(d['after'], ['a1', 'a2'])
self.assertEqual(d['before'], ['b1', 'b2'])
self.assertEqual(d['requires'], ['r1', 'r2'])
- self.assertEqual(d['environment'], {'k1': 'v1', 'k2': 'v2'})
+ self.assertEqual(d['environment'], [{'k1': 'v1'}, {'k2': 'v2'}])
class MockClient(pebble.Client):
| Can't send environment to pebble
I'm working on converting a charm to use pebble (as a proof of concept) and am running into problems trying to send it an `environment` configuration.
The pebble docs specify:
```
environment:
- VAR1: val1
- VAR2: val2
- VAR3: val3
```
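Expressed as the Python layer dict a charm passes to `add_layer`, that documented shape is a list of single-key mappings under the service's `environment` field, which is what the patch above teaches `ops.pebble.Service` to accept. A rough sketch (the service name, command, and values are reused from the logs below; treat the exact fields as illustrative):
```python
# Inside a charm event handler:
pebble_config = {
    "summary": "gunicorn layer",
    "description": "gunicorn layer",
    "services": {
        "gunicorn": {
            "override": "replace",
            "summary": "gunicorn service",
            "command": "/srv/gunicorn/run",
            "default": "start",
            # One single-key mapping per variable, matching the Pebble docs.
            "environment": [
                {"FAVOURITEFOOD": "burgers"},
                {"FAVOURITEDRINK": "ale"},
            ],
        },
    },
}
container = self.unit.get_container("gunicorn")
container.add_layer("gunicorn", pebble_config)
```
Per the patch, the parsed `Service.environment` is then exposed to charm code as a list of `(key, value)` tuples. Before that parsing fix, the list form was rejected, which is what the rest of this report shows.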
So I tried passing a list of dictionaries to the environment config, but I got:
```
unit-gunicorn-0: 10:04:56 INFO unit.gunicorn/0.juju-log About to dump yaml config <<EOM
description: gunicorn layer
services:
gunicorn:
command: /srv/gunicorn/run
default: start
environment:
- FAVOURITEFOOD: burgers
- FAVOURITEDRINK: ale
override: replace
summary: gunicorn service
summary: gunicorn layer
EOM
unit-gunicorn-0: 10:04:56 ERROR unit.gunicorn/0.juju-log Uncaught exception while in charm code:
Traceback (most recent call last):
File "./src/charm.py", line 438, in <module>
main(GunicornK8sCharm, use_juju_for_storage=True)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/main.py", line 406, in main
_emit_charm_event(charm, dispatcher.event_name)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/main.py", line 140, in _emit_charm_event
event_to_emit.emit(*args, **kwargs)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/framework.py", line 278, in emit
framework._emit(event)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/framework.py", line 722, in _emit
self._reemit(event_path)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/framework.py", line 767, in _reemit
custom_handler(event)
File "./src/charm.py", line 137, in _on_gunicorn_workload_ready
container.add_layer("gunicorn", pebble_config)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/model.py", line 1065, in add_layer
self._pebble.add_layer(label, layer, combine=combine)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 668, in add_layer
layer_yaml = Layer(layer).to_yaml()
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 427, in __init__
self.services = {name: Service(name, service)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 427, in <dictcomp>
self.services = {name: Service(name, service)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 463, in __init__
self.environment = dict(raw.get('environment') or {})
ValueError: dictionary update sequence element #0 has length 1; 2 is required
```
Looking at the code for the operator framework, it does `self.environment = dict(raw.get('environment') or {})`, so I tried just passing in a dictionary, but I got the following:
```
unit-gunicorn-0: 10:18:33 ERROR juju.worker.uniter pebble poll failed: hook failed
unit-gunicorn-0: 10:18:37 INFO unit.gunicorn/0.juju-log About to dump yaml config <<EOM
description: gunicorn layer
services:
gunicorn:
command: /srv/gunicorn/run
default: start
environment:
FAVOURITEDRINK: ale
FAVOURITEFOOD: burgers
override: replace
summary: gunicorn service
summary: gunicorn layer
EOM
unit-gunicorn-0: 10:18:37 ERROR unit.gunicorn/0.juju-log Uncaught exception while in charm code:
Traceback (most recent call last):
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 531, in _request
response = self.opener.open(request, timeout=self.timeout)
File "/usr/lib/python3.8/urllib/request.py", line 531, in open
response = meth(req, response)
File "/usr/lib/python3.8/urllib/request.py", line 640, in http_response
response = self.parent.error(
File "/usr/lib/python3.8/urllib/request.py", line 569, in error
return self._call_chain(*args)
File "/usr/lib/python3.8/urllib/request.py", line 502, in _call_chain
result = func(*args)
File "/usr/lib/python3.8/urllib/request.py", line 649, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 400: Bad Request
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./src/charm.py", line 450, in <module>
main(GunicornK8sCharm, use_juju_for_storage=True)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/main.py", line 406, in main
_emit_charm_event(charm, dispatcher.event_name)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/main.py", line 140, in _emit_charm_event
event_to_emit.emit(*args, **kwargs)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/framework.py", line 278, in emit
framework._emit(event)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/framework.py", line 722, in _emit
self._reemit(event_path)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/framework.py", line 767, in _reemit
custom_handler(event)
File "./src/charm.py", line 150, in _on_gunicorn_workload_ready
container.add_layer("gunicorn", pebble_config)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/model.py", line 1065, in add_layer
self._pebble.add_layer(label, layer, combine=combine)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 682, in add_layer
self._request('POST', '/v1/layers', body=body)
File "/var/lib/juju/agents/unit-gunicorn-0/charm/venv/ops/pebble.py", line 542, in _request
raise APIError(body, code, status, message)
ops.pebble.APIError: cannot parse layer YAML: cannot parse layer "gunicorn": yaml: unmarshal errors:
line 7: cannot unmarshal !!map into []plan.StringVariable
unit-gunicorn-0: 10:18:38 ERROR juju.worker.uniter.operation hook "gunicorn-workload-ready" (via hook dispatching script: dispatch) failed: exit status 1
``` | 0.0 | 3d5170ee2c490bf96642d9f011424ff9a4eb187c | [
"test/test_pebble.py::TestLayer::test_yaml",
"test/test_pebble.py::TestService::test_dict",
"test/test_pebble.py::TestService::test_name_only"
]
| [
"test/test_pebble.py::TestHelpers::test_parse_timestamp",
"test/test_pebble.py::TestTypes::test_api_error",
"test/test_pebble.py::TestTypes::test_change_error",
"test/test_pebble.py::TestTypes::test_change_from_dict",
"test/test_pebble.py::TestTypes::test_change_id",
"test/test_pebble.py::TestTypes::test_change_init",
"test/test_pebble.py::TestTypes::test_change_state",
"test/test_pebble.py::TestTypes::test_connection_error",
"test/test_pebble.py::TestTypes::test_error",
"test/test_pebble.py::TestTypes::test_system_info_from_dict",
"test/test_pebble.py::TestTypes::test_system_info_init",
"test/test_pebble.py::TestTypes::test_task_from_dict",
"test/test_pebble.py::TestTypes::test_task_id",
"test/test_pebble.py::TestTypes::test_task_init",
"test/test_pebble.py::TestTypes::test_task_progress_from_dict",
"test/test_pebble.py::TestTypes::test_task_progress_init",
"test/test_pebble.py::TestTypes::test_timeout_error",
"test/test_pebble.py::TestTypes::test_warning_from_dict",
"test/test_pebble.py::TestTypes::test_warning_init",
"test/test_pebble.py::TestTypes::test_warning_state",
"test/test_pebble.py::TestPlan::test_no_args",
"test/test_pebble.py::TestPlan::test_services",
"test/test_pebble.py::TestPlan::test_yaml",
"test/test_pebble.py::TestLayer::test_dict",
"test/test_pebble.py::TestLayer::test_no_args",
"test/test_pebble.py::TestClient::test_abort_change",
"test/test_pebble.py::TestClient::test_ack_warnings",
"test/test_pebble.py::TestClient::test_add_layer",
"test/test_pebble.py::TestClient::test_add_layer_invalid_type",
"test/test_pebble.py::TestClient::test_autostart_services",
"test/test_pebble.py::TestClient::test_autostart_services_async",
"test/test_pebble.py::TestClient::test_change_error",
"test/test_pebble.py::TestClient::test_client_init",
"test/test_pebble.py::TestClient::test_get_change",
"test/test_pebble.py::TestClient::test_get_changes",
"test/test_pebble.py::TestClient::test_get_plan",
"test/test_pebble.py::TestClient::test_get_system_info",
"test/test_pebble.py::TestClient::test_get_warnings",
"test/test_pebble.py::TestClient::test_start_services",
"test/test_pebble.py::TestClient::test_start_services_async",
"test/test_pebble.py::TestClient::test_stop_services",
"test/test_pebble.py::TestClient::test_stop_services_async",
"test/test_pebble.py::TestClient::test_wait_change_error",
"test/test_pebble.py::TestClient::test_wait_change_timeout",
"test/test_pebble.py::TestSocketClient::test_real_client",
"test/test_pebble.py::TestSocketClient::test_socket_not_found"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-03-21 23:36:48+00:00 | apache-2.0 | 1,485 |
|
canonical__operator-507 | diff --git a/ops/model.py b/ops/model.py
index 9f9a41f..ef49f71 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -559,8 +559,10 @@ class Network:
# interfaces with the same name.
for interface_info in network_info.get('bind-addresses', []):
interface_name = interface_info.get('interface-name')
- for address_info in interface_info.get('addresses', []):
- self.interfaces.append(NetworkInterface(interface_name, address_info))
+ addrs = interface_info.get('addresses')
+ if addrs is not None:
+ for address_info in addrs:
+ self.interfaces.append(NetworkInterface(interface_name, address_info))
self.ingress_addresses = []
for address in network_info.get('ingress-addresses', []):
self.ingress_addresses.append(ipaddress.ip_address(address))
| canonical/operator | 67e7eedf7a16e6fb35834b68ab8e2cc04242eae9 | diff --git a/test/test_model.py b/test/test_model.py
index a18fc28..768c971 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -1161,6 +1161,14 @@ class TestModelBindings(unittest.TestCase):
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.interfaces, [])
+ def test_no_bind_addresses(self):
+ network_data = json.dumps({'bind-addresses': [{'addresses': None}]})
+ fake_script(self, 'network-get',
+ '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
+ binding_name = 'db0'
+ binding = self.model.get_binding(self.model.get_relation(binding_name))
+ self.assertEqual(binding.network.interfaces, [])
+
def test_empty_interface_info(self):
network_data = json.dumps({
'bind-addresses': [{
| get_binding method raises a TypeError if called too early in charm lifecycle
## Bug
If `self.model.get_binding()` is called too early in a charm's lifecycle it raises a `TypeError`. This error is unexpected and is not documented in the `ops.model` docstrings. It would be OK to return an empty result or even raise a known, documented exception, but `TypeError` seems out of place.
## Traceback
For example, when `get_binding()` is invoked in the first `config_changed` event, a traceback such as the following results:
```
application-prometheus: 14:08:42 ERROR unit.prometheus/0.juju-log Uncaught exception while in charm code:
Traceback (most recent call last):
File "./src/charm.py", line 369, in <module>
main(PrometheusCharm)
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/main.py", line 406, in main
_emit_charm_event(charm, dispatcher.event_name)
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/main.py", line 140, in _emit_charm_event
event_to_emit.emit(*args, **kwargs)
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/framework.py", line 278, in emit
framework._emit(event)
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/framework.py", line 722, in _emit
self._reemit(event_path)
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/framework.py", line 767, in _reemit
custom_handler(event)
File "./src/charm.py", line 48, in _on_config_changed
self.ingress_address()
File "./src/charm.py", line 359, in ingress_address
ingress = str(self.model.get_binding('prometheus').network.ingress_address)
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/model.py", line 523, in network
self._network = Network(self._backend.network_get(self.name, self._relation_id))
File "/var/lib/juju/agents/unit-prometheus-0/charm/venv/ops/model.py", line 562, in __init__
for address_info in interface_info.get('addresses', []):
TypeError: 'NoneType' object is not iterable
application-prometheus: 14:08:42 ERROR juju.worker.caasoperator.uniter.prometheus/0.operation hook "config-changed" (via hook dispatching script: dispatch) failed: exit status 1
```
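Until the framework handles this case, a charm can guard the call itself. A minimal workaround sketch (the exception list and the empty-string fallback are illustrative choices, not part of ops):
```python
import ops.model
from ops.charm import CharmBase


class PrometheusCharm(CharmBase):
    def ingress_address(self) -> str:
        """Best-effort lookup of the ingress address.

        Returns an empty string instead of crashing the hook when network
        data is not yet available early in the charm's lifecycle.
        """
        try:
            binding = self.model.get_binding('prometheus')
            return str(binding.network.ingress_address)
        except (TypeError, ops.model.ModelError):
            # network-get may report `addresses: null` shortly after deploy,
            # which currently surfaces as the TypeError shown above.
            return ''
```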
## Notes
The bug was triaged by @jameinel and he pointed out "from the traceback it looks like the code was trying to do "If it doesn't exist replace it with an empty list", but in actuality it does exist but with a content of None. So we just need to change it to:"
```
addrs = ...get()
if addrs is not None:
for addr in addrs:
``` | 0.0 | 67e7eedf7a16e6fb35834b68ab8e2cc04242eae9 | [
"test/test_model.py::TestModelBindings::test_no_bind_addresses"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-04-14 14:26:44+00:00 | apache-2.0 | 1,486 |
|
canonical__operator-556 | diff --git a/ops/model.py b/ops/model.py
index b58d729..d10c3f0 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -1043,10 +1043,14 @@ class Container:
def start(self, *service_names: str):
"""Start given service(s) by name."""
+ if not service_names:
+ raise TypeError('start expected at least 1 argument, got 0')
self._pebble.start_services(service_names)
def stop(self, *service_names: str):
"""Stop given service(s) by name."""
+ if not service_names:
+ raise TypeError('stop expected at least 1 argument, got 0')
self._pebble.stop_services(service_names)
# TODO(benhoyt) - should be: layer: typing.Union[str, typing.Dict, 'pebble.Layer'],
| canonical/operator | 184776476063a8234647e4787b252b729340da57 | diff --git a/test/test_model.py b/test/test_model.py
index ed64add..030393a 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -824,6 +824,10 @@ containers:
('start', ('foo', 'bar')),
])
+ def test_start_no_arguments(self):
+ with self.assertRaises(TypeError):
+ self.container.start()
+
def test_stop(self):
self.container.stop('foo')
self.container.stop('foo', 'bar')
@@ -832,6 +836,10 @@ containers:
('stop', ('foo', 'bar')),
])
+ def test_stop_no_arguments(self):
+ with self.assertRaises(TypeError):
+ self.container.stop()
+
def test_type_errors(self):
meta = ops.charm.CharmMeta.from_yaml("""
name: k8s-charm
| Container.start() silently accepts no arguments and does nothing
`Container.start()` takes a variable number of strings as arguments, each specifying a service to be started. The issue is that it also accepts zero arguments without complaint, even though that clearly indicates a mistake on the user's part.
I believe it should raise some sort of exception in the case of no arguments.
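At the call site that would make the contract explicit: every call must name at least one service, and a bare call fails loudly instead of silently doing nothing. An illustrative snippet (assumes a charm context; `mycharm` is just a placeholder service name):
```python
container = self.unit.get_container("mycharm")

container.start("mycharm")         # OK: one service named explicitly
container.start("web", "worker")   # OK: several services at once
container.start()                  # should raise TypeError rather than no-op
```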
Applying this patch to current master (`18477647`) demonstrates the issue:
```diff
diff --git a/test/test_model.py b/test/test_model.py
index ed64add..564681a 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -824,6 +824,10 @@ containers:
('start', ('foo', 'bar')),
])
+ def test_start_no_argument(self):
+ with self.assertRaises(Exception):
+ self.container.start()
+
def test_stop(self):
self.container.stop('foo')
self.container.stop('foo', 'bar')
``` | 0.0 | 184776476063a8234647e4787b252b729340da57 | [
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-06-23 22:26:12+00:00 | apache-2.0 | 1,487 |
|
canonical__operator-567 | diff --git a/ops/model.py b/ops/model.py
index 88c0e29..0ee56ef 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -18,6 +18,7 @@ import datetime
import decimal
import ipaddress
import json
+import logging
import os
import re
import shutil
@@ -37,6 +38,17 @@ import ops
import ops.pebble as pebble
+logger = logging.getLogger(__name__)
+
+ErrorsWithMessage = (
+ pebble.APIError,
+ pebble.ConnectionError,
+ pebble.PathError,
+ pebble.ProtocolError,
+ pebble.TimeoutError,
+)
+
+
class Model:
"""Represents the Juju Model as seen from this unit.
@@ -1040,12 +1052,56 @@ class Container:
socket_path = '/charm/containers/{}/pebble.socket'.format(name)
pebble_client = backend.get_pebble(socket_path)
self._pebble = pebble_client
+ self._completed = None
@property
def pebble(self) -> 'pebble.Client':
"""The low-level :class:`ops.pebble.Client` instance for this container."""
return self._pebble
+ @property
+ def completed(self) -> bool:
+ """Whether or not a :meth:`is_ready` context finished successfully."""
+ return self._completed
+
+ def is_ready(self) -> '_ContainerReady':
+ """Check whether or not Pebble is ready as a simple property.
+
+ :meth:`is_ready` returns a :class:_ContainerReady `contextmanager` which
+ can be used in charms to wrap :class:`Container` operations which depend
+ on the Pebble backend being available. When `is_ready` is used, exceptions
+ from the underlying Pebble operations will log error messages rather than
+ raising exceptions.
+
+ Example:
+ ```
+ container = self.unit.get_container("example")
+ with container.is_ready() as c:
+ c.pull('/does/not/exist')
+
+ # This point of execution will not be reached if an exception
+ # was caught earlier
+ c.get_service("foo")
+ c.completed # False
+ ```
+
+ This will result in an `ERROR` log from PathError, but not a
+ traceback. In addition, the block running inside the contextmanager
+ will exit and return to the previous point of execution. Whether
+ or not the block completed successfully is available as a property
+
+ :meth:`is_ready` can also be used as a bare function, which will log an
+ error if the container is not ready.
+
+ Example:
+ ```
+ if container.is_ready():
+ do_something()
+ else:
+ do_something_else()
+ """
+ return _ContainerReady(self)
+
def autostart(self):
"""Autostart all services marked as startup: enabled."""
self._pebble.autostart_services()
@@ -1054,12 +1110,22 @@ class Container:
"""Start given service(s) by name."""
if not service_names:
raise TypeError('start expected at least 1 argument, got 0')
+
+ self._pebble.start_services(service_names)
+
+ def restart(self, *service_names: str):
+ """Restart the given service(s) by name."""
+ if not service_names:
+ raise TypeError('restart expected at least 1 argument, got 0')
+
+ self._pebble.stop_services(service_names)
self._pebble.start_services(service_names)
def stop(self, *service_names: str):
"""Stop given service(s) by name."""
if not service_names:
raise TypeError('stop expected at least 1 argument, got 0')
+
self._pebble.stop_services(service_names)
# TODO(benhoyt) - should be: layer: typing.Union[str, typing.Dict, 'pebble.Layer'],
@@ -1193,6 +1259,47 @@ class Container:
self._pebble.remove_path(path, recursive=recursive)
+class _ContainerReady:
+ """Represents whether or not a container is ready as a Context Manager.
+
+ This class should not be instantiated directly, instead use :meth:`Container.is_ready`
+
+ Attributes:
+ container: A :class:`Container` object
+ """
+
+ def __init__(self, container: Container):
+ self.container = container
+
+ def __bool__(self) -> bool:
+ try:
+ # We don't care at all whether not the services are up in
+ # this case, just whether Pebble throws an error. If it doesn't,
+ # carry on with the contextmanager.
+ self.container._pebble.get_services()
+ except ErrorsWithMessage as e:
+ logger.error("Pebble is not ready! (%s) was raised due to: %s",
+ e.name, e.message)
+ return False
+ return True
+
+ def __enter__(self) -> 'Container':
+ self.container._completed = True
+ return self.container
+
+ def __exit__(self, exc_type, e, exc_tb):
+ if exc_type in ErrorsWithMessage:
+ logger.error("(%s) was raised due to: %s", e.name, e.message)
+ self.container._completed = False
+ return True
+
+ if exc_type is pebble.ChangeError:
+ logger.error("Pebble could not apply the requested change (%s) "
+ "due to %s", e.change, e.err)
+ self.container._completed = False
+ return True
+
+
class ContainerMapping(Mapping):
"""Map of container names to Container objects.
@@ -1244,6 +1351,19 @@ class ModelError(Exception):
pass
+class UnknownServiceError(Exception):
+ """Raised by :class:`Container` objects when Pebble cannot find a service.
+
+ This is done so authors can have a single catch-all exception if the service
+ cannot be found, typically due to asking for the service before
+ :meth:`Container.add_layer` has been called.
+ """
+
+
+class PebbleNotReadyError(Exception):
+ """Raised by :class:`Container` methods if the underlying Pebble socket returns an error."""
+
+
class TooManyRelatedAppsError(ModelError):
"""Raised by :meth:`Model.get_relation` if there is more than one related application."""
diff --git a/ops/pebble.py b/ops/pebble.py
index c65e8c9..4155d61 100644
--- a/ops/pebble.py
+++ b/ops/pebble.py
@@ -126,6 +126,17 @@ def _json_loads(s: typing.Union[str, bytes]) -> typing.Dict:
class Error(Exception):
"""Base class of most errors raised by the Pebble client."""
+ def __repr__(self):
+ return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.args)
+
+ def name(self):
+ """Return a string representation of the model plus class."""
+ return '<{}.{}>'.format(type(self).__module__, type(self).__name__)
+
+ def message(self):
+ """Return the message passed as an argument."""
+ return self.args[0]
+
class TimeoutError(TimeoutError, Error):
"""Raised when a polling timeout occurs."""
| canonical/operator | 5943a59ccde766c832d59229f3ac431587799f34 | diff --git a/test/test_model.py b/test/test_model.py
index 030393a..3f80664 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -1013,6 +1013,22 @@ containers:
('remove_path', '/path/2', True),
])
+ def test_no_exception_with_contextmanager(self):
+ with self.assertLogs() as logs:
+ self.pebble.responses.append('dummy')
+ with self.container.is_ready() as c:
+ raise ops.pebble.ConnectionError("Some dummy message")
+ self.assertIn("was raised due to", logs.records[0].getMessage())
+ self.assertEqual(c.completed, False)
+
+ def test_exception_without_contextmanager(self):
+ with self.assertRaises(ops.pebble.ConnectionError):
+ raise ops.pebble.ConnectionError("Some dummy message")
+
+ def test_bare_is_ready_call(self):
+ self.pebble.responses.append('dummy')
+ self.assertTrue(self.container.is_ready())
+
class MockPebbleBackend(ops.model._ModelBackend):
def get_pebble(self, socket_path):
| Confusing / inconsistent errors from interaction between Pebble and Juju events and handlers
I've frequently seen charm authors who are new to the sidecar pattern write something along the lines of this in their charm:
```python
class MyCharm(CharmBase):
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.mycharm_pebble_ready, self._create_layer)
self.framework.observe(self.on.config_changed, self._restart_service)
def _create_layer(self, event):
container = self.unit.get_container("mycharm")
container.add_layer("mycharm", {"services": {"mycharm": {...}}})
container.autostart()
def _restart_service(self, event):
container = self.unit.get_container("mycharm")
# presumably introspect and / or update the service, then...
container.stop("mycharm")
container.start("mycharm")
```
This seems like a straightforward and reasonable way for someone to start out approaching this, but can result in any one of 5 different outcomes:
* It might work fine. Depending on timing and how the `_restart_service` handler does or doesn't implement updating the service based on config changes, this might deploy and run fine, at least most of the time.
* It might raise `ops.pebble.ConnectionError`. Depending on how quickly Pebble becomes ready to accept connections, it might fail trying to talk to Pebble at all. Worse, this could be an intermittent failure. Additionally, this won't ever happen in unit tests because the `_TestingPebbleClient` is always ready immediately.
* It might raise `RuntimeError: 400 Bad Request: service "mycharm" does not exist`. This is somewhat related to #514 but is slightly different and might be specific to the `_TestingPebbleClient`.
* It might raise `ops.pebble.APIError: 400 Bad Request: service "mycharm" does not exist`. A charm that raised the previous `RuntimeError` during unit tests would most likely raise this during an actual deployment.
* It might raise `ops.model.ModelError: service 'mycharm' not found`. If the `_restart_service` handler does an explicit `container.get_service("mycharm")`, then it will get this rather than either of the previous two errors, unless it calls `container.add_layer(layer_name, layer_definition, combine=True)` first.
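In practice, a handler today has to defend against all of these itself. A rough sketch of what that looks like (the exception choices and the `event.defer()` fallback are illustrative, not a prescribed pattern):
```python
from ops.charm import CharmBase
from ops.model import ModelError
from ops.pebble import ConnectionError as PebbleConnectionError


class MyCharm(CharmBase):
    def _restart_service(self, event):
        container = self.unit.get_container("mycharm")
        try:
            service = container.get_service("mycharm")
        except (ModelError, PebbleConnectionError):
            # Pebble is not ready yet, or the layer has not been added:
            # retry later instead of failing the hook.
            event.defer()
            return
        if service.is_running():
            container.stop("mycharm")
        container.start("mycharm")
```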
It would at least be good to ensure that all of the latter 3 cases result in a single `ops.pebble.UnknownServiceError` or something, but I've found that new charmers will still be confused as to why the service isn't recognized despite them having defined it during the `pebble-ready` event. Maybe the additional message on the `UnknownServiceError` can include a hint such as `(typically due to referencing a service before add_layer is called)`.
It would also be good to make the testing harness always raise `ConnectionError` until the `pebble-ready` event is triggered, to force charm authors to consider that possibility.
A `container.restart(service)` helper would be nice, as well. | 0.0 | 5943a59ccde766c832d59229f3ac431587799f34 | [
"test/test_model.py::TestContainerPebble::test_bare_is_ready_call",
"test/test_model.py::TestContainerPebble::test_no_exception_with_contextmanager"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_exception_without_contextmanager",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-07-13 23:57:55+00:00 | apache-2.0 | 1,488 |
|
canonical__operator-601 | diff --git a/ops/charm.py b/ops/charm.py
index 85c4a4d..11ed17f 100755
--- a/ops/charm.py
+++ b/ops/charm.py
@@ -893,7 +893,92 @@ class ContainerMeta:
Attributes:
name: Name of container (key in the YAML)
+ mounts: :class:`ContainerStorageMeta` mounts available to the container
"""
-
def __init__(self, name, raw):
self.name = name
+ self._mounts = {}
+
+ # This is not guaranteed to be populated/is not enforced yet
+ if raw:
+ self._populate_mounts(raw.get('mounts', []))
+
+ @property
+ def mounts(self) -> typing.Dict:
+ """An accessor for the mounts in a container.
+
+ Dict keys match key name in :class:`StorageMeta`
+
+ Example::
+
+ storage:
+ foo:
+ type: filesystem
+ location: /test
+ containers:
+ bar:
+ mounts:
+ - storage: foo
+ - location: /test/mount
+ """
+ return self._mounts
+
+ def _populate_mounts(self, mounts: typing.List):
+ """Populate a list of container mountpoints.
+
+ Since Charm Metadata v2 specifies the mounts as a List, do a little data manipulation
+ to convert the values to "friendly" names which contain a list of mountpoints
+ under each key.
+ """
+ for mount in mounts:
+ storage = mount.get("storage", "")
+ mount = mount.get("location", "")
+
+ if not mount:
+ continue
+
+ if storage in self._mounts:
+ self._mounts[storage].add_location(mount)
+ else:
+ self._mounts[storage] = ContainerStorageMeta(storage, mount)
+
+
+class ContainerStorageMeta:
+ """Metadata about storage for an individual container.
+
+ Attributes:
+ storage: a name for the mountpoint, which should exist the keys for :class:`StorageMeta`
+ for the charm
+ location: the location `storage` is mounted at
+ locations: a list of mountpoints for the key
+
+ If multiple locations are specified for the same storage, such as Kubernetes subPath mounts,
+ `location` will not be an accessible attribute, as it would not be possible to determine
+ which mount point was desired, and `locations` should be iterated over.
+ """
+ def __init__(self, storage, location):
+ self.storage = storage
+ self._locations = [location]
+
+ def add_location(self, location):
+ """Add an additional mountpoint to a known storage."""
+ self._locations.append(location)
+
+ @property
+ def locations(self) -> typing.List:
+ """An accessor for the list of locations for a mount."""
+ return self._locations
+
+ def __getattr__(self, name):
+ if name == "location":
+ if len(self._locations) == 1:
+ return self._locations[0]
+ else:
+ raise RuntimeError(
+ "container has more than one mountpoint with the same backing storage. "
+ "Request .locations to see a list"
+ )
+ else:
+ raise AttributeError(
+ "{.__class__.__name__} has no such attribute: {}!".format(self, name)
+ )
| canonical/operator | d90a2a26505ac851bc6d50e38b60f078e08796af | diff --git a/test/test_charm.py b/test/test_charm.py
index 78dd4aa..556c606 100755
--- a/test/test_charm.py
+++ b/test/test_charm.py
@@ -24,6 +24,7 @@ from ops.charm import (
CharmMeta,
CharmEvents,
ContainerMeta,
+ ContainerStorageMeta,
)
from ops.framework import Framework, EventSource, EventBase
from ops.model import Model, _ModelBackend
@@ -430,3 +431,51 @@ containers:
self.assertIsInstance(meta.containers['test2'], ContainerMeta)
self.assertEqual(meta.containers['test1'].name, 'test1')
self.assertEqual(meta.containers['test2'].name, 'test2')
+
+ def test_containers_storage(self):
+ meta = CharmMeta.from_yaml("""
+name: k8s-charm
+storage:
+ data:
+ type: filesystem
+ location: /test/storage
+ other:
+ type: filesystem
+ location: /test/other
+containers:
+ test1:
+ mounts:
+ - storage: data
+ location: /test/storagemount
+ - storage: other
+ location: /test/otherdata
+""")
+ self.assertIsInstance(meta.containers['test1'], ContainerMeta)
+ self.assertIsInstance(meta.containers['test1'].mounts["data"], ContainerStorageMeta)
+ self.assertEqual(meta.containers['test1'].mounts["data"].location, '/test/storagemount')
+ self.assertEqual(meta.containers['test1'].mounts["other"].location, '/test/otherdata')
+
+ def test_containers_storage_multiple_mounts(self):
+ meta = CharmMeta.from_yaml("""
+name: k8s-charm
+storage:
+ data:
+ type: filesystem
+ location: /test/storage
+containers:
+ test1:
+ mounts:
+ - storage: data
+ location: /test/storagemount
+ - storage: data
+ location: /test/otherdata
+""")
+ self.assertIsInstance(meta.containers['test1'], ContainerMeta)
+ self.assertIsInstance(meta.containers['test1'].mounts["data"], ContainerStorageMeta)
+ self.assertEqual(
+ meta.containers['test1'].mounts["data"].locations[0],
+ '/test/storagemount')
+ self.assertEqual(meta.containers['test1'].mounts["data"].locations[1], '/test/otherdata')
+
+ with self.assertRaises(RuntimeError):
+ meta.containers["test1"].mounts["data"].location
| ingest mounted location into CharmMeta
Obtaining storage location via the storages section,
```python
self.meta.storages["data"].location
```
is not very useful for k8s charms, because for k8s charms it specifies the mount point in the charm container, not in the workload container.
For the workload, one would need something like:
```python
self.meta.containers[self._container_name].mounts[0].location
```
or
```python
self.meta.containers[self._container_name].mounts.data.location
```
However, the mounted location is currently not ingested by CharmMeta:
https://github.com/canonical/operator/blob/b1a8fffc053fbbdaf43b826e78c220bfdae4c8fe/ops/charm.py#L891-L892 | 0.0 | d90a2a26505ac851bc6d50e38b60f078e08796af | [
"test/test_charm.py::TestCharm::test_action_event_defer_fails",
"test/test_charm.py::TestCharm::test_action_events",
"test/test_charm.py::TestCharm::test_basic",
"test/test_charm.py::TestCharm::test_containers",
"test/test_charm.py::TestCharm::test_containers_storage",
"test/test_charm.py::TestCharm::test_containers_storage_multiple_mounts",
"test/test_charm.py::TestCharm::test_empty_action",
"test/test_charm.py::TestCharm::test_helper_properties",
"test/test_charm.py::TestCharm::test_relation_events",
"test/test_charm.py::TestCharm::test_relations_meta",
"test/test_charm.py::TestCharm::test_relations_meta_limit_type_validation",
"test/test_charm.py::TestCharm::test_relations_meta_scope_type_validation",
"test/test_charm.py::TestCharm::test_storage_events",
"test/test_charm.py::TestCharm::test_workload_events"
]
| []
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-03 02:51:42+00:00 | apache-2.0 | 1,489 |
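
A brief, hypothetical sketch of how the `mounts` accessor introduced in the patch of the record above might be used from charm code; the charm class, the container name (`workload`) and the storage name (`data`) are illustrative and would have to match the charm's metadata.yaml.

```python
from ops.charm import CharmBase


class ExampleCharm(CharmBase):
    """Hypothetical charm demonstrating the ContainerMeta.mounts accessor."""

    def _data_mount_location(self):
        # 'workload' and 'data' are placeholder names; they must match the
        # containers/storage sections of this charm's metadata.yaml.
        container_meta = self.meta.containers["workload"]
        mount = container_meta.mounts["data"]
        # .location resolves when the storage has a single mountpoint; with
        # several mountpoints (e.g. Kubernetes subPath mounts) it raises
        # RuntimeError and .locations should be iterated over instead.
        return mount.location
```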
|
canonical__operator-661 | diff --git a/ops/model.py b/ops/model.py
index ec99173..c20d883 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -951,10 +951,10 @@ class Resources:
"""Fetch the resource from the controller or store.
If successfully fetched, this returns a Path object to where the resource is stored
- on disk, otherwise it raises a ModelError.
+ on disk, otherwise it raises a NameError.
"""
if name not in self._paths:
- raise RuntimeError('invalid resource name: {}'.format(name))
+ raise NameError('invalid resource name: {}'.format(name))
if self._paths[name] is None:
self._paths[name] = Path(self._backend.resource_get(name))
return self._paths[name]
| canonical/operator | 91b6551ac2cdda5ade84e2fd2bddd14b198cae71 | diff --git a/test/test_model.py b/test/test_model.py
index d243df3..01325d6 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -490,7 +490,7 @@ class TestModel(unittest.TestCase):
self.harness.add_resource('foo', 'foo contents\n')
self.harness.add_resource('bar', '')
- with self.assertRaises(RuntimeError):
+ with self.assertRaises(NameError):
self.harness.model.resources.fetch('qux')
self.assertEqual(self.harness.model.resources.fetch('foo').name, 'foo.txt')
| Failing to fetch a resource does not raise the expected exception
According to the docstring and, as a consequence, the rendered docs, `charm.model.resources.fetch` is expected to raise a `ModelError`. In reality, it raises a `RuntimeError`. We should probably change either the docs or the exception raised.
https://github.com/canonical/operator/blob/9baf2ab3deafcaff5d9c6679cdf1d11fc88e3fa3/ops/model.py#L950-L960 | 0.0 | 91b6551ac2cdda5ade84e2fd2bddd14b198cae71 | [
"test/test_model.py::TestModel::test_resources"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_bare_can_connect_call",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_get_system_info",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-11-03 14:36:22+00:00 | apache-2.0 | 1,490 |
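
As a rough illustration of the behaviour discussed in the record above, a charm might guard a resource fetch roughly as follows; the resource name `config-file` is purely illustrative, and which exception an unknown name raises depends on the ops version.

```python
from ops.charm import CharmBase
from ops.model import ModelError


class ResourceCharm(CharmBase):
    """Hypothetical charm that fetches a declared resource defensively."""

    def _fetch_config_resource(self):
        try:
            # An unknown resource name raised RuntimeError before the change in
            # the record above and raises NameError after it; a resource that
            # cannot be retrieved from the controller surfaces as a ModelError.
            return self.model.resources.fetch("config-file")
        except (NameError, RuntimeError, ModelError):
            return None
```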
|
canonical__operator-702 | diff --git a/ops/model.py b/ops/model.py
index 9974af9..e627951 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -1194,6 +1194,7 @@ class Container:
If no service names are specified, return status information for all
services, otherwise return information for only the given services.
"""
+ service_names = service_names or None
services = self._pebble.get_services(service_names)
return ServiceInfoMapping(services)
| canonical/operator | 88529c95575301f47a1b420dc29394c1a1b9b50b | diff --git a/test/test_model.py b/test/test_model.py
index c8e890f..5004d88 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -772,9 +772,26 @@ class TestApplication(unittest.TestCase):
resources:
foo: {type: file, filename: foo.txt}
bar: {type: file, filename: bar.txt}
+ containers:
+ bar:
+ k: v
''')
self.peer_rel_id = self.harness.add_relation('db2', 'db2')
self.app = self.harness.model.app
+ self.addCleanup(self.harness.cleanup)
+
+ # Tests fix for https://github.com/canonical/operator/issues/694.
+ def test_mocked_get_services(self):
+ self.harness.begin()
+ c = self.harness.charm.unit.get_container('bar')
+ c.add_layer('layer1', {
+ 'summary': 'layer',
+ 'services': {"baz": {'override': 'replace', 'summary': 'echo', 'command': 'echo 1'}},
+ })
+
+ s = c.get_service('baz') # So far, so good
+ self.assertTrue(s)
+ self.assertTrue('baz' in c.get_services())
def test_planned_units(self):
rel_id = self.peer_rel_id
@@ -1062,7 +1079,7 @@ containers:
self.assertEqual(services['s2'].current, ops.pebble.ServiceStatus.INACTIVE)
self.assertEqual(self.pebble.requests, [
- ('get_services', ()),
+ ('get_services', None),
('get_services', ('s1', 's2')),
])
| Harness.Container.get_services() does not seem correctly mocked
I think this test should work, but it does not with `ops 1.3.0`:
```
def test_mocked_get_services(self):
self.harness.begin()
traefik_container = self.harness.charm.unit.get_container("traefik")
traefik_container.add_layer("traefik_layer", {
"summary": "Traefik layer",
"description": "Pebble config layer for Traefik",
"services": {
"traefik": {
"override": "replace",
"summary": "Traefik",
"command": "/usr/bin/traefik",
},
},
})
assert traefik_container.get_service("traefik") # So far, so good
assert "traefik" in traefik_container.get_services() # FAILS, `traefik_container.get_services()` returns {}
``` | 0.0 | 88529c95575301f47a1b420dc29394c1a1b9b50b | [
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestContainerPebble::test_get_services"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_bare_can_connect_call",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_system_info",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-02-22 17:43:53+00:00 | apache-2.0 | 1,491 |
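
A minimal sketch of the kind of harness test the one-line fix in the record above is meant to support; the metadata, container name and service name are made up for illustration.

```python
import textwrap

from ops.charm import CharmBase
from ops.testing import Harness


def test_get_services_returns_all_services():
    harness = Harness(CharmBase, meta=textwrap.dedent("""
        name: example
        containers:
          workload: {}
        """))
    harness.begin()
    container = harness.charm.unit.get_container("workload")
    container.add_layer("base", {
        "summary": "base layer",
        "services": {
            "svc": {"override": "replace", "summary": "echo", "command": "echo 1"},
        },
    })
    # With the fix, get_services() called with no arguments passes None to the
    # Pebble client and therefore returns every known service, not an empty map.
    assert "svc" in container.get_services()
    harness.cleanup()
```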
|
canonical__operator-733 | diff --git a/ops/framework.py b/ops/framework.py
index f455bbd..f08845c 100755
--- a/ops/framework.py
+++ b/ops/framework.py
@@ -753,6 +753,31 @@ class Framework(Object):
self._reemit()
def _reemit(self, single_event_path=None):
+
+ class EventContext:
+ """Handles toggling the hook-is-running state in backends.
+
+ This allows e.g. harness logic to know if it is executing within a running hook context
+ or not. It sets backend._hook_is_running equal to the name of the currently running
+ hook (e.g. "set-leader") and reverts back to the empty string when the hook execution
+ is completed.
+ """
+
+ def __init__(self, framework, event_name):
+ self._event = event_name
+ self._backend = None
+ if framework.model is not None:
+ self._backend = framework.model._backend
+
+ def __enter__(self):
+ if self._backend:
+ self._backend._hook_is_running = self._event
+ return self
+
+ def __exit__(self, exception_type, exception, traceback):
+ if self._backend:
+ self._backend._hook_is_running = ''
+
last_event_path = None
deferred = True
for event_path, observer_path, method_name in self._storage.notices(single_event_path):
@@ -779,15 +804,16 @@ class Framework(Object):
if custom_handler:
event_is_from_juju = isinstance(event, charm.HookEvent)
event_is_action = isinstance(event, charm.ActionEvent)
- if (
- event_is_from_juju or event_is_action
- ) and self._juju_debug_at.intersection({'all', 'hook'}):
- # Present the welcome message and run under PDB.
- self._show_debug_code_message()
- pdb.runcall(custom_handler, event)
- else:
- # Regular call to the registered method.
- custom_handler(event)
+ with EventContext(self, event_handle.kind):
+ if (
+ event_is_from_juju or event_is_action
+ ) and self._juju_debug_at.intersection({'all', 'hook'}):
+ # Present the welcome message and run under PDB.
+ self._show_debug_code_message()
+ pdb.runcall(custom_handler, event)
+ else:
+ # Regular call to the registered method.
+ custom_handler(event)
if event.deferred:
deferred = True
diff --git a/ops/model.py b/ops/model.py
index 583c55b..388b18a 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -492,7 +492,9 @@ class RelationMapping(Mapping):
is_peer = relation_name in self._peers
return Relation(relation_name, relation_id, is_peer,
self._our_unit, self._backend, self._cache)
- num_related = len(self[relation_name])
+ relations = self[relation_name]
+ num_related = len(relations)
+ self._backend._validate_relation_access(relation_name, relations)
if num_related == 0:
return None
elif num_related == 1:
@@ -1585,6 +1587,7 @@ class _ModelBackend:
self._is_leader = None
self._leader_check_time = None
+ self._hook_is_running = ''
def _run(self, *args, return_output=False, use_json=False):
kwargs = dict(stdout=PIPE, stderr=PIPE, check=True)
@@ -1609,6 +1612,14 @@ class _ModelBackend:
def _is_relation_not_found(model_error):
return 'relation not found' in str(model_error)
+ def _validate_relation_access(self, relation_name, relations):
+ """Checks for relation usage inconsistent with the framework/backend state.
+
+ This is used for catching Harness configuration errors and the production implementation
+ here should remain empty.
+ """
+ pass
+
def relation_ids(self, relation_name):
relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]
| canonical/operator | ff30d2f4bf0d8e2f24cd11d99e3de67d8e81e3b0 | diff --git a/ops/testing.py b/ops/testing.py
index 335ae52..48ae58a 100755
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -952,16 +952,19 @@ class Harness(typing.Generic[CharmType]):
def set_leader(self, is_leader: bool = True) -> None:
"""Set whether this unit is the leader or not.
- If this charm becomes a leader then `leader_elected` will be triggered.
+ If this charm becomes a leader then `leader_elected` will be triggered. If Harness.begin()
+ has already been called, then the charm's peer relation should usually be added *prior* to
+ calling this method (i.e. with Harness.add_relation) to properly initialize and make
+ available relation data that leader elected hooks may want to access.
Args:
is_leader: True/False as to whether this unit is the leader.
"""
- was_leader = self._backend._is_leader
self._backend._is_leader = is_leader
+
# Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in
# the Model objects, so this automatically gets noticed.
- if is_leader and not was_leader and self._charm is not None and self._hooks_enabled:
+ if is_leader and self._charm is not None and self._hooks_enabled:
self._charm.on.leader_elected.emit()
def set_planned_units(self, num_units: int) -> None:
@@ -1103,6 +1106,24 @@ class _TestingModelBackend:
self._pebble_clients = {} # type: {str: _TestingPebbleClient}
self._pebble_clients_can_connect = {} # type: {_TestingPebbleClient: bool}
self._planned_units = None
+ self._hook_is_running = ''
+
+ def _validate_relation_access(self, relation_name, relations):
+ """Ensures that the named relation exists/has been added.
+
+ This is called whenever relation data is accessed via model.get_relation(...).
+ """
+ if len(relations) > 0:
+ return
+
+ relations = list(self._meta.peers.keys())
+ relations.extend(self._meta.requires.keys())
+ relations.extend(self._meta.provides.keys())
+ if self._hook_is_running == 'leader_elected' and relation_name in relations:
+ raise RuntimeError(
+ 'cannot access relation data without first adding the relation: '
+ 'use Harness.add_relation({!r}, <app>) before calling set_leader'
+ .format(relation_name))
def _can_connect(self, pebble_client) -> bool:
"""Returns whether the mock client is active and can support API calls with no errors."""
diff --git a/test/test_testing.py b/test/test_testing.py
index 83c9333..795ea28 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -58,6 +58,20 @@ from ops.testing import (
is_linux = platform.system() == 'Linux'
+class SetLeaderErrorTester(CharmBase):
+ """Sets peer relation data inside leader-elected."""
+
+ def __init__(self, framework):
+ super().__init__(framework)
+ self._peer_name = 'peer'
+ self.framework.observe(self.on.leader_elected,
+ self._on_leader_elected)
+
+ def _on_leader_elected(self, event):
+ peers = self.model.get_relation(self._peer_name)
+ peers.data[self.app]["foo"] = "bar"
+
+
class StorageTester(CharmBase):
"""Record the relation-changed events."""
@@ -791,6 +805,21 @@ class TestHarness(unittest.TestCase):
self.assertEqual(rel.data[harness.charm.model.unit]['key'], 'v4')
self.assertEqual([], helper.changes)
+ def test_harness_leader_misconfig(self):
+ # language=YAML
+ harness = Harness(SetLeaderErrorTester, meta='''
+ name: postgresql
+ peers:
+ peer:
+ interface: foo
+ ''')
+ self.addCleanup(harness.cleanup)
+ harness.begin()
+
+ with self.assertRaises(RuntimeError) as cm:
+ harness.set_leader(is_leader=True)
+ self.assertTrue(cm.exception.args[0].find('use Harness.add_relation') != -1)
+
def test_update_peer_relation_app_data(self):
# language=YAML
harness = Harness(CharmBase, meta='''
| `ops.harness` prohibits setting application data both in peer relation created and leader elected events
## Issue
Attached to this report is a simple charm (zip file) demonstrating a problem with `ops.harness`. This (workload-less) charm only has handlers for `RelationCreated` and `LeaderElected` events. The charm sets different peer application data fields in response to each of these two events. The charm can be built and deployed without any error, and it successfully sets both data items in the peer relation created and leader elected events (as can be seen using juju show-unit).
Also provided with the charm are two unit tests, both of which fail. The only difference between the two unit tests is the order in which the peer relation created and leader elected events are executed. The tests fail because accessing/setting the peer application relation data fields fails. Note that this failure is not the result of a missing leader guard; a leader guard is implemented.
PS: If there is an ordering between these two events, it was not obvious from the [reference docs](https://ops.readthedocs.io/en/latest/). If there is an ordering, it would be great to have it made explicit in the docstrings. Note that peer relation created and relation created are qualitatively different events, in that the former will always happen when a charm is deployed. This issue pertains to the peer relation created event.
[harness-test.zip](https://github.com/canonical/operator/files/8098103/harness-test.zip)
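
A hedged sketch of the test ordering the Harness ends up requiring here: add the peer relation before calling set_leader, so that leader-elected handlers can see the peer data bag. The charm name and the `peer` endpoint are placeholder names.

```python
from ops.charm import CharmBase
from ops.testing import Harness


def test_leader_elected_sees_peer_data():
    # 'my-charm' and the 'peer' endpoint are illustrative; the peer endpoint
    # must be declared under `peers:` in the charm's metadata.
    harness = Harness(CharmBase, meta="""
name: my-charm
peers:
  peer:
    interface: my-peer
""")
    harness.begin()
    # Adding the peer relation first makes its application data bag available
    # to any handler that runs when leadership is granted below.
    harness.add_relation("peer", "my-charm")
    harness.set_leader(True)
    harness.cleanup()
```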
| 0.0 | ff30d2f4bf0d8e2f24cd11d99e3de67d8e81e3b0 | [
"test/test_testing.py::TestHarness::test_harness_leader_misconfig"
]
| [
"test/test_testing.py::TestHarness::test_actions_from_directory",
"test/test_testing.py::TestHarness::test_actions_passed_in",
"test/test_testing.py::TestHarness::test_add_oci_resource_custom",
"test/test_testing.py::TestHarness::test_add_oci_resource_no_image",
"test/test_testing.py::TestHarness::test_add_peer_relation_with_initial_data_leader",
"test/test_testing.py::TestHarness::test_add_relation",
"test/test_testing.py::TestHarness::test_add_relation_and_unit",
"test/test_testing.py::TestHarness::test_add_relation_with_our_initial_data",
"test/test_testing.py::TestHarness::test_add_relation_with_remote_app_data",
"test/test_testing.py::TestHarness::test_add_resource_but_oci",
"test/test_testing.py::TestHarness::test_add_resource_bytes",
"test/test_testing.py::TestHarness::test_add_resource_string",
"test/test_testing.py::TestHarness::test_add_resource_unknown",
"test/test_testing.py::TestHarness::test_add_resource_unknown_filename",
"test/test_testing.py::TestHarness::test_add_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_app_status",
"test/test_testing.py::TestHarness::test_attach_storage",
"test/test_testing.py::TestHarness::test_attach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_begin_twice",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_install_sets_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_multiple_relation_same_endpoint",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations_not_leader",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_peer_relation_pre_defined",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_relation_charm_with_no_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_unknown_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_application_data",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_multiple_units",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_one_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_peer_relation",
"test/test_testing.py::TestHarness::test_can_connect",
"test/test_testing.py::TestHarness::test_config_from_directory",
"test/test_testing.py::TestHarness::test_container_isdir_and_exists",
"test/test_testing.py::TestHarness::test_container_pebble_ready",
"test/test_testing.py::TestHarness::test_create_harness_twice",
"test/test_testing.py::TestHarness::test_detach_storage",
"test/test_testing.py::TestHarness::test_detach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_empty_config_raises",
"test/test_testing.py::TestHarness::test_get_backend_calls",
"test/test_testing.py::TestHarness::test_get_backend_calls_with_kwargs",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan_unknown",
"test/test_testing.py::TestHarness::test_get_pod_spec",
"test/test_testing.py::TestHarness::test_get_relation_data",
"test/test_testing.py::TestHarness::test_hooks_disabled_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_nested_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_noop",
"test/test_testing.py::TestHarness::test_hooks_enabled_and_disabled",
"test/test_testing.py::TestHarness::test_metadata_from_directory",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_populate_oci_resources",
"test/test_testing.py::TestHarness::test_relation_events",
"test/test_testing.py::TestHarness::test_relation_set_app_not_leader",
"test/test_testing.py::TestHarness::test_relation_set_deletes",
"test/test_testing.py::TestHarness::test_remove_detached_storage",
"test/test_testing.py::TestHarness::test_remove_relation",
"test/test_testing.py::TestHarness::test_remove_relation_unit",
"test/test_testing.py::TestHarness::test_remove_specific_relation_id",
"test/test_testing.py::TestHarness::test_remove_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_removing_invalid_relation_id_raises_exception",
"test/test_testing.py::TestHarness::test_removing_relation_refreshes_charm_model",
"test/test_testing.py::TestHarness::test_removing_relation_removes_remote_app_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_does_not_remove_other_unit_and_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_removes_data_also",
"test/test_testing.py::TestHarness::test_resource_folder_cleanup",
"test/test_testing.py::TestHarness::test_set_leader",
"test/test_testing.py::TestHarness::test_set_model_info_after_begin",
"test/test_testing.py::TestHarness::test_set_model_name",
"test/test_testing.py::TestHarness::test_set_model_name_after_begin",
"test/test_testing.py::TestHarness::test_set_model_uuid_after_begin",
"test/test_testing.py::TestHarness::test_set_workload_version",
"test/test_testing.py::TestHarness::test_simulate_can_connect",
"test/test_testing.py::TestHarness::test_storage_with_hyphens_works",
"test/test_testing.py::TestHarness::test_unit_status",
"test/test_testing.py::TestHarness::test_update_config",
"test/test_testing.py::TestHarness::test_update_config_undefined_option",
"test/test_testing.py::TestHarness::test_update_config_unset_boolean",
"test/test_testing.py::TestHarness::test_update_peer_relation_app_data",
"test/test_testing.py::TestHarness::test_update_peer_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_exposes_new_data",
"test/test_testing.py::TestHarness::test_update_relation_no_local_app_change_event",
"test/test_testing.py::TestHarness::test_update_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_remove_data",
"test/test_testing.py::TestTestingModelBackend::test_conforms_to_model_backend",
"test/test_testing.py::TestTestingModelBackend::test_get_pebble_methods",
"test/test_testing.py::TestTestingModelBackend::test_lazy_resource_directory",
"test/test_testing.py::TestTestingModelBackend::test_relation_get_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_ids_unknown_relation",
"test/test_testing.py::TestTestingModelBackend::test_relation_list_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_remote_app_name",
"test/test_testing.py::TestTestingModelBackend::test_resource_get_no_resource",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_app",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_unit",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_no_override",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_replace",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_not_combined",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_three_services",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_autostart",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_bad_request",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_none",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_not_started",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_start_stop",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_subset",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_invalid_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_methods_match_pebble_client",
"test/test_testing.py::TestTestingPebbleClient::test_mixed_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_send_signal",
"test/test_testing.py::TestTestingPebbleClient::test_start_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_start_started_service",
"test/test_testing.py::TestTestingPebbleClient::test_stop_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_stop_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_stop_stopped_service",
"test/test_testing.py::TestMockFilesystem::test_create_and_read_with_different_encodings",
"test/test_testing.py::TestMockFilesystem::test_create_dir_with_extra_args",
"test/test_testing.py::TestMockFilesystem::test_create_file_fails_if_parent_dir_doesnt_exist",
"test/test_testing.py::TestMockFilesystem::test_create_file_from_bytes",
"test/test_testing.py::TestMockFilesystem::test_create_file_from_files",
"test/test_testing.py::TestMockFilesystem::test_create_file_from_str",
"test/test_testing.py::TestMockFilesystem::test_create_file_succeeds_if_parent_dir_doesnt_exist_when_make_dirs_true",
"test/test_testing.py::TestMockFilesystem::test_create_file_with_extra_args",
"test/test_testing.py::TestMockFilesystem::test_delete_file",
"test/test_testing.py::TestMockFilesystem::test_getattr",
"test/test_testing.py::TestMockFilesystem::test_getattr_file_not_found",
"test/test_testing.py::TestMockFilesystem::test_listdir",
"test/test_testing.py::TestMockFilesystem::test_listdir_on_file",
"test/test_testing.py::TestMockFilesystem::test_listdir_on_nonexistent_dir",
"test/test_testing.py::TestMockFilesystem::test_listdir_root_on_empty_os",
"test/test_testing.py::TestMockFilesystem::test_make_and_list_directory",
"test/test_testing.py::TestMockFilesystem::test_make_directory_recursively",
"test/test_testing.py::TestMockFilesystem::test_makedir",
"test/test_testing.py::TestMockFilesystem::test_makedir_fails_if_already_exists",
"test/test_testing.py::TestMockFilesystem::test_makedir_fails_if_parent_dir_doesnt_exist",
"test/test_testing.py::TestMockFilesystem::test_makedir_path_must_start_with_slash",
"test/test_testing.py::TestMockFilesystem::test_makedir_succeeds_if_already_exists_when_make_parents_true",
"test/test_testing.py::TestMockFilesystem::test_open_directory_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_directory_object_itself",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_not_found_raises",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_recursively",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_subdir_of_file_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_list_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_bytes",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_larger_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_non_utf8_data",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_as_child_of_file_raises_error",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_file_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list_by_pattern",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_to_non_existent_subdir",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_remove_path"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2022-03-28 19:39:25+00:00 | apache-2.0 | 1,492 |
|
canonical__operator-765 | diff --git a/ops/model.py b/ops/model.py
index 8425736..45e6a48 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -887,6 +887,15 @@ class RelationData(Mapping['UnitOrApplication', 'RelationDataContent']):
return iter(self._data)
def __getitem__(self, key: 'UnitOrApplication'):
+ if key is None and self.relation.app is None:
+ # NOTE: if juju gets fixed to set JUJU_REMOTE_APP for relation-broken events, then that
+ # should fix the only case in which we expect key to be None - potentially removing the
+ # need for this error in future ops versions (i.e. if relation.app is guaranteed to not
+ # be None. See https://bugs.launchpad.net/juju/+bug/1960934.
+ raise KeyError(
+ 'Cannot index relation data with "None".'
+ ' Are you trying to access remote app data during a relation-broken event?'
+ ' This is not allowed.')
return self._data[key]
def __repr__(self):
@@ -2081,7 +2090,7 @@ class _ModelBackend:
event_relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
if relation_id == event_relation_id:
# JUJU_RELATION_ID is this relation, use JUJU_REMOTE_APP.
- return os.environ['JUJU_REMOTE_APP']
+ return os.getenv('JUJU_REMOTE_APP') or None
# If caller is asking for information about another relation, use
# "relation-list --app" to get it.
| canonical/operator | 2644ae9368c6b9915967a1e6a117b17ebacffb86 | diff --git a/ops/testing.py b/ops/testing.py
index a116104..c11080c 100755
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -1175,9 +1175,21 @@ class _TestingModelBackend:
if relation_id not in self._relation_app_and_units:
# Non-existent or dead relation
return None
+ if 'relation_broken' in self._hook_is_running:
+ # TODO: if juju ever starts setting JUJU_REMOTE_APP in relation-broken hooks runs,
+ # then we should kill this if clause.
+ # See https://bugs.launchpad.net/juju/+bug/1960934
+ return None
return self._relation_app_and_units[relation_id]['app']
def relation_get(self, relation_id, member_name, is_app):
+ if 'relation_broken' in self._hook_is_running and not self.relation_remote_app_name(
+ relation_id):
+ # TODO: if juju gets fixed to set JUJU_REMOTE_APP for this case, then we may opt to
+ # allow charms to read/get that (stale) relation data.
+ # See https://bugs.launchpad.net/juju/+bug/1960934
+ raise RuntimeError(
+ 'remote-side relation data cannot be accessed during a relation-broken event')
if is_app and '/' in member_name:
member_name = member_name.split('/')[0]
if relation_id not in self._relation_data:
@@ -1185,6 +1197,10 @@ class _TestingModelBackend:
return self._relation_data[relation_id][member_name].copy()
def relation_set(self, relation_id, key, value, is_app):
+ if 'relation_broken' in self._hook_is_running and not self.relation_remote_app_name(
+ relation_id):
+ raise RuntimeError(
+ 'remote-side relation data cannot be accessed during a relation-broken event')
relation = self._relation_data[relation_id]
if is_app:
bucket_key = self.app_name
diff --git a/test/test_model.py b/test/test_model.py
index b672b84..4cd20a0 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -715,12 +715,9 @@ class TestModel(unittest.TestCase):
self.assertEqual(len(model.storages), 2)
self.assertEqual(model.storages.keys(), meta.storages.keys())
self.assertIn('disks', model.storages)
- try:
+
+ with pytest.raises(KeyError, match='Did you mean'):
model.storages['does-not-exist']
- except KeyError as err:
- assert 'Did you mean' in str(err), 'got wrong error message'
- except Exception as err:
- assert False, 'got wrong exception type: ' + str(err)
test_cases = {
0: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/0')},
@@ -968,7 +965,6 @@ def test_recursive_push_and_pull(case):
errors = []
try:
- print(push_path, case.dst)
c.push_path(push_path, case.dst)
except ops.model.MultiPushPullError as err:
if not case.errors:
@@ -978,9 +974,6 @@ def test_recursive_push_and_pull(case):
assert case.errors == errors, \
'push_path gave wrong expected errors: want {}, got {}'.format(case.errors, errors)
for fpath in case.want:
-
- for f in ops.model.Container._list_recursive(c.list_files, pathlib.Path('/')):
- print(f)
assert c.exists(fpath), 'push_path failed: file {} missing at destination'.format(fpath)
# create pull test case filesystem structure
diff --git a/test/test_testing.py b/test/test_testing.py
index 265f272..32326b9 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -25,6 +25,7 @@ import textwrap
import unittest
from io import BytesIO, StringIO
+import pytest
import yaml
import ops.testing
@@ -318,6 +319,24 @@ class TestHarness(unittest.TestCase):
self.assertTrue(len(harness.charm.observed_events), 1)
self.assertIsInstance(harness.charm.observed_events[0], RelationEvent)
+ def test_relation_get_when_broken(self):
+ harness = Harness(RelationBrokenTester, meta='''
+ name: test-app
+ requires:
+ foo:
+ interface: foofoo
+ ''')
+ self.addCleanup(harness.cleanup)
+ harness.begin()
+ harness.charm.observe_relation_events('foo')
+
+ # relation remote app is None to mirror production juju behavior where juju doesn't
+ # communicate the remote app to ops.
+ rel_id = harness.add_relation('foo', None)
+
+ with pytest.raises(KeyError, match='trying to access remote app data'):
+ harness.remove_relation(rel_id)
+
def test_remove_relation(self):
harness = Harness(RelationEventCharm, meta='''
name: test-app
@@ -2565,6 +2584,7 @@ class RelationEventCharm(RecordingCharm):
self.record_relation_data_on_events = False
def observe_relation_events(self, relation_name):
+ self.relation_name = relation_name
self.framework.observe(self.on[relation_name].relation_created, self._on_relation_created)
self.framework.observe(self.on[relation_name].relation_joined, self._on_relation_joined)
self.framework.observe(self.on[relation_name].relation_changed, self._on_relation_changed)
@@ -2607,6 +2627,16 @@ class RelationEventCharm(RecordingCharm):
self.changes.append(recording)
+class RelationBrokenTester(RelationEventCharm):
+ """Access inaccessible relation data."""
+
+ def __init__(self, framework):
+ super().__init__(framework)
+
+ def _on_relation_broken(self, event):
+ print(event.relation.data[event.relation.app]['bar'])
+
+
class ContainerEventCharm(RecordingCharm):
"""Record events related to container lifecycles."""
| OF should gracefully handle blank application names in relation-broken hooks
See the discussion here: https://bugs.launchpad.net/juju/+bug/1960934
In a relation-broken hook, Juju does not set JUJU_REMOTE_APP. This is Working as Intended on the Juju side, but it causes the framework to raise an exception when iterating through relations.
We need to refactor so that we handle the empty value without throwing the exception. | 0.0 | 2644ae9368c6b9915967a1e6a117b17ebacffb86 | [
"test/test_testing.py::TestHarness::test_relation_get_when_broken"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_bare_can_connect_call",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_get_system_info",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_testing.py::TestHarness::test_actions_from_directory",
"test/test_testing.py::TestHarness::test_actions_passed_in",
"test/test_testing.py::TestHarness::test_add_oci_resource_custom",
"test/test_testing.py::TestHarness::test_add_oci_resource_no_image",
"test/test_testing.py::TestHarness::test_add_peer_relation_with_initial_data_leader",
"test/test_testing.py::TestHarness::test_add_relation",
"test/test_testing.py::TestHarness::test_add_relation_and_unit",
"test/test_testing.py::TestHarness::test_add_relation_with_our_initial_data",
"test/test_testing.py::TestHarness::test_add_relation_with_remote_app_data",
"test/test_testing.py::TestHarness::test_add_resource_but_oci",
"test/test_testing.py::TestHarness::test_add_resource_bytes",
"test/test_testing.py::TestHarness::test_add_resource_string",
"test/test_testing.py::TestHarness::test_add_resource_unknown",
"test/test_testing.py::TestHarness::test_add_resource_unknown_filename",
"test/test_testing.py::TestHarness::test_add_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_add_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_app_status",
"test/test_testing.py::TestHarness::test_attach_storage",
"test/test_testing.py::TestHarness::test_attach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_begin_twice",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_install_sets_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_multiple_relation_same_endpoint",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_no_relations_not_leader",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_peer_relation_pre_defined",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_relation_charm_with_no_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_unknown_status",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_application_data",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_multiple_units",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_one_relation",
"test/test_testing.py::TestHarness::test_begin_with_initial_hooks_with_peer_relation",
"test/test_testing.py::TestHarness::test_can_connect_legacy",
"test/test_testing.py::TestHarness::test_config_from_directory",
"test/test_testing.py::TestHarness::test_container_isdir_and_exists",
"test/test_testing.py::TestHarness::test_container_pebble_ready",
"test/test_testing.py::TestHarness::test_create_harness_twice",
"test/test_testing.py::TestHarness::test_detach_storage",
"test/test_testing.py::TestHarness::test_detach_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_empty_config_raises",
"test/test_testing.py::TestHarness::test_get_backend_calls",
"test/test_testing.py::TestHarness::test_get_backend_calls_with_kwargs",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan",
"test/test_testing.py::TestHarness::test_get_pebble_container_plan_unknown",
"test/test_testing.py::TestHarness::test_get_pod_spec",
"test/test_testing.py::TestHarness::test_get_relation_data",
"test/test_testing.py::TestHarness::test_harness_leader_misconfig",
"test/test_testing.py::TestHarness::test_hooks_disabled_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_nested_contextmanager",
"test/test_testing.py::TestHarness::test_hooks_disabled_noop",
"test/test_testing.py::TestHarness::test_hooks_enabled_and_disabled",
"test/test_testing.py::TestHarness::test_metadata_from_directory",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_empty_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_app",
"test/test_testing.py::TestHarness::test_no_event_on_no_diff_update_relation_unit_bag",
"test/test_testing.py::TestHarness::test_populate_oci_resources",
"test/test_testing.py::TestHarness::test_relation_events",
"test/test_testing.py::TestHarness::test_relation_set_app_not_leader",
"test/test_testing.py::TestHarness::test_relation_set_deletes",
"test/test_testing.py::TestHarness::test_remove_detached_storage",
"test/test_testing.py::TestHarness::test_remove_relation",
"test/test_testing.py::TestHarness::test_remove_relation_unit",
"test/test_testing.py::TestHarness::test_remove_specific_relation_id",
"test/test_testing.py::TestHarness::test_remove_storage_after_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_before_harness_begin",
"test/test_testing.py::TestHarness::test_remove_storage_without_metadata_key_fails",
"test/test_testing.py::TestHarness::test_removing_invalid_relation_id_raises_exception",
"test/test_testing.py::TestHarness::test_removing_relation_refreshes_charm_model",
"test/test_testing.py::TestHarness::test_removing_relation_removes_remote_app_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_does_not_remove_other_unit_and_data",
"test/test_testing.py::TestHarness::test_removing_relation_unit_removes_data_also",
"test/test_testing.py::TestHarness::test_resource_folder_cleanup",
"test/test_testing.py::TestHarness::test_set_leader",
"test/test_testing.py::TestHarness::test_set_model_info_after_begin",
"test/test_testing.py::TestHarness::test_set_model_name",
"test/test_testing.py::TestHarness::test_set_model_name_after_begin",
"test/test_testing.py::TestHarness::test_set_model_uuid_after_begin",
"test/test_testing.py::TestHarness::test_set_workload_version",
"test/test_testing.py::TestHarness::test_simulate_can_connect",
"test/test_testing.py::TestHarness::test_storage_with_hyphens_works",
"test/test_testing.py::TestHarness::test_unit_status",
"test/test_testing.py::TestHarness::test_update_config",
"test/test_testing.py::TestHarness::test_update_config_undefined_option",
"test/test_testing.py::TestHarness::test_update_config_unset_boolean",
"test/test_testing.py::TestHarness::test_update_peer_relation_app_data",
"test/test_testing.py::TestHarness::test_update_peer_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_exposes_new_data",
"test/test_testing.py::TestHarness::test_update_relation_no_local_app_change_event",
"test/test_testing.py::TestHarness::test_update_relation_no_local_unit_change_event",
"test/test_testing.py::TestHarness::test_update_relation_remove_data",
"test/test_testing.py::TestTestingModelBackend::test_conforms_to_model_backend",
"test/test_testing.py::TestTestingModelBackend::test_get_pebble_methods",
"test/test_testing.py::TestTestingModelBackend::test_lazy_resource_directory",
"test/test_testing.py::TestTestingModelBackend::test_relation_get_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_ids_unknown_relation",
"test/test_testing.py::TestTestingModelBackend::test_relation_list_unknown_relation_id",
"test/test_testing.py::TestTestingModelBackend::test_relation_remote_app_name",
"test/test_testing.py::TestTestingModelBackend::test_resource_get_no_resource",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_app",
"test/test_testing.py::TestTestingModelBackend::test_status_set_get_unit",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_no_override",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_replace",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_combine_override_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_merge",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_not_combined",
"test/test_testing.py::TestTestingPebbleClient::test_add_layer_three_services",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_autostart",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_bad_request",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_none",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_not_started",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_start_stop",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_subset",
"test/test_testing.py::TestTestingPebbleClient::test_get_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_invalid_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_methods_match_pebble_client",
"test/test_testing.py::TestTestingPebbleClient::test_mixed_start_service",
"test/test_testing.py::TestTestingPebbleClient::test_send_signal",
"test/test_testing.py::TestTestingPebbleClient::test_start_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_start_started_service",
"test/test_testing.py::TestTestingPebbleClient::test_stop_service_str",
"test/test_testing.py::TestTestingPebbleClient::test_stop_services_unknown",
"test/test_testing.py::TestTestingPebbleClient::test_stop_stopped_service",
"test/test_testing.py::TestTestingFilesystem::test_create_and_read_with_different_encodings",
"test/test_testing.py::TestTestingFilesystem::test_create_dir_with_extra_args",
"test/test_testing.py::TestTestingFilesystem::test_create_file_fails_if_parent_dir_doesnt_exist",
"test/test_testing.py::TestTestingFilesystem::test_create_file_from_bytes",
"test/test_testing.py::TestTestingFilesystem::test_create_file_from_files",
"test/test_testing.py::TestTestingFilesystem::test_create_file_from_str",
"test/test_testing.py::TestTestingFilesystem::test_create_file_succeeds_if_parent_dir_doesnt_exist_when_make_dirs_true",
"test/test_testing.py::TestTestingFilesystem::test_create_file_with_extra_args",
"test/test_testing.py::TestTestingFilesystem::test_delete_file",
"test/test_testing.py::TestTestingFilesystem::test_getattr",
"test/test_testing.py::TestTestingFilesystem::test_getattr_file_not_found",
"test/test_testing.py::TestTestingFilesystem::test_listdir",
"test/test_testing.py::TestTestingFilesystem::test_listdir_on_file",
"test/test_testing.py::TestTestingFilesystem::test_listdir_on_nonexistent_dir",
"test/test_testing.py::TestTestingFilesystem::test_listdir_root_on_empty_os",
"test/test_testing.py::TestTestingFilesystem::test_make_and_list_directory",
"test/test_testing.py::TestTestingFilesystem::test_make_directory_recursively",
"test/test_testing.py::TestTestingFilesystem::test_makedir",
"test/test_testing.py::TestTestingFilesystem::test_makedir_fails_if_already_exists",
"test/test_testing.py::TestTestingFilesystem::test_makedir_fails_if_parent_dir_doesnt_exist",
"test/test_testing.py::TestTestingFilesystem::test_makedir_path_must_start_with_slash",
"test/test_testing.py::TestTestingFilesystem::test_makedir_succeeds_if_already_exists_when_make_parents_true",
"test/test_testing.py::TestTestingFilesystem::test_open_directory_fails",
"test/test_testing.py::TestTestingFilesystem::test_storage_mount",
"test/test_testing.py::TestTestingStorageMount::test_create_and_read_with_different_encodings",
"test/test_testing.py::TestTestingStorageMount::test_create_dir_with_extra_args",
"test/test_testing.py::TestTestingStorageMount::test_create_file_fails_if_parent_dir_doesnt_exist",
"test/test_testing.py::TestTestingStorageMount::test_create_file_from_bytes",
"test/test_testing.py::TestTestingStorageMount::test_create_file_from_files",
"test/test_testing.py::TestTestingStorageMount::test_create_file_from_str",
"test/test_testing.py::TestTestingStorageMount::test_create_file_succeeds_if_parent_dir_doesnt_exist_when_make_dirs_true",
"test/test_testing.py::TestTestingStorageMount::test_create_file_with_extra_args",
"test/test_testing.py::TestTestingStorageMount::test_delete_file",
"test/test_testing.py::TestTestingStorageMount::test_getattr",
"test/test_testing.py::TestTestingStorageMount::test_getattr_file_not_found",
"test/test_testing.py::TestTestingStorageMount::test_listdir",
"test/test_testing.py::TestTestingStorageMount::test_listdir_on_file",
"test/test_testing.py::TestTestingStorageMount::test_listdir_on_nonexistent_dir",
"test/test_testing.py::TestTestingStorageMount::test_listdir_root_on_empty_os",
"test/test_testing.py::TestTestingStorageMount::test_make_and_list_directory",
"test/test_testing.py::TestTestingStorageMount::test_make_directory_recursively",
"test/test_testing.py::TestTestingStorageMount::test_makedir",
"test/test_testing.py::TestTestingStorageMount::test_makedir_fails_if_already_exists",
"test/test_testing.py::TestTestingStorageMount::test_makedir_fails_if_parent_dir_doesnt_exist",
"test/test_testing.py::TestTestingStorageMount::test_makedir_path_must_start_with_slash",
"test/test_testing.py::TestTestingStorageMount::test_makedir_succeeds_if_already_exists_when_make_parents_true",
"test/test_testing.py::TestTestingStorageMount::test_open_directory_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_container_storage_mounts",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_directory_object_itself",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_list_files_not_found_raises",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_dir_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_recursively",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_directory_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_make_subdir_of_file_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_list_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_bytes",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_larger_file",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_and_pull_non_utf8_data",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_as_child_of_file_raises_error",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_file_with_relative_path_fails",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_files_and_list_by_pattern",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_to_non_existent_subdir",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_ownership",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_push_with_permission_mask",
"test/test_testing.py::TestPebbleStorageAPIsUsingMocks::test_remove_path"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2022-06-02 18:32:47+00:00 | apache-2.0 | 1,493 |
|
canonical__operator-805 | diff --git a/ops/model.py b/ops/model.py
index b4940d2..9682548 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -28,7 +28,7 @@ import typing
import weakref
from abc import ABC, abstractmethod
from pathlib import Path
-from subprocess import PIPE, CalledProcessError, run
+from subprocess import PIPE, CalledProcessError, CompletedProcess, run
from typing import (
Any,
BinaryIO,
@@ -2025,9 +2025,12 @@ class _ModelBackend:
self._leader_check_time = None
self._hook_is_running = ''
- def _run(self, *args: str, return_output: bool = False, use_json: bool = False
+ def _run(self, *args: str, return_output: bool = False,
+ use_json: bool = False, input_stream: Optional[bytes] = None
) -> Union[str, 'JsonObject', None]:
- kwargs = dict(stdout=PIPE, stderr=PIPE, check=True)
+ kwargs = dict(stdout=PIPE, stderr=PIPE, check=True) # type: Dict[str, Any]
+ if input_stream:
+ kwargs.update({"input": input_stream})
which_cmd = shutil.which(args[0])
if which_cmd is None:
raise RuntimeError('command not found: {}'.format(args[0]))
@@ -2036,6 +2039,10 @@ class _ModelBackend:
args += ('--format=json',)
try:
result = run(args, **kwargs)
+
+ # pyright infers the first match when argument overloading/unpacking is used,
+ # so this needs to be coerced into the right type
+ result = typing.cast('CompletedProcess[bytes]', result)
except CalledProcessError as e:
raise ModelError(e.stderr)
if return_output:
@@ -2133,12 +2140,14 @@ class _ModelBackend:
raise RuntimeError(
'setting application data is not supported on Juju version {}'.format(version))
- args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)]
+ args = ['relation-set', '-r', str(relation_id)]
if is_app:
args.append('--app')
+ args.extend(["--file", "-"])
try:
- return self._run(*args)
+ content = yaml.safe_dump({key: value}, encoding='utf8') # type: ignore
+ return self._run(*args, input_stream=content)
except ModelError as e:
if self._is_relation_not_found(e):
raise RelationNotFoundError() from e
| canonical/operator | bf71c4a1aa84c4f0ddc2a0e334baafaf85b534cb | diff --git a/test/test_model.py b/test/test_model.py
index 838f653..0d38eed 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -1969,17 +1969,17 @@ class TestModelBackend(unittest.TestCase):
lambda: fake_script(self, 'relation-set', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False),
ops.model.ModelError,
- [['relation-set', '-r', '3', 'foo=bar']],
+ [['relation-set', '-r', '3', '--file', '-']],
), (
lambda: fake_script(self, 'relation-set', 'echo {} >&2 ; exit 2'.format(err_msg)),
lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False),
ops.model.RelationNotFoundError,
- [['relation-set', '-r', '3', 'foo=bar']],
+ [['relation-set', '-r', '3', '--file', '-']],
), (
lambda: None,
lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=True),
ops.model.RelationNotFoundError,
- [['relation-set', '-r', '3', 'foo=bar', '--app']],
+ [['relation-set', '-r', '3', '--app', '--file', '-']],
), (
lambda: fake_script(self, 'relation-get', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.relation_get(3, 'remote/0', is_app=False),
@@ -2027,15 +2027,25 @@ class TestModelBackend(unittest.TestCase):
def test_relation_set_juju_version_quirks(self):
self.addCleanup(os.environ.pop, 'JUJU_VERSION', None)
- fake_script(self, 'relation-set', 'exit 0')
-
# on 2.7.0+, things proceed as expected
for v in ['2.8.0', '2.7.0']:
with self.subTest(v):
- os.environ['JUJU_VERSION'] = v
- self.backend.relation_set(1, 'foo', 'bar', is_app=True)
- calls = [' '.join(i) for i in fake_script_calls(self, clear=True)]
- self.assertEqual(calls, ['relation-set -r 1 foo=bar --app'])
+ t = tempfile.NamedTemporaryFile()
+ try:
+ fake_script(self, 'relation-set', dedent("""
+ cat >> {}
+ """).format(pathlib.Path(t.name).as_posix()))
+ os.environ['JUJU_VERSION'] = v
+ self.backend.relation_set(1, 'foo', 'bar', is_app=True)
+ calls = [' '.join(i) for i in fake_script_calls(self, clear=True)]
+ self.assertEqual(calls, ['relation-set -r 1 --app --file -'])
+ t.seek(0)
+ content = t.read()
+ finally:
+ t.close()
+ self.assertEqual(content.decode('utf-8'), dedent("""\
+ foo: bar
+ """))
# before 2.7.0, it just fails always (no --app support)
os.environ['JUJU_VERSION'] = '2.6.9'
| `RelationDataContent.__setitem__` should dynamically dispatch to a file if it's too long
We've already seen this with Grafana Dashboards, which routinely overflow the maximum argument length for `subprocess`, but it was also observed that relating Prometheus to a very large number of targets could overflow it as well and cause a strange-looking `OSError` on a `RelationChangedEvent`.
Ultimately, this is due to [`relation_set`](https://github.com/canonical/operator/blob/0a097748299506c7651bdef99524638146f9724a/ops/model.py#L951) calling back to `subprocess` to handle [`relation-set ...`](https://github.com/canonical/operator/blob/0a097748299506c7651bdef99524638146f9724a/ops/model.py#L2136).
We already [split long log messages](https://github.com/canonical/operator/pull/632), and `relation-set` takes a `--file` parameter which reads in YAML, allowing the limit to be bypassed. If OF determines that the length of the relation data is anywhere near the limit, we could defer to something like:
```python
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml") as relation_data:
    # write the key/value pair as YAML so the hook tool can read it back
    relation_data.write(yaml.dump({key: value}))
    relation_data.flush()
    self._backend.relation_set(..., data_file=relation_data.name)
```
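For comparison, the patch at the top of this record takes the stdin route rather than a named file: it always appends `--file -` and feeds the YAML to the hook tool's standard input. A minimal sketch of that call follows — the function name is illustrative, and it only works inside a Juju hook environment where `relation-set` is on the PATH:

```python
import subprocess

import yaml


def relation_set_via_stdin(relation_id: int, key: str, value: str, is_app: bool = False) -> None:
    # Build the relation-set invocation without putting the data on the
    # command line, then pipe the YAML-encoded key/value pair over stdin.
    args = ['relation-set', '-r', str(relation_id)]
    if is_app:
        args.append('--app')
    args.extend(['--file', '-'])
    content = yaml.safe_dump({key: value}, encoding='utf8')
    subprocess.run(args, input=content, stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE, check=True)
```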
An optional argument could be added to `relation_set` so that, if present, data is loaded from a file. This seems easy enough to add, avoids requiring charm authors to carefully think about the size/length of their data bags and potentially destructure them to avoid it mapping back to a `map[string]string` on the backend, and yields the desired behavior. | 0.0 | bf71c4a1aa84c4f0ddc2a0e334baafaf85b534cb | [
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_bare_can_connect_call",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_get_system_info",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-07-19 10:12:16+00:00 | apache-2.0 | 1,494 |
|
canonical__operator-823 | diff --git a/ops/model.py b/ops/model.py
index 6c51a81..5532a70 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -93,7 +93,7 @@ if typing.TYPE_CHECKING:
_RelationsMeta_Raw = Dict[str, ops.charm.RelationMeta]
# mapping from container name to container metadata
_ContainerMeta_Raw = Dict[str, ops.charm.ContainerMeta]
- _IPAddress = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
+ _NetworkAddress = Union[ipaddress.IPv4Address, ipaddress.IPv6Address, str]
_Network = Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
_ServiceInfoMapping = Mapping[str, pebble.ServiceInfo]
@@ -692,6 +692,17 @@ class Binding:
return self._network
+def _cast_network_address(raw: str) -> '_NetworkAddress':
+ # fields marked as network addresses need not be IPs; they could be
+ # hostnames that juju failed to resolve. In that case, we'll log a
+ # debug message and leave it as-is.
+ try:
+ return ipaddress.ip_address(raw)
+ except ValueError:
+ logger.debug("could not cast {} to IPv4/v6 address".format(raw))
+ return raw
+
+
class Network:
"""Network space details.
@@ -725,15 +736,15 @@ class Network:
if addrs is not None:
for address_info in addrs:
self.interfaces.append(NetworkInterface(interface_name, address_info))
- self.ingress_addresses = [] # type: List[_IPAddress]
+ self.ingress_addresses = [] # type: List[_NetworkAddress]
for address in network_info.get('ingress-addresses', []):
- self.ingress_addresses.append(ipaddress.ip_address(address))
+ self.ingress_addresses.append(_cast_network_address(address))
self.egress_subnets = [] # type: List[_Network]
for subnet in network_info.get('egress-subnets', []):
self.egress_subnets.append(ipaddress.ip_network(subnet))
@property
- def bind_address(self) -> Optional['_IPAddress']:
+ def bind_address(self) -> Optional['_NetworkAddress']:
"""A single address that your application should bind() to.
For the common case where there is a single answer. This represents a single
@@ -746,7 +757,7 @@ class Network:
return None
@property
- def ingress_address(self):
+ def ingress_address(self) -> Optional['_NetworkAddress']:
"""The address other applications should use to connect to your unit.
Due to things like public/private addresses, NAT and tunneling, the address you bind()
@@ -782,8 +793,8 @@ class NetworkInterface:
address = address_info.get('address')
# The value field may be empty.
- address_ = ipaddress.ip_address(address) if address else None
- self.address = address_ # type: Optional[_IPAddress]
+ address_ = _cast_network_address(address) if address else None
+ self.address = address_ # type: Optional[_NetworkAddress]
cidr = address_info.get('cidr') # type: str
# The cidr field may be empty, see LP: #1864102.
if cidr:
| canonical/operator | 77ac36224aa7f79d4ab7c2b11d060a52c8b83ae4 | diff --git a/test/test_model.py b/test/test_model.py
index 495859f..173168f 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -1912,6 +1912,16 @@ class TestModelBindings(unittest.TestCase):
self.assertEqual(binding.network.interfaces[i].address, ipaddress.ip_address(address))
self.assertEqual(binding.network.interfaces[i].subnet, ipaddress.ip_network(subnet))
+ for (i, (name, address, subnet)) in enumerate([
+ ('lo', '192.0.2.2', '192.0.2.0/24'),
+ ('lo', 'dead:beef::1', 'dead:beef::/64'),
+ ('tun', '192.0.3.3', '192.0.3.3/32'),
+ ('tun', '2001:db8::3', '2001:db8::3/128'),
+ ('tun', 'fe80::1:1', 'fe80::/64')]):
+ self.assertEqual(binding.network.interfaces[i].name, name)
+ self.assertEqual(binding.network.interfaces[i].address, ipaddress.ip_address(address))
+ self.assertEqual(binding.network.interfaces[i].subnet, ipaddress.ip_network(subnet))
+
def test_invalid_keys(self):
# Basic validation for passing invalid keys.
for name in (object, 0):
@@ -2058,6 +2068,20 @@ class TestModelBindings(unittest.TestCase):
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.egress_subnets, [])
+ def test_unresolved_ingress_addresses(self):
+ # sometimes juju fails to resolve an url to an IP, in which case
+ # ingress-addresses will be the 'raw' url instead of an IP.
+ network_data = json.dumps({
+ 'ingress-addresses': [
+ 'foo.bar.baz.com'
+ ],
+ })
+ fake_script(self, 'network-get',
+ '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
+ binding_name = 'db0'
+ binding = self.model.get_binding(self.model.get_relation(binding_name))
+ self.assertEqual(binding.network.ingress_addresses, ['foo.bar.baz.com'])
+
class TestModelBackend(unittest.TestCase):
| traefik errors on cos layer with error: hook failed: "metrics-endpoint-relation-joined"
### Bug Description
The Solutions QA team has two runs in which traefik, deployed as part of the COS layer, goes into an error state with `hook failed: "metrics-endpoint-relation-joined"`.
The cos layer is built on top of kubernetes-aws.
From the logs:
```
2022-08-21 02:20:42 ERROR juju-log metrics-endpoint:10: Uncaught exception while in charm code:
Traceback (most recent call last):
  File "./src/charm.py", line 678, in <module>
    main(TraefikIngressCharm)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/main.py", line 431, in main
    _emit_charm_event(charm, dispatcher.event_name)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/main.py", line 142, in _emit_charm_event
    event_to_emit.emit(*args, **kwargs)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/framework.py", line 316, in emit
    framework._emit(event)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/framework.py", line 784, in _emit
    self._reemit(event_path)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/framework.py", line 857, in _reemit
    custom_handler(event)
  File "/var/lib/juju/agents/unit-traefik-0/charm/lib/charms/prometheus_k8s/v0/prometheus_scrape.py", line 1545, in _set_scrape_job_spec
    self._set_unit_ip(event)
  File "/var/lib/juju/agents/unit-traefik-0/charm/lib/charms/prometheus_k8s/v0/prometheus_scrape.py", line 1576, in _set_unit_ip
    unit_ip = str(self._charm.model.get_binding(relation).network.bind_address)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/model.py", line 679, in network
    self._network = self._network_get(self.name, self._relation_id)
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/model.py", line 672, in _network_get
    return Network(self._backend.network_get(name, relation_id))
  File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/model.py", line 724, in __init__
    self.ingress_addresses.append(ipaddress.ip_address(address))
  File "/usr/lib/python3.8/ipaddress.py", line 53, in ip_address
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
ValueError: 'acb396cf5563d429f9ebc8aa23ae47ed-1516332866.us-east-1.elb.amazonaws.com' does not appear to be an IPv4 or IPv6 address
```
Failed runs:
https://solutions.qa.canonical.com/testruns/testRun/0b35ee4c-efcf-4f50-a393-696803b31ac9
https://solutions.qa.canonical.com/testruns/testRun/ef230601-4af3-4d6f-89af-4e578985e666
Logs can be found at the bottom of the page, in the artifacts repository.
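The patch in this record addresses the crash by parsing defensively: if the value is not a valid IPv4/IPv6 address, it is kept as the raw string and logged at debug level. A minimal standalone sketch of that idea — the function name here is illustrative, only `ipaddress.ip_address` itself is the real API:

```python
import ipaddress
import logging
from typing import Union

logger = logging.getLogger(__name__)


def cast_network_address(raw: str) -> Union[ipaddress.IPv4Address, ipaddress.IPv6Address, str]:
    # Juju can hand back an unresolved hostname (such as an ELB DNS name)
    # where an IP address is normally expected; fall back to the raw string.
    try:
        return ipaddress.ip_address(raw)
    except ValueError:
        logger.debug("could not cast %s to an IPv4/IPv6 address", raw)
        return raw
```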
### To Reproduce
These errors were found on the fkb-master-kubernetes-focal-aws SKU of our automated test suite.
### Environment
charmed-kubernetes 1.24
stable channel for cos charms.
### Relevant log output
```shell
Traceback (most recent call last):
File "./src/charm.py", line 678, in <module>
main(TraefikIngressCharm)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/main.py", line 431, in main
_emit_charm_event(charm, dispatcher.event_name)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/main.py", line 142, in _emit_charm_event
event_to_emit.emit(*args, **kwargs)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/framework.py", line 316, in emit
framework._emit(event)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/framework.py", line 784, in _emit
self._reemit(event_path)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/framework.py", line 857, in _reemit
custom_handler(event)
File "/var/lib/juju/agents/unit-traefik-0/charm/lib/charms/prometheus_k8s/v0/prometheus_scrape.py", line 1545, in _set_scrape_job_spec
self._set_unit_ip(event)
File "/var/lib/juju/agents/unit-traefik-0/charm/lib/charms/prometheus_k8s/v0/prometheus_scrape.py", line 1576, in _set_unit_ip
unit_ip = str(self._charm.model.get_binding(relation).network.bind_address)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/model.py", line 679, in network
self._network = self._network_get(self.name, self._relation_id)
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/model.py", line 672, in _network_get
return Network(self._backend.network_get(name, relation_id))
File "/var/lib/juju/agents/unit-traefik-0/charm/venv/ops/model.py", line 724, in __init__
self.ingress_addresses.append(ipaddress.ip_address(address))
File "/usr/lib/python3.8/ipaddress.py", line 53, in ip_address
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
ValueError: 'acb396cf5563d429f9ebc8aa23ae47ed-1516332866.us-east-1.elb.amazonaws.com' does not appear to be an IPv4 or IPv6 address
```
### Additional context
_No response_ | 0.0 | 77ac36224aa7f79d4ab7c2b11d060a52c8b83ae4 | [
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_bare_can_connect_call",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_get_system_info",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-09-02 07:21:01+00:00 | apache-2.0 | 1,495 |
|
canonical__operator-860 | diff --git a/ops/model.py b/ops/model.py
index 9412ae9..79af88f 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -1418,17 +1418,17 @@ class Container:
# instance that is in fact 'ready'.
self._pebble.get_system_info()
except pebble.ConnectionError as e:
- logger.debug("Pebble API is not ready; ConnectionError: %s", e.message())
+ logger.debug("Pebble API is not ready; ConnectionError: %s", e)
return False
except FileNotFoundError as e:
# In some cases, charm authors can attempt to hit the Pebble API before it has had the
# chance to create the UNIX socket in the shared volume.
- logger.debug("Pebble API is not ready; UNIX socket not found:", str(e))
+ logger.debug("Pebble API is not ready; UNIX socket not found: %s", e)
return False
except pebble.APIError as e:
# An API error is only raised when the Pebble API returns invalid JSON, or the response
# cannot be read. Both of these are a likely indicator that something is wrong.
- logger.warning("Pebble API is not ready; APIError: %s", str(e))
+ logger.warning("Pebble API is not ready; APIError: %s", e)
return False
return True
diff --git a/ops/pebble.py b/ops/pebble.py
index 71de6fb..0c15c3d 100644
--- a/ops/pebble.py
+++ b/ops/pebble.py
@@ -333,14 +333,6 @@ class Error(Exception):
def __repr__(self):
return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.args)
- def name(self):
- """Return a string representation of the model plus class."""
- return '<{}.{}>'.format(type(self).__module__, type(self).__name__)
-
- def message(self):
- """Return the message passed as an argument."""
- return self.args[0]
-
class TimeoutError(TimeoutError, Error):
"""Raised when a polling timeout occurs."""
| canonical/operator | 28251faf41868017aec5add2b59a80719f5354db | diff --git a/ops/testing.py b/ops/testing.py
index d92609b..98507a5 100755
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -1406,6 +1406,7 @@ class _TestingModelBackend:
if self._resource_dir is not None:
self._resource_dir.cleanup()
self._resource_dir = None
+ self._harness_tmp_dir.cleanup()
def _get_resource_dir(self) -> pathlib.Path:
if self._resource_dir is None:
diff --git a/test/test_model.py b/test/test_model.py
index 173168f..e1ff14d 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -33,7 +33,14 @@ import ops.testing
from ops import model
from ops._private import yaml
from ops.charm import RelationMeta, RelationRole
-from ops.pebble import APIError, FileInfo, FileType, ServiceInfo
+from ops.pebble import (
+ APIError,
+ ConnectionError,
+ FileInfo,
+ FileType,
+ ServiceInfo,
+ SystemInfo,
+)
class TestModel(unittest.TestCase):
@@ -1354,8 +1361,9 @@ containers:
self.container.replan()
self.assertEqual(self.pebble.requests, [('replan',)])
- def test_get_system_info(self):
- self.container.can_connect()
+ def test_can_connect(self):
+ self.pebble.responses.append(SystemInfo.from_dict({'version': '1.0.0'}))
+ self.assertTrue(self.container.can_connect())
self.assertEqual(self.pebble.requests, [('get_system_info',)])
def test_start(self):
@@ -1687,10 +1695,37 @@ containers:
('remove_path', '/path/2', True),
])
- def test_bare_can_connect_call(self):
- self.pebble.responses.append('dummy')
+ def test_can_connect_simple(self):
+ self.pebble.responses.append(SystemInfo.from_dict({'version': '1.0.0'}))
self.assertTrue(self.container.can_connect())
+ def test_can_connect_connection_error(self):
+ def raise_error():
+ raise ConnectionError('connection error!')
+ self.pebble.get_system_info = raise_error
+ with self.assertLogs('ops.model', level='DEBUG') as cm:
+ self.assertFalse(self.container.can_connect())
+ self.assertEqual(len(cm.output), 1)
+ self.assertRegex(cm.output[0], r'DEBUG:ops.model:.*: connection error!')
+
+ def test_can_connect_file_not_found_error(self):
+ def raise_error():
+ raise FileNotFoundError('file not found!')
+ self.pebble.get_system_info = raise_error
+ with self.assertLogs('ops.model', level='DEBUG') as cm:
+ self.assertFalse(self.container.can_connect())
+ self.assertEqual(len(cm.output), 1)
+ self.assertRegex(cm.output[0], r'DEBUG:ops.model:.*: file not found!')
+
+ def test_can_connect_api_error(self):
+ def raise_error():
+ raise APIError('body', 404, 'status', 'api error!')
+ self.pebble.get_system_info = raise_error
+ with self.assertLogs('ops.model') as cm:
+ self.assertFalse(self.container.can_connect())
+ self.assertEqual(len(cm.output), 1)
+ self.assertRegex(cm.output[0], r'WARNING:ops.model:.*: api error!')
+
def test_exec(self):
self.pebble.responses.append('fake_exec_process')
p = self.container.exec(
@@ -1759,6 +1794,7 @@ class MockPebbleClient:
def get_system_info(self):
self.requests.append(('get_system_info',))
+ return self.responses.pop(0)
def replan_services(self):
self.requests.append(('replan',))
diff --git a/test/test_pebble.py b/test/test_pebble.py
index 924d110..d076795 100644
--- a/test/test_pebble.py
+++ b/test/test_pebble.py
@@ -1248,7 +1248,7 @@ class TestMultipartParser(unittest.TestCase):
if not test.error:
self.fail('unexpected error:', err)
break
- self.assertEqual(test.error, err.message())
+ self.assertEqual(test.error, str(err))
else:
if test.error:
self.fail('missing expected error: {!r}'.format(test.error))
| Log format string missing %s in can_connect
When reviewing some unrelated code, I noticed there was an [invalid `logger.debug` format string](https://github.com/canonical/operator/blob/6ee44cfb64d0680ec7e72ad1c746a87a0c63f0c8/ops/model.py#L1148) in the `model.Container.can_connect` code. It does the following:
```python
logger.debug("Pebble API is not ready; UNIX socket not found:", str(e))
```
However, the logging API expects the first argument to be a format string, so there should be a `%s` here, and the logging library gives this exception (not raised, as logging catches exceptions):
```
>>> logger.debug("Pebble API is not ready; UNIX socket not found:", str(e))
--- Logging error ---
Traceback (most recent call last):
File "/usr/lib/python3.9/logging/__init__.py", line 1083, in emit
msg = self.format(record)
File "/usr/lib/python3.9/logging/__init__.py", line 927, in format
return fmt.format(record)
File "/usr/lib/python3.9/logging/__init__.py", line 663, in format
record.message = record.getMessage()
File "/usr/lib/python3.9/logging/__init__.py", line 367, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
File "<stdin>", line 1, in <module>
Message: 'Pebble API is not ready; UNIX socket not found:'
Arguments: ('REASON',)
```
In addition, when you use `%s`, you don't need `str(e)`, you can just pass `e` directly, and the `%s` formatting calls `str` (which in turn calls `__str__`). We could update the other `APIError` case to omit this too.
I'm not 100% sure, but from a quick glance it looks like `can_connect` is short on tests. We should probably add tests for each exception case and test that it logged something.
| 0.0 | 28251faf41868017aec5add2b59a80719f5354db | [
"test/test_model.py::TestContainerPebble::test_can_connect_file_not_found_error"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_can_connect",
"test/test_model.py::TestContainerPebble::test_can_connect_api_error",
"test/test_model.py::TestContainerPebble::test_can_connect_connection_error",
"test/test_model.py::TestContainerPebble::test_can_connect_simple",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_pebble.py::TestHelpers::test_parse_timestamp",
"test/test_pebble.py::TestTypes::test_api_error",
"test/test_pebble.py::TestTypes::test_change_error",
"test/test_pebble.py::TestTypes::test_change_error_with_task_logs",
"test/test_pebble.py::TestTypes::test_change_from_dict",
"test/test_pebble.py::TestTypes::test_change_id",
"test/test_pebble.py::TestTypes::test_change_init",
"test/test_pebble.py::TestTypes::test_change_state",
"test/test_pebble.py::TestTypes::test_connection_error",
"test/test_pebble.py::TestTypes::test_error",
"test/test_pebble.py::TestTypes::test_file_info_from_dict",
"test/test_pebble.py::TestTypes::test_file_info_init",
"test/test_pebble.py::TestTypes::test_file_type",
"test/test_pebble.py::TestTypes::test_path_error",
"test/test_pebble.py::TestTypes::test_protocol_error",
"test/test_pebble.py::TestTypes::test_system_info_from_dict",
"test/test_pebble.py::TestTypes::test_system_info_init",
"test/test_pebble.py::TestTypes::test_task_from_dict",
"test/test_pebble.py::TestTypes::test_task_id",
"test/test_pebble.py::TestTypes::test_task_init",
"test/test_pebble.py::TestTypes::test_task_progress_from_dict",
"test/test_pebble.py::TestTypes::test_task_progress_init",
"test/test_pebble.py::TestTypes::test_timeout_error",
"test/test_pebble.py::TestTypes::test_warning_from_dict",
"test/test_pebble.py::TestTypes::test_warning_init",
"test/test_pebble.py::TestTypes::test_warning_state",
"test/test_pebble.py::TestPlan::test_checks",
"test/test_pebble.py::TestPlan::test_no_args",
"test/test_pebble.py::TestPlan::test_service_equality",
"test/test_pebble.py::TestPlan::test_services",
"test/test_pebble.py::TestPlan::test_yaml",
"test/test_pebble.py::TestLayer::test_dict",
"test/test_pebble.py::TestLayer::test_layer_service_equality",
"test/test_pebble.py::TestLayer::test_no_args",
"test/test_pebble.py::TestLayer::test_yaml",
"test/test_pebble.py::TestService::test_dict",
"test/test_pebble.py::TestService::test_equality",
"test/test_pebble.py::TestService::test_name_only",
"test/test_pebble.py::TestCheck::test_dict",
"test/test_pebble.py::TestCheck::test_equality",
"test/test_pebble.py::TestCheck::test_level_raw",
"test/test_pebble.py::TestCheck::test_name_only",
"test/test_pebble.py::TestServiceInfo::test_is_running",
"test/test_pebble.py::TestServiceInfo::test_service_info",
"test/test_pebble.py::TestServiceInfo::test_service_startup",
"test/test_pebble.py::TestServiceInfo::test_service_status",
"test/test_pebble.py::TestCheckInfo::test_check_info",
"test/test_pebble.py::TestCheckInfo::test_check_level",
"test/test_pebble.py::TestCheckInfo::test_check_status",
"test/test_pebble.py::TestMultipartParser::test_multipart_parser",
"test/test_pebble.py::TestClient::test_abort_change",
"test/test_pebble.py::TestClient::test_ack_warnings",
"test/test_pebble.py::TestClient::test_add_layer",
"test/test_pebble.py::TestClient::test_add_layer_invalid_type",
"test/test_pebble.py::TestClient::test_autostart_services",
"test/test_pebble.py::TestClient::test_autostart_services_async",
"test/test_pebble.py::TestClient::test_change_error",
"test/test_pebble.py::TestClient::test_checklevel_conversion",
"test/test_pebble.py::TestClient::test_client_init",
"test/test_pebble.py::TestClient::test_get_change",
"test/test_pebble.py::TestClient::test_get_changes",
"test/test_pebble.py::TestClient::test_get_checks_all",
"test/test_pebble.py::TestClient::test_get_checks_filters",
"test/test_pebble.py::TestClient::test_get_plan",
"test/test_pebble.py::TestClient::test_get_services_all",
"test/test_pebble.py::TestClient::test_get_services_names",
"test/test_pebble.py::TestClient::test_get_system_info",
"test/test_pebble.py::TestClient::test_get_warnings",
"test/test_pebble.py::TestClient::test_list_files_itself",
"test/test_pebble.py::TestClient::test_list_files_path",
"test/test_pebble.py::TestClient::test_list_files_pattern",
"test/test_pebble.py::TestClient::test_make_dir_all_options",
"test/test_pebble.py::TestClient::test_make_dir_basic",
"test/test_pebble.py::TestClient::test_make_dir_error",
"test/test_pebble.py::TestClient::test_pull_binary",
"test/test_pebble.py::TestClient::test_pull_boundary_spanning_chunk",
"test/test_pebble.py::TestClient::test_pull_path_error",
"test/test_pebble.py::TestClient::test_pull_protocol_errors",
"test/test_pebble.py::TestClient::test_pull_text",
"test/test_pebble.py::TestClient::test_push_all_options",
"test/test_pebble.py::TestClient::test_push_binary",
"test/test_pebble.py::TestClient::test_push_bytes",
"test/test_pebble.py::TestClient::test_push_path_error",
"test/test_pebble.py::TestClient::test_push_str",
"test/test_pebble.py::TestClient::test_push_text",
"test/test_pebble.py::TestClient::test_push_uid_gid",
"test/test_pebble.py::TestClient::test_remove_path_basic",
"test/test_pebble.py::TestClient::test_remove_path_error",
"test/test_pebble.py::TestClient::test_remove_path_recursive",
"test/test_pebble.py::TestClient::test_replan_services",
"test/test_pebble.py::TestClient::test_replan_services_async",
"test/test_pebble.py::TestClient::test_restart_services",
"test/test_pebble.py::TestClient::test_restart_services_async",
"test/test_pebble.py::TestClient::test_send_signal_name",
"test/test_pebble.py::TestClient::test_send_signal_number",
"test/test_pebble.py::TestClient::test_send_signal_type_error",
"test/test_pebble.py::TestClient::test_start_services",
"test/test_pebble.py::TestClient::test_start_services_async",
"test/test_pebble.py::TestClient::test_stop_services",
"test/test_pebble.py::TestClient::test_stop_services_async",
"test/test_pebble.py::TestClient::test_wait_change_error",
"test/test_pebble.py::TestClient::test_wait_change_success",
"test/test_pebble.py::TestClient::test_wait_change_success_multiple_calls",
"test/test_pebble.py::TestClient::test_wait_change_success_polled",
"test/test_pebble.py::TestClient::test_wait_change_success_polled_timeout_none",
"test/test_pebble.py::TestClient::test_wait_change_success_timeout_none",
"test/test_pebble.py::TestClient::test_wait_change_timeout",
"test/test_pebble.py::TestClient::test_wait_change_timeout_polled",
"test/test_pebble.py::TestSocketClient::test_real_client",
"test/test_pebble.py::TestSocketClient::test_socket_not_found",
"test/test_pebble.py::TestExecError::test_init",
"test/test_pebble.py::TestExecError::test_str",
"test/test_pebble.py::TestExecError::test_str_truncated",
"test/test_pebble.py::TestExec::test_arg_errors",
"test/test_pebble.py::TestExec::test_connect_websocket_error",
"test/test_pebble.py::TestExec::test_no_wait_call",
"test/test_pebble.py::TestExec::test_send_signal",
"test/test_pebble.py::TestExec::test_wait_change_error",
"test/test_pebble.py::TestExec::test_wait_exit_nonzero",
"test/test_pebble.py::TestExec::test_wait_exit_zero",
"test/test_pebble.py::TestExec::test_wait_file_io",
"test/test_pebble.py::TestExec::test_wait_other_args",
"test/test_pebble.py::TestExec::test_wait_output",
"test/test_pebble.py::TestExec::test_wait_output_bad_command",
"test/test_pebble.py::TestExec::test_wait_output_bytes",
"test/test_pebble.py::TestExec::test_wait_output_combine_stderr",
"test/test_pebble.py::TestExec::test_wait_output_exit_nonzero",
"test/test_pebble.py::TestExec::test_wait_output_exit_nonzero_combine_stderr",
"test/test_pebble.py::TestExec::test_wait_output_send_stdin",
"test/test_pebble.py::TestExec::test_wait_output_send_stdin_bytes",
"test/test_pebble.py::TestExec::test_wait_passed_output",
"test/test_pebble.py::TestExec::test_wait_passed_output_bad_command",
"test/test_pebble.py::TestExec::test_wait_passed_output_bytes",
"test/test_pebble.py::TestExec::test_wait_passed_output_combine_stderr",
"test/test_pebble.py::TestExec::test_wait_returned_io",
"test/test_pebble.py::TestExec::test_wait_returned_io_bytes",
"test/test_pebble.py::TestExec::test_wait_timeout",
"test/test_pebble.py::TestExec::test_websocket_recv_raises",
"test/test_pebble.py::TestExec::test_websocket_send_raises"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2022-11-27 23:22:20+00:00 | apache-2.0 | 1,496 |
|
canonical__operator-900 | diff --git a/ops/framework.py b/ops/framework.py
index 0ba99e1..c9da35e 100755
--- a/ops/framework.py
+++ b/ops/framework.py
@@ -549,7 +549,7 @@ _event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$'
class Framework(Object):
- """Main interface to from the Charm to the Operator Framework internals."""
+ """Main interface from the Charm to the Operator Framework internals."""
on = FrameworkEvents()
diff --git a/ops/model.py b/ops/model.py
index 7bf5576..141bd4f 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -2926,7 +2926,7 @@ class _ModelBackend:
args = [] # type: List[str]
if id is not None:
args.append(id)
- if label is not None:
+ elif label is not None: # elif because Juju secret-info-get doesn't allow id and label
args.extend(['--label', label])
result = self._run_for_secret('secret-info-get', *args, return_output=True, use_json=True)
info_dicts = typing.cast(Dict[str, 'JsonObject'], result)
| canonical/operator | e33ad4f221dd34cba5c81d3fd9eee868a6d866a7 | diff --git a/ops/testing.py b/ops/testing.py
index 7082976..cf8912a 100755
--- a/ops/testing.py
+++ b/ops/testing.py
@@ -1116,7 +1116,7 @@ class Harness(Generic[CharmType]):
self._charm.on.leader_elected.emit()
def set_planned_units(self, num_units: int) -> None:
- """Set the number of "planned" units that "Application.planned_units" should return.
+ """Set the number of "planned" units that "Application.planned_units" should return.
In real world circumstances, this number will be the number of units in the
application. E.g., this number will be the number of peers this unit has, plus one, as we
diff --git a/test/test_model.py b/test/test_model.py
index 7859feb..976fb77 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -2993,14 +2993,34 @@ class TestSecretClass(unittest.TestCase):
def test_get_info(self):
fake_script(self, 'secret-info-get', """echo '{"x": {"label": "y", "revision": 7}}'""")
+ # Secret with ID only
+ secret = self.make_secret(id='x')
+ info = secret.get_info()
+ self.assertEqual(info.id, 'secret:x')
+ self.assertEqual(info.label, 'y')
+ self.assertEqual(info.revision, 7)
+
+ # Secret with label only
+ secret = self.make_secret(label='y')
+ info = secret.get_info()
+ self.assertEqual(info.id, 'secret:x')
+ self.assertEqual(info.label, 'y')
+ self.assertEqual(info.revision, 7)
+
+ # Secret with ID and label
secret = self.make_secret(id='x', label='y')
info = secret.get_info()
self.assertEqual(info.id, 'secret:x')
self.assertEqual(info.label, 'y')
self.assertEqual(info.revision, 7)
- self.assertEqual(fake_script_calls(self, clear=True),
- [['secret-info-get', 'secret:x', '--label', 'y', '--format=json']])
+ self.assertEqual(
+ fake_script_calls(self, clear=True),
+ [
+ ['secret-info-get', 'secret:x', '--format=json'],
+ ['secret-info-get', '--label', 'y', '--format=json'],
+ ['secret-info-get', 'secret:x', '--format=json'],
+ ])
def test_set_content(self):
fake_script(self, 'secret-set', """exit 0""")
| uncaught error in `Secret.get_info()`
the following error appears while running the `owner` charm in [this demo bundle](https://github.com/PietroPasotti/secrets-demo-charms/tree/main)

| 0.0 | e33ad4f221dd34cba5c81d3fd9eee868a6d866a7 | [
"test/test_model.py::TestSecretClass::test_get_info"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_run_error",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_can_connect",
"test/test_model.py::TestContainerPebble::test_can_connect_api_error",
"test/test_model.py::TestContainerPebble::test_can_connect_connection_error",
"test/test_model.py::TestContainerPebble::test_can_connect_file_not_found_error",
"test/test_model.py::TestContainerPebble::test_can_connect_simple",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_get_status",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_model.py::TestSecrets::test_add_secret_errors",
"test/test_model.py::TestSecrets::test_app_add_secret_args",
"test/test_model.py::TestSecrets::test_app_add_secret_simple",
"test/test_model.py::TestSecrets::test_get_secret_id",
"test/test_model.py::TestSecrets::test_get_secret_id_and_label",
"test/test_model.py::TestSecrets::test_get_secret_no_args",
"test/test_model.py::TestSecrets::test_get_secret_not_found",
"test/test_model.py::TestSecrets::test_get_secret_other_error",
"test/test_model.py::TestSecrets::test_unit_add_secret_args",
"test/test_model.py::TestSecrets::test_unit_add_secret_errors",
"test/test_model.py::TestSecrets::test_unit_add_secret_simple",
"test/test_model.py::TestSecretInfo::test_from_dict",
"test/test_model.py::TestSecretInfo::test_init",
"test/test_model.py::TestSecretClass::test_get_content_cached",
"test/test_model.py::TestSecretClass::test_get_content_refresh",
"test/test_model.py::TestSecretClass::test_get_content_uncached",
"test/test_model.py::TestSecretClass::test_grant",
"test/test_model.py::TestSecretClass::test_id_and_label",
"test/test_model.py::TestSecretClass::test_peek_content",
"test/test_model.py::TestSecretClass::test_remove_all_revisions",
"test/test_model.py::TestSecretClass::test_remove_revision",
"test/test_model.py::TestSecretClass::test_revoke",
"test/test_model.py::TestSecretClass::test_set_content",
"test/test_model.py::TestSecretClass::test_set_info"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-03 17:31:50+00:00 | apache-2.0 | 1,497 |
|
canonical__operator-903 | diff --git a/ops/model.py b/ops/model.py
index 141bd4f..2ab4ff8 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -276,6 +276,9 @@ class Model:
"""
if not (id or label):
raise TypeError('Must provide an id or label, or both')
+ if id is not None:
+ # Canonicalize to "secret:<id>" form for consistency in backend calls.
+ id = Secret._canonicalize_id(id)
try:
content = self._backend.secret_get(id=id, label=label)
return Secret(self._backend, id=id, label=label, content=content)
| canonical/operator | 2cbee00aa38919d52525a143303332745e3de40c | diff --git a/test/test_model.py b/test/test_model.py
index 976fb77..35df7ea 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -2841,7 +2841,18 @@ class TestSecrets(unittest.TestCase):
self.assertEqual(secret.get_content(), {'foo': 'g'})
self.assertEqual(fake_script_calls(self, clear=True),
- [['secret-get', '123', '--format=json']])
+ [['secret-get', 'secret:123', '--format=json']])
+
+ def test_get_secret_label(self):
+ fake_script(self, 'secret-get', """echo '{"foo": "g"}'""")
+
+ secret = self.model.get_secret(label='lbl')
+ self.assertIsNone(secret.id)
+ self.assertEqual(secret.label, 'lbl')
+ self.assertEqual(secret.get_content(), {'foo': 'g'})
+
+ self.assertEqual(fake_script_calls(self, clear=True),
+ [['secret-get', '--label', 'lbl', '--format=json']])
def test_get_secret_id_and_label(self):
fake_script(self, 'secret-get', """echo '{"foo": "h"}'""")
@@ -2852,7 +2863,7 @@ class TestSecrets(unittest.TestCase):
self.assertEqual(secret.get_content(), {'foo': 'h'})
self.assertEqual(fake_script_calls(self, clear=True),
- [['secret-get', '123', '--label', 'l', '--format=json']])
+ [['secret-get', 'secret:123', '--label', 'l', '--format=json']])
def test_get_secret_no_args(self):
with self.assertRaises(TypeError):
| secret id is not always canonicalized before calling secret_get
Secret canonicalizes the ID it receives upon initialization.
However, before the Secret object is initialized, the ID is used to call secret_get. This results in the ID being sometimes canonicalized, sometimes not, depending on the caller.
For example:
```
self.model.get_secret(id="foo").get_content()
# resulting call: secret-get --id foo
self.model.get_secret(id="foo").peek_content()
# resulting call: secret-get --id secret:foo --peek
```
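The patch above addresses this by canonicalizing the ID once in `Model.get_secret` before any backend call. A rough, self-contained sketch of that normalization (a stand-in for `Secret._canonicalize_id`, whose exact behaviour may differ):
```python
def canonicalize_id(secret_id: str) -> str:
    # Ensure the "secret:" prefix so every secret-get / secret-info-get call
    # receives the ID in the same canonical form.
    secret_id = secret_id.strip()
    if not secret_id.startswith("secret:"):
        secret_id = f"secret:{secret_id}"
    return secret_id

assert canonicalize_id("foo") == "secret:foo"
assert canonicalize_id("secret:foo") == "secret:foo"
```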
This is not a problem per se, but it could become a headache in the future. | 0.0 | 2cbee00aa38919d52525a143303332745e3de40c | [
"test/test_model.py::TestSecrets::test_get_secret_id",
"test/test_model.py::TestSecrets::test_get_secret_id_and_label"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_run_error",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_can_connect",
"test/test_model.py::TestContainerPebble::test_can_connect_api_error",
"test/test_model.py::TestContainerPebble::test_can_connect_connection_error",
"test/test_model.py::TestContainerPebble::test_can_connect_file_not_found_error",
"test/test_model.py::TestContainerPebble::test_can_connect_simple",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_get_status",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_model.py::TestSecrets::test_add_secret_errors",
"test/test_model.py::TestSecrets::test_app_add_secret_args",
"test/test_model.py::TestSecrets::test_app_add_secret_simple",
"test/test_model.py::TestSecrets::test_get_secret_label",
"test/test_model.py::TestSecrets::test_get_secret_no_args",
"test/test_model.py::TestSecrets::test_get_secret_not_found",
"test/test_model.py::TestSecrets::test_get_secret_other_error",
"test/test_model.py::TestSecrets::test_unit_add_secret_args",
"test/test_model.py::TestSecrets::test_unit_add_secret_errors",
"test/test_model.py::TestSecrets::test_unit_add_secret_simple",
"test/test_model.py::TestSecretInfo::test_from_dict",
"test/test_model.py::TestSecretInfo::test_init",
"test/test_model.py::TestSecretClass::test_get_content_cached",
"test/test_model.py::TestSecretClass::test_get_content_refresh",
"test/test_model.py::TestSecretClass::test_get_content_uncached",
"test/test_model.py::TestSecretClass::test_get_info",
"test/test_model.py::TestSecretClass::test_grant",
"test/test_model.py::TestSecretClass::test_id_and_label",
"test/test_model.py::TestSecretClass::test_peek_content",
"test/test_model.py::TestSecretClass::test_remove_all_revisions",
"test/test_model.py::TestSecretClass::test_remove_revision",
"test/test_model.py::TestSecretClass::test_revoke",
"test/test_model.py::TestSecretClass::test_set_content",
"test/test_model.py::TestSecretClass::test_set_info"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-08 23:42:34+00:00 | apache-2.0 | 1,498 |
|
canonical__operator-949 | diff --git a/ops/model.py b/ops/model.py
index 7d24ec5..3458c77 100644
--- a/ops/model.py
+++ b/ops/model.py
@@ -2093,7 +2093,7 @@ class Container:
def local_list(source_path: Path) -> List[pebble.FileInfo]:
paths = source_path.iterdir() if source_path.is_dir() else [source_path]
- files = [self._build_fileinfo(source_path / f) for f in paths]
+ files = [self._build_fileinfo(f) for f in paths]
return files
errors: List[Tuple[str, Exception]] = []
@@ -2256,7 +2256,7 @@ class Container:
# /src --> /dst/src
file_path, source_path, dest_dir = Path(file_path), Path(source_path), Path(dest_dir)
prefix = str(source_path.parent)
- if os.path.commonprefix([prefix, str(file_path)]) != prefix:
+ if prefix != '.' and os.path.commonprefix([prefix, str(file_path)]) != prefix:
raise RuntimeError(
f'file "{file_path}" does not have specified prefix "{prefix}"')
path_suffix = os.path.relpath(str(file_path), prefix)
| canonical/operator | 37650135082030830c6420a404933bdc500d26c1 | diff --git a/test/test_model.py b/test/test_model.py
index 7b7344a..97b0c9c 100755
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -1174,6 +1174,64 @@ def test_recursive_push_and_pull(case):
assert c.exists(fpath), f'pull_path failed: file {fpath} missing at destination'
[email protected]('case', [
+ PushPullCase(
+ name='push directory without trailing slash',
+ path='foo',
+ dst='/baz',
+ files=['foo/bar/baz.txt', 'foo/foobar.txt'],
+ want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'},
+ ),
+ PushPullCase(
+ name='push directory with trailing slash',
+ path='foo/',
+ dst='/baz',
+ files=['foo/bar/baz.txt', 'foo/foobar.txt'],
+ want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'},
+ ),
+ PushPullCase(
+ name='push directory relative pathing',
+ path='./foo',
+ dst='/baz',
+ files=['foo/bar/baz.txt', 'foo/foobar.txt'],
+ want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'},
+ ),
+])
+def test_push_path_relative(case):
+ harness = ops.testing.Harness(ops.CharmBase, meta='''
+ name: test-app
+ containers:
+ foo:
+ resource: foo-image
+ ''')
+ harness.begin()
+ harness.set_can_connect('foo', True)
+ container = harness.model.unit.containers['foo']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ cwd = os.getcwd()
+ # change working directory to enable relative pathing for testing
+ os.chdir(tmpdir)
+ try:
+ # create test files under temporary test directory
+ tmp = pathlib.Path(tmpdir)
+ for testfile in case.files:
+ testfile_path = pathlib.Path(tmp / testfile)
+ testfile_path.parent.mkdir(parents=True, exist_ok=True)
+ testfile_path.touch(exist_ok=True)
+ testfile_path.write_text("test", encoding="utf-8")
+
+ # push path under test to container
+ container.push_path(case.path, case.dst)
+
+ # test
+ for want_path in case.want:
+ content = container.pull(want_path).read()
+ assert content == 'test'
+ finally:
+ os.chdir(cwd)
+
+
class TestApplication(unittest.TestCase):
def setUp(self):
| Container.push_path duplicates src_path
```python3
def local_list(source_path: Path) -> List[pebble.FileInfo]:
paths = source_path.iterdir() if source_path.is_dir() else [source_path]
files = [self._build_fileinfo(source_path / f) for f in paths]
return files
```
In ops.model, line 2115, `self._build_fileinfo(source_path / f)` duplicates the source path.
For instance:
```
source_path = Path("templates/config.yaml")
...
files = [self._build_fileinfo(source_path / f) for f in paths]
```
`source_path / f` would be equivalent to `"templates/config.yaml/templates/config.yaml"` which causes `FileNotFoundError`.
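A small, self-contained demonstration of why the extra prefix breaks (directory names are made up for the demo): `Path.iterdir()` on a relative path already yields paths that include the parent directory, so joining them onto `source_path` again duplicates it.
```python
import os
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    old_cwd = os.getcwd()
    os.chdir(tmp)  # use relative paths, as a charm pushing "templates" would
    try:
        source_path = Path("templates")
        source_path.mkdir()
        (source_path / "config.yaml").write_text("demo")

        for f in source_path.iterdir():
            # f is already "templates/config.yaml", so it exists ...
            print(f, f.exists())
            # ... while "templates/templates/config.yaml" does not.
            print(source_path / f, (source_path / f).exists())
    finally:
        os.chdir(old_cwd)
```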
| 0.0 | 37650135082030830c6420a404933bdc500d26c1 | [
"test/test_model.py::test_push_path_relative[case0]",
"test/test_model.py::test_push_path_relative[case1]",
"test/test_model.py::test_push_path_relative[case2]"
]
| [
"test/test_model.py::TestModel::test_active_message_default",
"test/test_model.py::TestModel::test_app_immutable",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_leader",
"test/test_model.py::TestModel::test_app_relation_data_modify_local_as_minion",
"test/test_model.py::TestModel::test_base_status_instance_raises",
"test/test_model.py::TestModel::test_config",
"test/test_model.py::TestModel::test_config_immutable",
"test/test_model.py::TestModel::test_get_app_relation_data",
"test/test_model.py::TestModel::test_get_relation",
"test/test_model.py::TestModel::test_invalid_type_relation_data",
"test/test_model.py::TestModel::test_is_leader",
"test/test_model.py::TestModel::test_local_set_valid_app_status",
"test/test_model.py::TestModel::test_local_set_valid_unit_status",
"test/test_model.py::TestModel::test_model_attributes",
"test/test_model.py::TestModel::test_model_name_from_backend",
"test/test_model.py::TestModel::test_our_unit_is_our",
"test/test_model.py::TestModel::test_peer_relation_app",
"test/test_model.py::TestModel::test_pod_immutable",
"test/test_model.py::TestModel::test_pod_spec",
"test/test_model.py::TestModel::test_relation_data_access_peer_leader",
"test/test_model.py::TestModel::test_relation_data_access_peer_minion",
"test/test_model.py::TestModel::test_relation_data_del_key",
"test/test_model.py::TestModel::test_relation_data_del_missing_key",
"test/test_model.py::TestModel::test_relation_data_modify_our",
"test/test_model.py::TestModel::test_relation_data_modify_remote",
"test/test_model.py::TestModel::test_relation_data_type_check",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_follower",
"test/test_model.py::TestModel::test_relation_local_app_data_readability_leader",
"test/test_model.py::TestModel::test_relation_no_units",
"test/test_model.py::TestModel::test_relation_set_fail",
"test/test_model.py::TestModel::test_relations_immutable",
"test/test_model.py::TestModel::test_relations_keys",
"test/test_model.py::TestModel::test_remote_app_relation_data",
"test/test_model.py::TestModel::test_remote_app_status",
"test/test_model.py::TestModel::test_remote_unit_status",
"test/test_model.py::TestModel::test_remote_units_is_our",
"test/test_model.py::TestModel::test_resources",
"test/test_model.py::TestModel::test_resources_immutable",
"test/test_model.py::TestModel::test_run_error",
"test/test_model.py::TestModel::test_set_app_status_invalid",
"test/test_model.py::TestModel::test_set_app_status_non_leader_raises",
"test/test_model.py::TestModel::test_set_unit_status_invalid",
"test/test_model.py::TestModel::test_status_eq",
"test/test_model.py::TestModel::test_status_repr",
"test/test_model.py::TestModel::test_storage",
"test/test_model.py::TestModel::test_storages_immutable",
"test/test_model.py::TestModel::test_unit_immutable",
"test/test_model.py::TestModel::test_unit_relation_data",
"test/test_model.py::TestModel::test_workload_version",
"test/test_model.py::TestModel::test_workload_version_invalid",
"test/test_model.py::test_recursive_list[case0]",
"test/test_model.py::test_recursive_list[case1]",
"test/test_model.py::test_recursive_list[case2]",
"test/test_model.py::test_recursive_push_and_pull[case0]",
"test/test_model.py::test_recursive_push_and_pull[case1]",
"test/test_model.py::test_recursive_push_and_pull[case2]",
"test/test_model.py::test_recursive_push_and_pull[case3]",
"test/test_model.py::test_recursive_push_and_pull[case4]",
"test/test_model.py::test_recursive_push_and_pull[case5]",
"test/test_model.py::test_recursive_push_and_pull[case6]",
"test/test_model.py::test_recursive_push_and_pull[case7]",
"test/test_model.py::test_recursive_push_and_pull[case8]",
"test/test_model.py::TestApplication::test_mocked_get_services",
"test/test_model.py::TestApplication::test_planned_units",
"test/test_model.py::TestApplication::test_planned_units_garbage_values",
"test/test_model.py::TestApplication::test_planned_units_override",
"test/test_model.py::TestApplication::test_planned_units_user_set",
"test/test_model.py::TestContainers::test_unit_containers",
"test/test_model.py::TestContainers::test_unit_get_container",
"test/test_model.py::TestContainerPebble::test_add_layer",
"test/test_model.py::TestContainerPebble::test_autostart",
"test/test_model.py::TestContainerPebble::test_can_connect",
"test/test_model.py::TestContainerPebble::test_can_connect_api_error",
"test/test_model.py::TestContainerPebble::test_can_connect_connection_error",
"test/test_model.py::TestContainerPebble::test_can_connect_file_not_found_error",
"test/test_model.py::TestContainerPebble::test_can_connect_simple",
"test/test_model.py::TestContainerPebble::test_exec",
"test/test_model.py::TestContainerPebble::test_get_check",
"test/test_model.py::TestContainerPebble::test_get_checks",
"test/test_model.py::TestContainerPebble::test_get_plan",
"test/test_model.py::TestContainerPebble::test_get_service",
"test/test_model.py::TestContainerPebble::test_get_services",
"test/test_model.py::TestContainerPebble::test_list_files",
"test/test_model.py::TestContainerPebble::test_make_dir",
"test/test_model.py::TestContainerPebble::test_pull",
"test/test_model.py::TestContainerPebble::test_push",
"test/test_model.py::TestContainerPebble::test_remove_path",
"test/test_model.py::TestContainerPebble::test_replan",
"test/test_model.py::TestContainerPebble::test_restart",
"test/test_model.py::TestContainerPebble::test_restart_fallback",
"test/test_model.py::TestContainerPebble::test_restart_fallback_non_400_error",
"test/test_model.py::TestContainerPebble::test_restart_no_arguments",
"test/test_model.py::TestContainerPebble::test_send_signal",
"test/test_model.py::TestContainerPebble::test_socket_path",
"test/test_model.py::TestContainerPebble::test_start",
"test/test_model.py::TestContainerPebble::test_start_no_arguments",
"test/test_model.py::TestContainerPebble::test_stop",
"test/test_model.py::TestContainerPebble::test_stop_no_arguments",
"test/test_model.py::TestContainerPebble::test_type_errors",
"test/test_model.py::TestModelBindings::test_binding_by_relation",
"test/test_model.py::TestModelBindings::test_binding_by_relation_name",
"test/test_model.py::TestModelBindings::test_binding_no_iface_name",
"test/test_model.py::TestModelBindings::test_dead_relations",
"test/test_model.py::TestModelBindings::test_empty_bind_addresses",
"test/test_model.py::TestModelBindings::test_empty_interface_info",
"test/test_model.py::TestModelBindings::test_invalid_keys",
"test/test_model.py::TestModelBindings::test_missing_bind_addresses",
"test/test_model.py::TestModelBindings::test_missing_egress_subnets",
"test/test_model.py::TestModelBindings::test_missing_ingress_addresses",
"test/test_model.py::TestModelBindings::test_no_bind_addresses",
"test/test_model.py::TestModelBindings::test_unresolved_ingress_addresses",
"test/test_model.py::TestModelBackend::test_action_fail",
"test/test_model.py::TestModelBackend::test_action_get",
"test/test_model.py::TestModelBackend::test_action_get_error",
"test/test_model.py::TestModelBackend::test_action_log",
"test/test_model.py::TestModelBackend::test_action_log_error",
"test/test_model.py::TestModelBackend::test_action_set",
"test/test_model.py::TestModelBackend::test_action_set_dotted_dict",
"test/test_model.py::TestModelBackend::test_action_set_duplicated_keys",
"test/test_model.py::TestModelBackend::test_action_set_error",
"test/test_model.py::TestModelBackend::test_action_set_key_validation",
"test/test_model.py::TestModelBackend::test_action_set_more_nested",
"test/test_model.py::TestModelBackend::test_action_set_nested",
"test/test_model.py::TestModelBackend::test_application_version_set",
"test/test_model.py::TestModelBackend::test_application_version_set_invalid",
"test/test_model.py::TestModelBackend::test_invalid_metric_label_values",
"test/test_model.py::TestModelBackend::test_invalid_metric_labels",
"test/test_model.py::TestModelBackend::test_invalid_metric_names",
"test/test_model.py::TestModelBackend::test_invalid_metric_values",
"test/test_model.py::TestModelBackend::test_is_leader_refresh",
"test/test_model.py::TestModelBackend::test_juju_log",
"test/test_model.py::TestModelBackend::test_local_get_status",
"test/test_model.py::TestModelBackend::test_local_set_invalid_status",
"test/test_model.py::TestModelBackend::test_network_get",
"test/test_model.py::TestModelBackend::test_network_get_errors",
"test/test_model.py::TestModelBackend::test_planned_units",
"test/test_model.py::TestModelBackend::test_relation_get_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_get_set_is_app_arg",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_env",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_errors",
"test/test_model.py::TestModelBackend::test_relation_remote_app_name_script_success",
"test/test_model.py::TestModelBackend::test_relation_set_juju_version_quirks",
"test/test_model.py::TestModelBackend::test_relation_tool_errors",
"test/test_model.py::TestModelBackend::test_status_get",
"test/test_model.py::TestModelBackend::test_status_is_app_forced_kwargs",
"test/test_model.py::TestModelBackend::test_status_set_is_app_not_bool_raises",
"test/test_model.py::TestModelBackend::test_storage_tool_errors",
"test/test_model.py::TestModelBackend::test_valid_metrics",
"test/test_model.py::TestLazyMapping::test_invalidate",
"test/test_model.py::TestSecrets::test_add_secret_errors",
"test/test_model.py::TestSecrets::test_app_add_secret_args",
"test/test_model.py::TestSecrets::test_app_add_secret_simple",
"test/test_model.py::TestSecrets::test_get_secret_id",
"test/test_model.py::TestSecrets::test_get_secret_id_and_label",
"test/test_model.py::TestSecrets::test_get_secret_label",
"test/test_model.py::TestSecrets::test_get_secret_no_args",
"test/test_model.py::TestSecrets::test_get_secret_not_found",
"test/test_model.py::TestSecrets::test_get_secret_other_error",
"test/test_model.py::TestSecrets::test_unit_add_secret_args",
"test/test_model.py::TestSecrets::test_unit_add_secret_errors",
"test/test_model.py::TestSecrets::test_unit_add_secret_simple",
"test/test_model.py::TestSecretInfo::test_from_dict",
"test/test_model.py::TestSecretInfo::test_init",
"test/test_model.py::TestSecretClass::test_get_content_cached",
"test/test_model.py::TestSecretClass::test_get_content_refresh",
"test/test_model.py::TestSecretClass::test_get_content_uncached",
"test/test_model.py::TestSecretClass::test_get_info",
"test/test_model.py::TestSecretClass::test_grant",
"test/test_model.py::TestSecretClass::test_id_and_label",
"test/test_model.py::TestSecretClass::test_peek_content",
"test/test_model.py::TestSecretClass::test_remove_all_revisions",
"test/test_model.py::TestSecretClass::test_remove_revision",
"test/test_model.py::TestSecretClass::test_revoke",
"test/test_model.py::TestSecretClass::test_set_content",
"test/test_model.py::TestSecretClass::test_set_info",
"test/test_model.py::TestPorts::test_close_port",
"test/test_model.py::TestPorts::test_close_port_error",
"test/test_model.py::TestPorts::test_open_port",
"test/test_model.py::TestPorts::test_open_port_error",
"test/test_model.py::TestPorts::test_opened_ports",
"test/test_model.py::TestPorts::test_opened_ports_warnings"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2023-06-12 05:23:08+00:00 | apache-2.0 | 1,499 |
|
capless__envs-19 | diff --git a/envs/__init__.py b/envs/__init__.py
index 9d6bbce..4a0b860 100644
--- a/envs/__init__.py
+++ b/envs/__init__.py
@@ -48,12 +48,14 @@ class Env(object):
json.dump({'key': key, 'var_type': var_type, 'default': default, 'value': os.getenv(key)}, f)
f.write(',')
value = os.getenv(key, default)
- if not value and not allow_none:
- raise EnvsValueException('{}: Environment Variable Not Set'.format(key))
if not var_type in self.valid_types.keys():
raise ValueError(
'The var_type argument should be one of the following {0}'.format(
','.join(self.valid_types.keys())))
+ if value is None:
+ if not allow_none:
+ raise EnvsValueException('{}: Environment Variable Not Set'.format(key))
+ return value
return self.validate_type(value, self.valid_types[var_type], key)
def validate_type(self, value, klass, key):
diff --git a/envs/util.py b/envs/util.py
index ab0f58c..adabc60 100644
--- a/envs/util.py
+++ b/envs/util.py
@@ -4,12 +4,12 @@ import json
import os
import sys
-from click._compat import raw_input
-
from . import Env, ENVS_RESULT_FILENAME
VAR_TYPES = Env.valid_types.keys()
+if sys.version_info >= (3, 0):
+ raw_input = input
def import_util(imp):
"""
| capless/envs | 3bfd12ee46c4da92d0cdf68a183531ffdeb5ea73 | diff --git a/envs/tests.py b/envs/tests.py
index 741ad43..1b066df 100644
--- a/envs/tests.py
+++ b/envs/tests.py
@@ -14,6 +14,7 @@ except ImportError:
import sys
from envs import env
+from envs.exceptions import EnvsValueException
class EnvTestCase(unittest.TestCase):
def setUp(self):
@@ -113,6 +114,40 @@ class EnvTestCase(unittest.TestCase):
self.assertEqual(env('HELLO', 'true', var_type='boolean'), True)
self.assertEqual(env('HELLO', Decimal('3.14'), var_type='decimal'), Decimal('3.14'))
+ def test_without_defaults_allow_none(self):
+ self.assertEqual(env('HELLO'), None)
+ self.assertEqual(env('HELLO', var_type='integer'), None)
+ self.assertEqual(env('HELLO', var_type='float'), None)
+ self.assertEqual(env('HELLO', var_type='list'), None)
+
+ def test_without_defaults_disallow_none(self):
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', allow_none=False)
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', var_type='integer', allow_none=False)
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', var_type='float', allow_none=False)
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', var_type='list', allow_none=False)
+
+ def test_empty_values(self):
+ os.environ.setdefault('EMPTY', '')
+ self.assertEqual(env('EMPTY'), '')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='integer')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='float')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='list')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='dict')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='tuple')
+ with self.assertRaises(ValueError):
+ env('EMPTY', var_type='boolean')
+ with self.assertRaises(ArithmeticError):
+ env('EMPTY', var_type='decimal')
+
'''
Each CLI Test must be run outside of test suites in isolation
since Click CLI Runner alters the global context
| Incompatible with click>=8.0
Updating click to 8.0 and later breaks the tests, because `raw_input` isn't exposed via `click._compat` anymore.
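A version-independent shim of the kind applied in the patch above sidesteps the private import entirely (illustrative sketch only; nothing here is needed to reproduce the failure):

```python
import sys

if sys.version_info >= (3, 0):
    # Python 3 renamed raw_input() to input(); alias it locally instead of
    # importing the helper from click's private _compat module.
    raw_input = input

reply = raw_input("continue? [y/N] ")  # resolves on both Python 2 and 3
print(reply)
```

The import errors from the unmodified test run look like this: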
```
======================================================================
ERROR: util (unittest.loader._FailedTest)
----------------------------------------------------------------------
ImportError: Failed to import test module: util
Traceback (most recent call last):
File "/nix/store/2nhfiak8a30vw67mxksc2kdb69np2jcw-python3-3.8.9/lib/python3.8/unittest/loader.py", line 154, in loadTestsFromName
module = __import__(module_name)
File "/build/envs-1.3/envs/util.py", line 7, in <module>
from click._compat import raw_input
ImportError: cannot import name 'raw_input' from 'click._compat' (/nix/store/s6a73ssai10wpz50nnk2cy3k48wydl05-python3.8-click-8.0.1/lib/python3.8/site-packages/click/_compat.py)
======================================================================
ERROR: cli (unittest.loader._FailedTest)
----------------------------------------------------------------------
ImportError: Failed to import test module: cli
Traceback (most recent call last):
File "/nix/store/2nhfiak8a30vw67mxksc2kdb69np2jcw-python3-3.8.9/lib/python3.8/unittest/loader.py", line 154, in loadTestsFromName
module = __import__(module_name)
File "/build/envs-1.3/envs/cli.py", line 10, in <module>
from .util import convert_module, import_mod, list_envs_module, raw_input
File "/build/envs-1.3/envs/util.py", line 7, in <module>
from click._compat import raw_input
ImportError: cannot import name 'raw_input' from 'click._compat' (/nix/store/s6a73ssai10wpz50nnk2cy3k48wydl05-python3.8-click-8.0.1/lib/python3.8/site-packages/click/_compat.py)
----------------------------------------------------------------------
Ran 20 tests in 0.005s
FAILED (errors=2)
``` | 0.0 | 3bfd12ee46c4da92d0cdf68a183531ffdeb5ea73 | [
"envs/tests.py::EnvTestCase::test_without_defaults_allow_none"
]
| [
"envs/tests.py::EnvTestCase::test_boolean_invalid",
"envs/tests.py::EnvTestCase::test_boolean_valid",
"envs/tests.py::EnvTestCase::test_boolean_valid_false",
"envs/tests.py::EnvTestCase::test_decimal_invalid",
"envs/tests.py::EnvTestCase::test_decimal_valid",
"envs/tests.py::EnvTestCase::test_defaults",
"envs/tests.py::EnvTestCase::test_dict_invalid",
"envs/tests.py::EnvTestCase::test_dict_valid",
"envs/tests.py::EnvTestCase::test_empty_values",
"envs/tests.py::EnvTestCase::test_float_invalid",
"envs/tests.py::EnvTestCase::test_float_valid",
"envs/tests.py::EnvTestCase::test_integer_invalid",
"envs/tests.py::EnvTestCase::test_integer_valid",
"envs/tests.py::EnvTestCase::test_list_invalid",
"envs/tests.py::EnvTestCase::test_list_valid",
"envs/tests.py::EnvTestCase::test_string_valid",
"envs/tests.py::EnvTestCase::test_tuple_invalid",
"envs/tests.py::EnvTestCase::test_tuple_valid",
"envs/tests.py::EnvTestCase::test_without_defaults_disallow_none",
"envs/tests.py::EnvTestCase::test_wrong_var_type"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-06-19 14:35:17+00:00 | apache-2.0 | 1,500 |
|
capless__envs-20 | diff --git a/envs/__init__.py b/envs/__init__.py
index 9d6bbce..4a0b860 100644
--- a/envs/__init__.py
+++ b/envs/__init__.py
@@ -48,12 +48,14 @@ class Env(object):
json.dump({'key': key, 'var_type': var_type, 'default': default, 'value': os.getenv(key)}, f)
f.write(',')
value = os.getenv(key, default)
- if not value and not allow_none:
- raise EnvsValueException('{}: Environment Variable Not Set'.format(key))
if not var_type in self.valid_types.keys():
raise ValueError(
'The var_type argument should be one of the following {0}'.format(
','.join(self.valid_types.keys())))
+ if value is None:
+ if not allow_none:
+ raise EnvsValueException('{}: Environment Variable Not Set'.format(key))
+ return value
return self.validate_type(value, self.valid_types[var_type], key)
def validate_type(self, value, klass, key):
| capless/envs | 3bfd12ee46c4da92d0cdf68a183531ffdeb5ea73 | diff --git a/envs/tests.py b/envs/tests.py
index 741ad43..1b066df 100644
--- a/envs/tests.py
+++ b/envs/tests.py
@@ -14,6 +14,7 @@ except ImportError:
import sys
from envs import env
+from envs.exceptions import EnvsValueException
class EnvTestCase(unittest.TestCase):
def setUp(self):
@@ -113,6 +114,40 @@ class EnvTestCase(unittest.TestCase):
self.assertEqual(env('HELLO', 'true', var_type='boolean'), True)
self.assertEqual(env('HELLO', Decimal('3.14'), var_type='decimal'), Decimal('3.14'))
+ def test_without_defaults_allow_none(self):
+ self.assertEqual(env('HELLO'), None)
+ self.assertEqual(env('HELLO', var_type='integer'), None)
+ self.assertEqual(env('HELLO', var_type='float'), None)
+ self.assertEqual(env('HELLO', var_type='list'), None)
+
+ def test_without_defaults_disallow_none(self):
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', allow_none=False)
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', var_type='integer', allow_none=False)
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', var_type='float', allow_none=False)
+ with self.assertRaises(EnvsValueException):
+ env('HELLO', var_type='list', allow_none=False)
+
+ def test_empty_values(self):
+ os.environ.setdefault('EMPTY', '')
+ self.assertEqual(env('EMPTY'), '')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='integer')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='float')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='list')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='dict')
+ with self.assertRaises(SyntaxError):
+ env('EMPTY', var_type='tuple')
+ with self.assertRaises(ValueError):
+ env('EMPTY', var_type='boolean')
+ with self.assertRaises(ArithmeticError):
+ env('EMPTY', var_type='decimal')
+
'''
Each CLI Test must be run outside of test suites in isolation
since Click CLI Runner alters the global context
| allow_none=True, type "integer" -> error
hi
thank you for your usefull package :)
i think i've found one small bug.
```
>>> from envs import env
>>> PAGE_CACHE=env("PAGE_CACHE", None, "integer", allow_none=True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/tlb/.cache/pypoetry/virtualenvs/homepage-B7IEgj7y-py3.8/lib/python3.8/site-packages/envs/__init__.py", line 57, in __call__
return self.validate_type(value, self.valid_types[var_type], key)
File "/home/tlb/.cache/pypoetry/virtualenvs/homepage-B7IEgj7y-py3.8/lib/python3.8/site-packages/envs/__init__.py", line 66, in validate_type
return klass(ast.literal_eval(value))
File "/usr/lib/python3.8/ast.py", line 96, in literal_eval
return _convert(node_or_string)
File "/usr/lib/python3.8/ast.py", line 95, in _convert
return _convert_signed_num(node)
File "/usr/lib/python3.8/ast.py", line 74, in _convert_signed_num
return _convert_num(node)
File "/usr/lib/python3.8/ast.py", line 66, in _convert_num
raise ValueError('malformed node or string: ' + repr(node))
ValueError: malformed node or string: None
```
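The traceback shows the `integer` handler passing the `None` default straight into `ast.literal_eval`. A minimal sketch of the behaviour being asked for (the helper name and exception type below are invented for illustration and are not the library's API):

```python
import os


def env_or_none(key, default=None, allow_none=True):
    """Return the raw environment value, short-circuiting before any type parsing."""
    value = os.getenv(key, default)
    if value is None:
        if not allow_none:
            raise LookupError("{}: environment variable not set".format(key))
        return None  # nothing to parse, so ast.literal_eval is never reached
    return value


print(env_or_none("PAGE_CACHE"))  # -> None instead of a ValueError
```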
I would expect `None` and not an exception. | 0.0 | 3bfd12ee46c4da92d0cdf68a183531ffdeb5ea73 | [
"envs/tests.py::EnvTestCase::test_without_defaults_allow_none"
]
| [
"envs/tests.py::EnvTestCase::test_boolean_invalid",
"envs/tests.py::EnvTestCase::test_boolean_valid",
"envs/tests.py::EnvTestCase::test_boolean_valid_false",
"envs/tests.py::EnvTestCase::test_decimal_invalid",
"envs/tests.py::EnvTestCase::test_decimal_valid",
"envs/tests.py::EnvTestCase::test_defaults",
"envs/tests.py::EnvTestCase::test_dict_invalid",
"envs/tests.py::EnvTestCase::test_dict_valid",
"envs/tests.py::EnvTestCase::test_empty_values",
"envs/tests.py::EnvTestCase::test_float_invalid",
"envs/tests.py::EnvTestCase::test_float_valid",
"envs/tests.py::EnvTestCase::test_integer_invalid",
"envs/tests.py::EnvTestCase::test_integer_valid",
"envs/tests.py::EnvTestCase::test_list_invalid",
"envs/tests.py::EnvTestCase::test_list_valid",
"envs/tests.py::EnvTestCase::test_string_valid",
"envs/tests.py::EnvTestCase::test_tuple_invalid",
"envs/tests.py::EnvTestCase::test_tuple_valid",
"envs/tests.py::EnvTestCase::test_without_defaults_disallow_none",
"envs/tests.py::EnvTestCase::test_wrong_var_type"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-10-13 08:22:22+00:00 | apache-2.0 | 1,501 |
|
capsulecorplab__mindgraph-22 | diff --git a/README.md b/README.md
index c80ec3f..644f1cd 100644
--- a/README.md
+++ b/README.md
@@ -20,14 +20,14 @@ $ pip install git+https://github.com/capsulecorplab/mindgraph.git
```
>>> import mindgraph as mg
->>> graph = mg.Graph('learn all the things')
->>> thing1 = graph.append('1st thing')
->>> thing2 = graph.append('2nd thing')
->>> thing3 = graph.append('3rd thing')
+>>> project = mg.Project('learn all the things')
+>>> thing1 = project.append('1st thing')
+>>> thing2 = project.append('2nd thing')
+>>> thing3 = project.append('3rd thing')
->>> graph.remove(2)
+>>> project.remove(2)
->>> thing1 = graph[0]
+>>> thing1 = project[0]
>>> thing1_1 = thing1.append('thing within a thing')
>>> thing1_2 = thing1.append('thing blocking a thing')
>>> thing1_1.blockedby(thing1_2)
@@ -36,7 +36,7 @@ $ pip install git+https://github.com/capsulecorplab/mindgraph.git
>>> thing2_2 = thing2.append('another thing blocking a thing')
>>> thing2_2.blocking(thing2_1)
->>> print(graph)
+>>> print(project)
learn all the things:
- 1st thing:
- thing within a thing
@@ -46,11 +46,11 @@ learn all the things:
- another thing blocking a thing
```
-`Graph` objects can be exported to, and imported from, a yaml file for storage:
+Projects can be exported to, or imported from, a yaml file for external storage:
```
->>> graph.to_yaml('mygraph.yaml')
->>> graph2 = mg.read_yaml('mygraph.yaml')
+>>> project.to_yaml('myproject.yaml')
+>>> revivedproject = mg.read_yaml('myproject.yaml')
```
## Contribute
diff --git a/mindgraph/graph.py b/mindgraph/graph.py
index d7b1769..be55272 100644
--- a/mindgraph/graph.py
+++ b/mindgraph/graph.py
@@ -4,109 +4,106 @@ from typing import (Any, Callable, Generator, Iterator, List, Optional, Set,
from yaml import dump, load
-class Node(object):
- """node class"""
+class Task(object):
+ """Task class"""
- def __init__(self, name: str = '', weight: int = 1) -> None:
- self._dependencies = list() # type: List[Node]
- self._threads = list() # type: List[Node]
+ def __init__(self, name: str = '', priority: int = 1) -> None:
+ self._blockers = list() # type: List[Task]
+ self._subtasks = list() # type: List[Task]
self._name = '' # type: str
- self._weight = weight # type: int
+ self._priority = priority # type: int
if type(name) is str:
self._name = name
else:
raise TypeError
- def append(self, newnode) -> "Node":
- """ Creates a new Node and appends it to threads """
- if type(newnode) is str:
- newnode = Node(newnode)
- elif type(newnode) is not Node:
+ def append(self, newtask) -> "Task":
+ """ Creates a new Task and appends it to subtasks """
+ if type(newtask) is str:
+ newtask = Task(newtask)
+ elif type(newtask) is not Task:
raise TypeError
- self._threads.append(newnode)
- return newnode
+ self._subtasks.append(newtask)
+ return newtask
- def pop(self, index: int) -> "Node":
- """ Pops the Node from threads[index] """
- return self._threads.pop(index)
+ def pop(self, index: int) -> "Task":
+ """ Pops the Task from subtasks[index] """
+ return self._subtasks.pop(index)
- def blockedby(self, node: "Node") -> None:
- """ Adds a Node to the dependenceis list """
- if type(node) is Node:
- self._dependencies.append(node)
+ def blockedby(self, task: "Task") -> None:
+ """ Adds a task to the subtasks list """
+ if type(task) is Task:
+ self._blockers.append(task)
return None
else:
raise TypeError
- def blocking(self, node: "Node") -> None:
- """ Adds this Node to another node's dependencies list """
- if type(node) is Node:
- node._dependencies.append(self)
+ def blocking(self, task: "Task") -> None:
+ """ Adds this task to another task's blockers list """
+ if type(task) is Task:
+ task._blockers.append(self)
return None
else:
raise TypeError
- def __getitem__(self, key: int) -> "Node":
- return self._threads[key]
+ def __getitem__(self, key: int) -> "Task":
+ return self._subtasks[key]
def __repr__(self) -> str:
- return '\n'.join(self.format_tree())
+ return '\n'.join(self._format_tree())
- def format_tree(self: "Node", depth: int = 0) -> Iterator[str]:
- """Format node and dependents in tree format, emitting lines
-
- Assumes no cycles in graph
- """
+ def _format_tree(self: "Task", depth: int = 0) -> Iterator[str]:
+ """Generates task and subtasks into a string formatted tree"""
indent = ' ' * depth
bullet = '- ' if depth != 0 else ''
- suffix = ':' if self.threads else ''
+ suffix = ':' if self.subtasks else ''
line = '{indent}{bullet}{self.name}{suffix}'.format(**locals())
yield line
- for n in self.threads:
- yield from n.format_tree(depth+1)
+ for n in self.subtasks:
+ yield from n._format_tree(depth+1)
def _postorder(self,
depth: int = 0,
- visited: Set["Node"] = None,
- node_key: Callable[["Node"], Any]=None,
- ) -> Generator[Tuple[int, "Node"], None, Set["Node"]]:
- """Post-order traversal of graph rooted at node"""
+ visited: Set["Task"] = None,
+ taskkey: Callable[["Task"], Any]=None,
+ ) -> Generator[Tuple[int, "Task"], None, Set["Task"]]:
+ """Post-order traversal of Project rooted at Task"""
if visited is None:
visited = set()
- children = self._threads
- if node_key is not None:
- children = sorted(self._threads, key=node_key)
+ children = self._subtasks
+ if taskkey is not None:
+ children = sorted(self._subtasks, key=taskkey)
for child in children:
if child not in visited:
visited = yield from child._postorder(depth+1,
visited,
- node_key)
+ taskkey)
yield (depth, self)
visited.add(self)
return visited
- def todo(self) -> Iterator["Node"]:
- """Generate nodes in todo order
+ def todo(self) -> Iterator["Task"]:
+ """Generate Tasks in todo order
- Nodes are scheduled by weight and to resolve blocking tasks
+ Tasks are scheduled by priority and to resolve blocking tasks
"""
- # sorts by weight (2 before 1), then alphabetical
- def node_key(node):
- return (-node.weight, node.name)
- return (x[1] for x in self._postorder(node_key=node_key))
+ # sorts by priority (2 before 1), then alphabetical
+ def taskkey(Task):
+ return (-Task.priority, Task.name)
+ return (x[1] for x in self._postorder(taskkey=taskkey))
def __str__(self) -> str:
return dump(load(str(self.__repr__())), default_flow_style=False)
@property
- def dependencies(self) -> List["Node"]:
- """ dependencies getter """
- return self._dependencies
+ def blockers(self) -> List["Task"]:
+ """ blockers getter """
+ return self._blockers
@property
def name(self) -> str:
@@ -119,38 +116,37 @@ class Node(object):
self._name = name
@property
- def threads(self) -> List["Node"]:
- """ threads getter """
- return self._threads
+ def subtasks(self) -> List["Task"]:
+ """ subtasks getter """
+ return self._subtasks
@property
- def weight(self) -> int:
- """ weight getter """
- return self._weight
-
- @weight.setter
- def weight(self, value: int) -> None:
- """ weight setter """
- self._weight = value
-
+ def priority(self) -> int:
+ """ priority getter """
+ return self._priority
-class Graph(Node):
- """A Graph model of the mind"""
-
- def __init__(self, name=None) -> None:
- Node.__init__(self, name)
+ @priority.setter
+ def priority(self, value: int) -> None:
+ """ priority setter """
+ self._priority = value
def to_yaml(self, filename=None) -> None:
- """ Write this Graph to a yaml file """
+ """ Write this Project to a yaml file """
with open(filename, 'w') as f:
f.write(dump(self))
-def read_yaml(filename: str = "") -> Graph:
- """ Load a Graph from a yaml file """
+class Project(object):
+ """Returns a task representing the root of your project"""
+ def __new__(cls, name: str=None) -> Task:
+ return Task(name)
+
+
+def read_yaml(filename: str = "") -> Task:
+ """ Load a project from a yaml file """
with open(filename, 'r') as f:
rv = load(f.read())
- if type(rv) is Graph:
+ if type(rv) is Task:
return rv
else:
raise TypeError(type(rv))
| capsulecorplab/mindgraph | cfc9443eda343310c5f2cadb91f61e1849990a05 | diff --git a/test/test_mindgraph.py b/test/test_mindgraph.py
index 1fc5c76..3a375fa 100644
--- a/test/test_mindgraph.py
+++ b/test/test_mindgraph.py
@@ -11,25 +11,25 @@ from mindgraph import *
@pytest.fixture(scope="module")
-def graph():
- graph = Graph('learn all the things')
- return graph
+def project():
+ project = Project('learn all the things')
+ return project
@pytest.fixture
-def task_graph():
- # setup example graph from issue #14
- g = Graph('build a thing')
+def task_project():
+ # setup example project from issue #14
+ g = Project('build a thing')
t1 = g.append('task 1')
- t1.weight = 3
+ t1.priority = 3
t11 = t1.append('task 1.1')
t12 = t1.append('task 1.2')
t13 = t1.append('task 1.3')
- t13.weight = 3
+ t13.priority = 3
t2 = g.append('task 2')
- t2.weight = 2
+ t2.priority = 2
t21 = t2.append('task 2.1')
t22 = t2.append('task 2.2')
t221 = t22.append('task 2.2.1')
@@ -39,111 +39,123 @@ def task_graph():
t31 = t3.append('task 3.1')
t32 = t3.append('task 3.2')
- t32.threads.append(t22)
- t12.threads.append(t22)
+ t32.subtasks.append(t22)
+ t12.subtasks.append(t22)
return g
-def test_todo_high_weights_win(task_graph):
- """High weights are scheduled before low weights"""
- todo = [n.name for n in task_graph.todo()]
+def test_todo_high_priorities_win(task_project):
+ """High priorities are scheduled before low priorities"""
+ todo = [n.name for n in task_project.todo()]
assert todo.index('task 1') < todo.index('task 2')
assert todo.index('task 1') < todo.index('task 3')
assert todo.index('task 1.3') < todo.index('task 1.1')
-def test_todo_blocking_tasks_win(task_graph):
+def test_todo_blocking_tasks_win(task_project):
"""Blocking tasks are scheduled before blocked tasks"""
- todo = [n.name for n in task_graph.todo()]
+ todo = [n.name for n in task_project.todo()]
assert todo.index('task 2.2') < todo.index('task 3.2')
assert todo.index('task 2.2') < todo.index('task 1.2')
assert todo.index('task 1.1') < todo.index('task 1.2')
-def test_postorder_default_weights_ignored(task_graph):
- """Post-order traversal ignores node weights by default"""
- po = [n.name for _, n in task_graph._postorder()]
+def test_postorder_default_priorities_ignored(task_project):
+ """Post-order traversal ignores task priorities by default"""
+ po = [n.name for _, n in task_project._postorder()]
assert po.index('task 1.1') < po.index('task 1.3')
-def test_node_init_typeerror():
+def test_task_init_typeerror():
with pytest.raises(TypeError) as info:
- node = Node(47)
+ task = Task(47)
assert "" in str(info.value)
-def test_node_append_node():
- rootNode = Node('root node')
- subNode1 = rootNode.append(Node('sub node'))
- subNode2 = rootNode.append(Node('sub node 2'))
- assert rootNode[0] is subNode1
- assert rootNode[1] is subNode2
+def test_task_append_task():
+ rootTask = Task('root task')
+ subTask1 = rootTask.append(Task('sub task'))
+ subTask2 = rootTask.append(Task('sub task 2'))
+ assert rootTask[0] is subTask1
+ assert rootTask[1] is subTask2
-def test_node_append(graph):
- thing1 = graph.append('1st thing')
- thing2 = graph.append('2nd thing')
- thing3 = graph.append('3rd thing')
+def test_task_append(project):
+ thing1 = project.append('1st thing')
+ thing2 = project.append('2nd thing')
+ thing3 = project.append('3rd thing')
- assert thing1 is graph[0]
- assert thing2 is graph[1]
- assert thing3 is graph[2]
+ assert thing1 is project[0]
+ assert thing2 is project[1]
+ assert thing3 is project[2]
assert thing1.name == '1st thing'
assert thing2.name == '2nd thing'
assert thing3.name == '3rd thing'
-def test_node_pop(graph):
- assert graph[2].name == '3rd thing'
- graph.pop(2)
+def test_task_pop(project):
+ assert project[2].name == '3rd thing'
+ project.pop(2)
with pytest.raises(IndexError) as info:
- thing3 = graph[2]
+ thing3 = project[2]
assert "" in str(info.value)
-def test_node_pop_fail1(graph):
+def test_task_pop_fail1(project):
with pytest.raises(IndexError):
- graph.pop(20000)
+ project.pop(20000)
-def test_node_append_TypeError():
+def test_task_append_TypeError():
with pytest.raises(TypeError) as info:
- node = Node('mynode')
- node.append(47)
+ task = Task('mytask')
+ task.append(47)
assert "" in str(info.value)
-def test_blockedby(graph):
- thing1 = graph[0]
+def test_blockedby(project):
+ thing1 = project[0]
thing1_1 = thing1.append('thing within a thing')
thing1_2 = thing1.append('thing blocking a thing')
thing1_1.blockedby(thing1_2)
- assert thing1_1.dependencies[0].name == 'thing blocking a thing'
+ assert thing1_1.blockers[0].name == 'thing blocking a thing'
-def test_blocking(graph):
- thing2 = graph[1]
+def test_blockedby_TypeError():
+ with pytest.raises(TypeError):
+ task = Task('mytask')
+ task.blockedby(47)
+
+
+def test_blocking(project):
+ thing2 = project[1]
thing2_1 = thing2.append('another thing within a thing')
thing2_2 = thing2.append('another thing blocking a thing')
thing2_2.blocking(thing2_1)
- assert thing2_1.dependencies[0].name == 'another thing blocking a thing'
+ assert thing2_1.blockers[0].name == 'another thing blocking a thing'
+
+
+def test_blocking_TypeError():
+ with pytest.raises(TypeError):
+ task = Task('mytask')
+ task.blocking(47)
-def test_repr(graph):
- assert graph.name == 'learn all the things'
+def test_repr(project):
+ assert project.name == 'learn all the things'
- thing1 = graph[0]
- thing2 = graph[1]
+ thing1 = project[0]
+ thing2 = project[1]
with pytest.raises(IndexError) as info:
- thing3 = graph[2]
+ thing3 = project[2]
assert "" in str(info.value)
assert thing1.name == '1st thing'
assert thing2.name == '2nd thing'
- assert str(graph) == "".join([
+ assert str(project) == "".join([
"learn all the things:\n",
"- 1st thing:\n",
" - thing within a thing\n",
@@ -154,14 +166,14 @@ def test_repr(graph):
])
-def test_deep_repr(graph):
+def test_deep_repr(project):
- thing2_1 = graph[1][0]
+ thing2_1 = project[1][0]
assert thing2_1.name == 'another thing within a thing'
thing2_1.append('super deep thing')
- assert str(graph) == "".join([
+ assert str(project) == "".join([
"learn all the things:\n",
"- 1st thing:\n",
" - thing within a thing\n",
@@ -175,42 +187,42 @@ def test_deep_repr(graph):
thing2_1.pop(0)
-def test_weight_getter_setter():
- node = Node('myNode')
- default_weight = node.weight
- node.weight = 5
+def test_priority_getter_setter():
+ task = Task('myTask')
+ default_priority = task.priority
+ task.priority = 5
- assert default_weight == 1
- assert node.weight == 5
+ assert default_priority == 1
+ assert task.priority == 5
def test_name_getter_setter():
- node = Node()
- default_name = node.name
- node.name = 'a new name'
+ task = Task()
+ default_name = task.name
+ task.name = 'a new name'
assert default_name == ''
- assert node.name == 'a new name'
+ assert task.name == 'a new name'
-def test_to_yaml(graph):
- assert graph.name == 'learn all the things'
- assert graph[0].name == '1st thing'
- graph.to_yaml('mindgraph.yaml')
- graph2 = read_yaml('mindgraph.yaml')
- test_repr(graph2)
- assert repr(graph) == repr(graph2)
- os.remove('mindgraph.yaml')
+def test_to_yaml(project):
+ assert project.name == 'learn all the things'
+ assert project[0].name == '1st thing'
+ project.to_yaml('project.yaml')
+ project2 = read_yaml('project.yaml')
+ test_repr(project2)
+ assert repr(project) == repr(project2)
+ os.remove('project.yaml')
def test_to_yaml_TypeError():
- not_a_graph = yaml.dump("not a graph")
- with open('not_a_graph.yaml', 'w') as f:
- f.write(not_a_graph)
+ not_a_project = yaml.dump("not a project")
+ with open('not_a_project.yaml', 'w') as f:
+ f.write(not_a_project)
with pytest.raises(TypeError) as info:
- read_yaml('not_a_graph.yaml')
+ read_yaml('not_a_project.yaml')
assert "" in str(info.value)
- os.remove('not_a_graph.yaml')
+ os.remove('not_a_project.yaml')
def test_parser():
| Name changes for Graph, Node, and Node attributes
To implement the following changes to the current naming convention:
`Graph` to `Project`
`Node` to `Task`
`Node.threads` to `Task.subtasks`
`Node.dependencies` to `Task.blockers`
`Node.weight` to `Task.priority` | 0.0 | cfc9443eda343310c5f2cadb91f61e1849990a05 | [
"test/test_mindgraph.py::test_todo_high_priorities_win",
"test/test_mindgraph.py::test_todo_blocking_tasks_win",
"test/test_mindgraph.py::test_postorder_default_priorities_ignored",
"test/test_mindgraph.py::test_task_init_typeerror",
"test/test_mindgraph.py::test_task_append_task",
"test/test_mindgraph.py::test_task_append",
"test/test_mindgraph.py::test_task_pop",
"test/test_mindgraph.py::test_task_pop_fail1",
"test/test_mindgraph.py::test_task_append_TypeError",
"test/test_mindgraph.py::test_blockedby",
"test/test_mindgraph.py::test_blockedby_TypeError",
"test/test_mindgraph.py::test_blocking",
"test/test_mindgraph.py::test_blocking_TypeError",
"test/test_mindgraph.py::test_priority_getter_setter",
"test/test_mindgraph.py::test_name_getter_setter"
]
| [
"test/test_mindgraph.py::test_to_yaml_TypeError",
"test/test_mindgraph.py::test_parser"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-10-25 22:30:03+00:00 | mit | 1,502 |
|
capsulecorplab__mindgraph-23 | diff --git a/mindgraph/graph.py b/mindgraph/graph.py
index be55272..c1dc475 100644
--- a/mindgraph/graph.py
+++ b/mindgraph/graph.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-from typing import (Any, Callable, Generator, Iterator, List, Optional, Set,
- Tuple)
+from typing import Any, Callable, Iterator, List, Set, Tuple
+
from yaml import dump, load
@@ -67,34 +67,36 @@ class Task(object):
depth: int = 0,
visited: Set["Task"] = None,
taskkey: Callable[["Task"], Any]=None,
- ) -> Generator[Tuple[int, "Task"], None, Set["Task"]]:
+ ) -> Iterator[Tuple[int, "Task"]]:
"""Post-order traversal of Project rooted at Task"""
+ from itertools import chain
+
if visited is None:
visited = set()
- children = self._subtasks
- if taskkey is not None:
- children = sorted(self._subtasks, key=taskkey)
+ if taskkey is None:
+ blockers = self.blockers
+ subtasks = self.subtasks
+ else:
+ blockers = sorted(self.blockers, key=taskkey)
+ subtasks = sorted(self.subtasks, key=taskkey)
- for child in children:
- if child not in visited:
- visited = yield from child._postorder(depth+1,
- visited,
- taskkey)
+ for node in chain(blockers, subtasks):
+ if node in visited:
+ continue
+ yield from node._postorder(depth+1, visited, taskkey)
yield (depth, self)
visited.add(self)
- return visited
-
def todo(self) -> Iterator["Task"]:
"""Generate Tasks in todo order
Tasks are scheduled by priority and to resolve blocking tasks
"""
# sorts by priority (2 before 1), then alphabetical
- def taskkey(Task):
- return (-Task.priority, Task.name)
+ def taskkey(task):
+ return (-task.priority, task.name)
return (x[1] for x in self._postorder(taskkey=taskkey))
def __str__(self) -> str:
| capsulecorplab/mindgraph | aa05682130d34c308f699936d16299b5029d26ab | diff --git a/test/test_mindgraph.py b/test/test_mindgraph.py
index 3a375fa..368b7a3 100644
--- a/test/test_mindgraph.py
+++ b/test/test_mindgraph.py
@@ -1,10 +1,10 @@
import os
-from random import choice
import string
from argparse import Namespace
+from random import choice
+from unittest.mock import mock_open, patch
import pytest
-from unittest.mock import mock_open, patch
import yaml
from mindgraph import *
@@ -39,8 +39,8 @@ def task_project():
t31 = t3.append('task 3.1')
t32 = t3.append('task 3.2')
- t32.subtasks.append(t22)
- t12.subtasks.append(t22)
+ t32.blockedby(t22)
+ t12.blockedby(t22)
return g
| add precedence for "blockers" in todo() method
The current `todo()` method implementation only accounts for `Task.subtasks` and `Task.priority`. It needs to also account for `Task.blockers`, so that `Task` objects in "blockers" are yielded before `Task` objects in "subtasks". | 0.0 | aa05682130d34c308f699936d16299b5029d26ab | [
"test/test_mindgraph.py::test_todo_blocking_tasks_win"
]
| [
"test/test_mindgraph.py::test_todo_high_priorities_win",
"test/test_mindgraph.py::test_postorder_default_priorities_ignored",
"test/test_mindgraph.py::test_task_init_typeerror",
"test/test_mindgraph.py::test_task_append_task",
"test/test_mindgraph.py::test_task_append",
"test/test_mindgraph.py::test_task_pop",
"test/test_mindgraph.py::test_task_pop_fail1",
"test/test_mindgraph.py::test_task_append_TypeError",
"test/test_mindgraph.py::test_blockedby",
"test/test_mindgraph.py::test_blockedby_TypeError",
"test/test_mindgraph.py::test_blocking",
"test/test_mindgraph.py::test_blocking_TypeError",
"test/test_mindgraph.py::test_priority_getter_setter",
"test/test_mindgraph.py::test_name_getter_setter",
"test/test_mindgraph.py::test_to_yaml_TypeError",
"test/test_mindgraph.py::test_parser"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2018-11-01 07:05:02+00:00 | mit | 1,503 |
|
caronc__apprise-324 | diff --git a/.gitignore b/.gitignore
index 11f190a..0315c5b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -67,3 +67,8 @@ target/
#PyCharm
.idea
+
+#PyDev (Eclipse)
+.project
+.pydevproject
+.settings
diff --git a/apprise/utils.py b/apprise/utils.py
index 21f2c49..4b4833b 100644
--- a/apprise/utils.py
+++ b/apprise/utils.py
@@ -120,9 +120,9 @@ GET_EMAIL_RE = re.compile(
r'(?P<email>(?P<userid>[a-z0-9$%=_~-]+'
r'(?:\.[a-z0-9$%+=_~-]+)'
r'*)@(?P<domain>('
- r'(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+'
- r'[a-z0-9](?:[a-z0-9-]*[a-z0-9]))|'
- r'[a-z0-9][a-z0-9-]{5,})))'
+ r'(?:[a-z0-9](?:[a-z0-9_-]*[a-z0-9])?\.)+'
+ r'[a-z0-9](?:[a-z0-9_-]*[a-z0-9]))|'
+ r'[a-z0-9][a-z0-9_-]{5,})))'
r'\s*>?', re.IGNORECASE)
# Regular expression used to extract a phone number
@@ -232,9 +232,12 @@ def is_hostname(hostname, ipv4=True, ipv6=True):
# - Hostnames can ony be comprised of alpha-numeric characters and the
# hyphen (-) character.
# - Hostnames can not start with the hyphen (-) character.
+ # - as a workaround for https://github.com/docker/compose/issues/229 to
+ # being able to address services in other stacks, we also allow
+ # underscores in hostnames
# - labels can not exceed 63 characters
allowed = re.compile(
- r'(?!-)[a-z0-9][a-z0-9-]{1,62}(?<!-)$',
+ r'^[a-z0-9][a-z0-9_-]{1,62}(?<!-)$',
re.IGNORECASE,
)
| caronc/apprise | 2c2722f61f9f983827c8246943f0462098e5a0ed | diff --git a/test/test_utils.py b/test/test_utils.py
index b187da0..37c2ba7 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -532,6 +532,8 @@ def test_is_hostname():
assert utils.is_hostname('yahoo.ca.') == 'yahoo.ca'
assert utils.is_hostname('valid-dashes-in-host.ca') == \
'valid-dashes-in-host.ca'
+ assert utils.is_hostname('valid-underscores_in_host.ca') == \
+ 'valid-underscores_in_host.ca'
# Invalid Hostnames
assert utils.is_hostname('-hostname.that.starts.with.a.dash') is False
@@ -539,7 +541,6 @@ def test_is_hostname():
assert utils.is_hostname(' spaces ') is False
assert utils.is_hostname(' ') is False
assert utils.is_hostname('') is False
- assert utils.is_hostname('valid-underscores_in_host.ca') is False
# Valid IPv4 Addresses
assert utils.is_hostname('127.0.0.1') == '127.0.0.1'
@@ -625,6 +626,14 @@ def test_is_email():
assert 'test' == results['user']
assert '' == results['label']
+ results = utils.is_email('test@my-valid_host.com')
+ assert '' == results['name']
+ assert 'test@my-valid_host.com' == results['email']
+ assert 'test@my-valid_host.com' == results['full_email']
+ assert 'my-valid_host.com' == results['domain']
+ assert 'test' == results['user']
+ assert '' == results['label']
+
results = utils.is_email('[email protected]')
assert '' == results['name']
assert '[email protected]' == results['email']
| 'Unparseable E-Mail URL' error for hostnames containing underscore
With underscore:
```bash
$ docker run --rm caronc/apprise:latest /usr/local/bin/apprise --body='hello world' \
'mailto://smtp_service:25/?from=foo@localhost&to=bar@localhost'
2020-11-22 16:33:16,297 - ERROR - Unparseable E-Mail URL mailto://smtp_service:25/?from=foo@localhost&to=bar@localhost
2020-11-22 16:33:16,297 - ERROR - You must specify at least one server URL or populated configuration file.
```
Without underscore:
```bash
$ docker run --rm caronc/apprise:latest /usr/local/bin/apprise --body='hello world' \
'mailto://smtpservice:25/?from=foo@localhost&to=bar@localhost'
2020-11-22 16:34:12,541 - INFO - Notifying 1 service(s) asynchronously.
```
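The rejection happens in the hostname validation step. The two patterns below are adapted from the check changed in the patch above, shown only to illustrate why `smtp_service` fails the parse (this is not the full URL parser):

```python
import re

# Strict label pattern: letters, digits and hyphens only.
strict = re.compile(r'(?!-)[a-z0-9][a-z0-9-]{1,62}(?<!-)$', re.IGNORECASE)
# Relaxed variant that also tolerates underscores inside the label.
relaxed = re.compile(r'^[a-z0-9][a-z0-9_-]{1,62}(?<!-)$', re.IGNORECASE)

print(bool(strict.match('smtp_service')))   # False: the underscore is rejected
print(bool(relaxed.match('smtp_service')))  # True
```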
Underscores in hostnames are completely legal, see https://stackoverflow.com/a/2183140/5116073 | 0.0 | 2c2722f61f9f983827c8246943f0462098e5a0ed | [
"test/test_utils.py::test_is_hostname",
"test/test_utils.py::test_is_email"
]
| [
"test/test_utils.py::test_parse_qsd",
"test/test_utils.py::test_parse_url",
"test/test_utils.py::test_parse_bool",
"test/test_utils.py::test_is_ipaddr",
"test/test_utils.py::test_parse_emails",
"test/test_utils.py::test_parse_urls",
"test/test_utils.py::test_parse_list",
"test/test_utils.py::test_exclusive_match",
"test/test_utils.py::test_apprise_validate_regex",
"test/test_utils.py::test_environ_temporary_change",
"test/test_utils.py::test_apply_templating"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-11-22 17:34:28+00:00 | mit | 1,504 |
|
carpedm20__emoji-128 | diff --git a/emoji/__init__.py b/emoji/__init__.py
index 958e62e..26fed55 100644
--- a/emoji/__init__.py
+++ b/emoji/__init__.py
@@ -25,6 +25,12 @@ from emoji.unicode_codes import EMOJI_UNICODE
from emoji.unicode_codes import UNICODE_EMOJI
from emoji.unicode_codes import UNICODE_EMOJI_ALIAS
+__all__ = [
+ # emoji.core
+ 'emojize', 'demojize', 'get_emoji_regexp', 'emoji_count', 'emoji_lis',
+ # emoji.unicode_codes
+ 'EMOJI_ALIAS_UNICODE', 'EMOJI_UNICODE', 'UNICODE_EMOJI', 'UNICODE_EMOJI_ALIAS',
+]
__version__ = '0.6.0'
__author__ = 'Taehoon Kim and Kevin Wurster'
__email__ = '[email protected]'
diff --git a/emoji/core.py b/emoji/core.py
index 9537692..327ad2a 100644
--- a/emoji/core.py
+++ b/emoji/core.py
@@ -99,20 +99,26 @@ def get_emoji_regexp():
_EMOJI_REGEXP = re.compile(pattern)
return _EMOJI_REGEXP
+
def emoji_lis(string):
- """Return the location and emoji in list of dic format
- >>>emoji.emoji_lis("Hi, I am fine. 😁")
- >>>[{'location': 15, 'emoji': '😁'}]
+ """
+ Returns the location and emoji in list of dict format
+ >>> emoji.emoji_lis("Hi, I am fine. 😁")
+ >>> [{'location': 15, 'emoji': '😁'}]
"""
_entities = []
- for pos,c in enumerate(string):
- if c in unicode_codes.UNICODE_EMOJI:
+
+ for match in get_emoji_regexp().finditer(string):
_entities.append({
- "location":pos,
- "emoji": c
- })
+ "location": match.start(),
+ "emoji": match.group()
+ })
+
return _entities
+
def emoji_count(string):
- """Returns the count of emojis in a string"""
- return sum(1 for i in string if i in unicode_codes.UNICODE_EMOJI)
+ """
+ Returns the count of emojis in a string
+ """
+ return len(emoji_lis(string))
| carpedm20/emoji | d26371bd28d2c9307826574cf21b0d0bd47787d8 | diff --git a/tests/test_core.py b/tests/test_core.py
index 48ab517..c477c77 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -66,10 +66,14 @@ def test_demojize_complicated_string():
destructed = emoji.demojize(emojid)
assert constructed == destructed, "%s != %s" % (constructed, destructed)
+
def test_emoji_lis():
- assert emoji.emoji_lis("Hi, I am fine. 😁") == [{'location': 15, 'emoji': '😁'}]
+ assert emoji.emoji_lis("Hi, I am fine. 😁") == [{'location': 15, 'emoji': '😁'}]
assert emoji.emoji_lis("Hi") == []
+ assert emoji.emoji_lis("Hello 🇫🇷👌") == [{'emoji': '🇫🇷', 'location': 6}, {'emoji': '👌', 'location': 8}]
+
def test_emoji_count():
- assert emoji.emoji_count("Hi, I am fine. 😁") == 1
+ assert emoji.emoji_count("Hi, I am fine. 😁") == 1
assert emoji.emoji_count("Hi") == 0
+ assert emoji.emoji_count("Hello 🇫🇷👌") == 2
 | emoji_lis() function doesn't work for some emoji sequences
Hi there, I'm noticing that in some circumstances, e.g. the UK nation flag emoji of Scotland, England and Wales, this function isn't spitting out the complete sequence. Instead, the function just provides a generic black flag emoji with none of the other codepoints that are apparently specified in the unicode_codes.py file. Is this a bug and if so, how would I fix it? | 0.0 | d26371bd28d2c9307826574cf21b0d0bd47787d8 | [
"tests/test_core.py::test_emoji_lis",
"tests/test_core.py::test_emoji_count"
]
| [
"tests/test_core.py::test_emojize_name_only",
"tests/test_core.py::test_emojize_complicated_string",
"tests/test_core.py::test_emojize_invalid_emoji",
"tests/test_core.py::test_alias",
"tests/test_core.py::test_invalid_alias",
"tests/test_core.py::test_demojize_name_only",
"tests/test_core.py::test_demojize_complicated_string"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-08-07 17:54:13+00:00 | bsd-3-clause | 1,505 |
|
carsonyl__pypac-28 | diff --git a/pypac/api.py b/pypac/api.py
index 6b8c089..4541be4 100644
--- a/pypac/api.py
+++ b/pypac/api.py
@@ -301,8 +301,9 @@ def pac_context_for_url(url, proxy_auth=None):
if pac:
resolver = ProxyResolver(pac, proxy_auth=proxy_auth)
proxies = resolver.get_proxy_for_requests(url)
- os.environ['HTTP_PROXY'] = proxies.get('http')
- os.environ['HTTPS_PROXY'] = proxies.get('https')
+ # Cannot set None for environ. (#27)
+ os.environ['HTTP_PROXY'] = proxies.get('http') or ''
+ os.environ['HTTPS_PROXY'] = proxies.get('https') or ''
yield
if prev_http_proxy:
os.environ['HTTP_PROXY'] = prev_http_proxy
| carsonyl/pypac | 835079168980a55868564e1dd3b784f5203e01ee | diff --git a/tests/test_api.py b/tests/test_api.py
index 2adb5fc..255da3b 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -368,3 +368,11 @@ class TestContextManager(object):
assert os.environ['HTTP_PROXY'] == fake_proxy_url
assert os.environ['HTTPS_PROXY'] == fake_proxy_url
assert os.environ['HTTP_PROXY'] == 'http://env'
+
+ def test_pac_direct(self, monkeypatch):
+ monkeypatch.setenv('HTTP_PROXY', 'http://env')
+ with _patch_get_pac(PACFile(proxy_pac_js_tpl % 'DIRECT')):
+ with pac_context_for_url(arbitrary_url):
+ assert os.environ['HTTP_PROXY'] == ''
+ assert os.environ['HTTPS_PROXY'] == ''
+ assert os.environ['HTTP_PROXY'] == 'http://env'
| "TypeError: putenv() argument 2 must be string, not None" Error when using pac_context_for_url
pypac version: 0.9.0 (retrieved from pip)
Python version: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] on win32
When the configured pac file returns "DIRECT" for a URL then the function will fail with the following error:
```
File "file.py", line 27, in get_job_info
with pac_context_for_url('http://' + self.host):
File "C:\Python27\lib\contextlib.py", line 17, in __enter__
return self.gen.next()
File "C:\Python27\lib\site-packages\pypac\api.py", line 305, in pac_context_for_url
os.environ['HTTP_PROXY'] = proxies.get('http')
File "C:\Python27\lib\os.py", line 422, in __setitem__
putenv(key, item)
TypeError: putenv() argument 2 must be string, not None
```
In this case the PAC file will return "DIRECT" for the URL. The code gets to line 303,
`proxies = resolver.get_proxy_for_requests(url)`,
which leads to pypac/resolver.py:133, where the function proxy_parameter_for_requests("DIRECT") is called. This sets proxy_url_or_direct to None, which is then used for the return values in the dictionary for 'http' and 'https'. These None values are used in
```
os.environ['HTTP_PROXY'] = proxies.get('http')
os.environ['HTTPS_PROXY'] = proxies.get('https')
```
This results in the error `putenv() argument 2 must be string, not None`. | 0.0 | 835079168980a55868564e1dd3b784f5203e01ee | [
"tests/test_api.py::TestContextManager::test_pac_direct"
]
| [
"tests/test_api.py::TestApiFunctions::test_download_pac_not_ok",
"tests/test_api.py::TestApiFunctions::test_registry_filesystem_path",
"tests/test_api.py::TestApiFunctions::test_download_pac_timeout",
"tests/test_api.py::TestApiFunctions::test_download_pac_content_type[headers1-None-None]",
"tests/test_api.py::TestApiFunctions::test_get_pac_via_js",
"tests/test_api.py::TestApiFunctions::test_download_pac_content_type[headers0-None-function",
"tests/test_api.py::TestApiFunctions::test_collect_pac_urls",
"tests/test_api.py::TestApiFunctions::test_get_pac_autodetect",
"tests/test_api.py::TestApiFunctions::test_get_pac_via_url",
"tests/test_api.py::TestApiFunctions::test_download_pac_content_type[headers2-allowed_content_types2-function",
"tests/test_api.py::TestRequests::test_unreachable_proxy[127.0.0.1]",
"tests/test_api.py::TestRequests::test_environment_proxies[http://c.local/x.html-expected_proxies4-http://env]",
"tests/test_api.py::TestRequests::test_environment_proxies[http://a.local/x.html-expected_proxies0-None]",
"tests/test_api.py::TestRequests::test_unreachable_proxy[unreachable.local]",
"tests/test_api.py::TestRequests::test_unreachable_proxy[localhost]",
"tests/test_api.py::TestRequests::test_unreachable_proxy[0.0.0.0]",
"tests/test_api.py::TestRequests::test_environment_proxies[http://foo.b.local/x.html-expected_proxies2-None]",
"tests/test_api.py::TestRequests::test_environment_proxies[http://foob.local/x.html-expected_proxies3-http://env]",
"tests/test_api.py::TestRequests::test_environment_proxies[http://fooa.local/x.html-expected_proxies1-None]",
"tests/test_api.py::TestRequests::test_supercede_environment_settings",
"tests/test_api.py::TestRequests::test_override_env_proxy_and_go_direct",
"tests/test_api.py::TestContextManager::test_no_pac_no_env",
"tests/test_api.py::TestContextManager::test_no_pac",
"tests/test_api.py::TestContextManager::test_pac",
"tests/test_api.py::TestPACSession::test_default_behaviour_no_pac_found",
"tests/test_api.py::TestPACSession::test_pac_from_constructor",
"tests/test_api.py::TestPACSession::test_post_init_proxy_auth",
"tests/test_api.py::TestPACSession::test_pac_no_failover_available_exc_case",
"tests/test_api.py::TestPACSession::test_pac_failover_to_direct",
"tests/test_api.py::TestPACSession::test_default_behaviour_pac_found",
"tests/test_api.py::TestPACSession::test_pac_override_using_request_proxies_parameter",
"tests/test_api.py::TestPACSession::test_bad_proxy_no_failover[localhost]",
"tests/test_api.py::TestPACSession::test_failover_using_custom_response_filter",
"tests/test_api.py::TestPACSession::test_failover_using_custom_exception_criteria",
"tests/test_api.py::TestPACSession::test_pac_disabled",
"tests/test_api.py::TestPACSession::test_pac_failover",
"tests/test_api.py::TestPACSession::test_no_pac_but_call_get_pac_twice",
"tests/test_api.py::TestPACSession::test_pac_failover_to_direct_also_fails",
"tests/test_api.py::TestPACSession::test_bad_proxy_no_failover[unreachable.local]"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-08-26 23:23:43+00:00 | apache-2.0 | 1,506 |
|
carsonyl__pypac-69 | diff --git a/HISTORY.rst b/HISTORY.rst
index 6c79d4d..8c0c6a0 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,3 +1,11 @@
+0.16.2 (2023-03-26)
+-------------------
+
+- Handle boolean args to ``isInNet()``. (#69)
+- Remove Python 3.5, 3.6 from CIB test matrix.
+- Windows Python 2.7 CIB: Pin to dukpy 0.2.3.
+
+
0.16.1 (2022-11-08)
-------------------
diff --git a/pypac/__init__.py b/pypac/__init__.py
index c66b7a1..5fe422d 100644
--- a/pypac/__init__.py
+++ b/pypac/__init__.py
@@ -1,26 +1,31 @@
-"""
-PyPAC: Proxy auto-config for Python
-===================================
-
-Copyright 2018 Carson Lam
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from pypac.api import get_pac, collect_pac_urls, download_pac, PACSession, pac_context_for_url
-
-
-__version__ = "0.16.1"
-
-
-__all__ = ["get_pac", "collect_pac_urls", "download_pac", "PACSession", "pac_context_for_url"]
+"""
+PyPAC: Proxy auto-config for Python
+===================================
+
+Copyright 2018 Carson Lam
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from pypac.api import (
+ PACSession,
+ collect_pac_urls,
+ download_pac,
+ get_pac,
+ pac_context_for_url,
+)
+
+__version__ = "0.16.2"
+
+
+__all__ = ["get_pac", "collect_pac_urls", "download_pac", "PACSession", "pac_context_for_url"]
diff --git a/pypac/parser_functions.py b/pypac/parser_functions.py
index 9b92a6d..ca3f813 100644
--- a/pypac/parser_functions.py
+++ b/pypac/parser_functions.py
@@ -58,6 +58,9 @@ def isInNet(host, pattern, mask):
:returns: True iff the IP address of the host matches the specified IP address pattern.
:rtype: bool
"""
+ host = str(host)
+ pattern = str(pattern)
+ mask = str(mask)
host_ip = host if is_ipv4_address(host) else dnsResolve(host)
if not host_ip or not is_ipv4_address(pattern) or not is_ipv4_address(mask):
return False
| carsonyl/pypac | 0e403cdb1bf633cbe63533975a29c20b04332d74 | diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 33a715d..adcdb6a 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -12,7 +12,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
- python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9, '3.10', 3.11]
+ python-version: [2.7, 3.7, 3.8, 3.9, '3.10', 3.11]
steps:
- uses: actions/checkout@v3
@@ -20,6 +20,10 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
+ - name: Install dukpy for Windows Python 2.7
+ if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
+ # Pin to final release that still published Python 2.7 binary wheels
+ run: pip install dukpy==0.2.3
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools
diff --git a/tests/test_parser_functions.py b/tests/test_parser_functions.py
index a5b0388..d476958 100644
--- a/tests/test_parser_functions.py
+++ b/tests/test_parser_functions.py
@@ -1,25 +1,26 @@
-import pytest
from datetime import datetime
+import pytest
+
try:
from unittest.mock import patch
except ImportError:
- from mock import patch
+ from mock import patch # noqa
from pypac.parser_functions import (
+ alert,
+ dateRange,
dnsDomainIs,
- isResolvable,
- isInNet,
- dnsResolve,
dnsDomainLevels,
- weekdayRange,
- myIpAddress,
+ dnsResolve,
+ isInNet,
isPlainHostName,
+ isResolvable,
+ localHostOrDomainIs,
+ myIpAddress,
shExpMatch,
timeRange,
- localHostOrDomainIs,
- dateRange,
- alert,
+ weekdayRange,
)
@@ -55,6 +56,7 @@ def test_isResolvable(host, expected_value):
("192.168.1.100", "192.168.2.0", "255.255.255.0", False),
("google.com", "0.0.0.0", "0.0.0.0", True),
("google.com", "google.com", "foo", False),
+ (False, False, False, False),
],
)
def test_isInNet(host, pattern, mask, expected_value):
 | Doesn't accept `https://antizapret.prostovpn.org/proxy.pac` as a valid script
It raises `_dukpy.JSRuntimeError: EvalError: Error while calling Python Function: TypeError('inet_aton() argument 1 must be str, not bool')` when testing `self.find_proxy_for_url("/", "0.0.0.0")`. Removing the check locally allows the file to be correctly consumed by my code. Note that my other issue needs to be resolved before you could use the URL as is, or manually follow the redirect and use the end URL when working with the PAC file. | 0.0 | 0e403cdb1bf633cbe63533975a29c20b04332d74 | [
"tests/test_parser_functions.py::test_isInNet[False-False-False-False]"
]
| [
"tests/test_parser_functions.py::test_dnsDomainIs[www.netscape.com-.netscape.com-True]",
"tests/test_parser_functions.py::test_dnsDomainIs[www.netscape.com-NETSCAPE.com-True]",
"tests/test_parser_functions.py::test_dnsDomainIs[www-.netscape.com-False]",
"tests/test_parser_functions.py::test_dnsDomainIs[www.mcom.com-.netscape.com-False]",
"tests/test_parser_functions.py::test_isResolvable[www.google.com-True]",
"tests/test_parser_functions.py::test_isResolvable[bogus.domain.foobar-False]",
"tests/test_parser_functions.py::test_isInNet[198.95.249.79-198.95.249.79-255.255.255.255-True]",
"tests/test_parser_functions.py::test_isInNet[198.95.6.8-198.95.0.0-255.255.0.0-True]",
"tests/test_parser_functions.py::test_isInNet[192.168.1.100-192.168.2.0-255.255.255.0-False]",
"tests/test_parser_functions.py::test_isInNet[google.com-0.0.0.0-0.0.0.0-True]",
"tests/test_parser_functions.py::test_isInNet[google.com-google.com-foo-False]",
"tests/test_parser_functions.py::test_localHostOrDomainIs[www.netscape.com-www.netscape.com-True]",
"tests/test_parser_functions.py::test_localHostOrDomainIs[www-www.netscape.com-True]",
"tests/test_parser_functions.py::test_localHostOrDomainIs[www.mcom.com-www.netscape.com-False]",
"tests/test_parser_functions.py::test_localHostOrDomainIs[home.netscape.com-www.netscape.com-False]",
"tests/test_parser_functions.py::test_myIpAddress",
"tests/test_parser_functions.py::test_dnsResolve",
"tests/test_parser_functions.py::test_dnsDomainLevels[www-0]",
"tests/test_parser_functions.py::test_dnsDomainLevels[www.netscape.com-2]",
"tests/test_parser_functions.py::test_isPlainHostName[www-True]",
"tests/test_parser_functions.py::test_isPlainHostName[www.netscape.com-False]",
"tests/test_parser_functions.py::test_shExpMatch[http://home.netscape.com/people/ari/index.html-*/ari/*-True]",
"tests/test_parser_functions.py::test_shExpMatch[http://home.netscape.com/people/montulli/index.html-*/ari/*-False]",
"tests/test_parser_functions.py::test_weekdayRange[args0-True]",
"tests/test_parser_functions.py::test_weekdayRange[args1-False]",
"tests/test_parser_functions.py::test_weekdayRange[args2-False]",
"tests/test_parser_functions.py::test_weekdayRange[args3-True]",
"tests/test_parser_functions.py::test_weekdayRange[args4-True]",
"tests/test_parser_functions.py::test_dateRange[args0-True]",
"tests/test_parser_functions.py::test_dateRange[args1-True]",
"tests/test_parser_functions.py::test_dateRange[args2-True]",
"tests/test_parser_functions.py::test_dateRange[args3-True]",
"tests/test_parser_functions.py::test_dateRange[args4-True]",
"tests/test_parser_functions.py::test_dateRange[args5-False]",
"tests/test_parser_functions.py::test_dateRange[args6-True]",
"tests/test_parser_functions.py::test_dateRange[args7-False]",
"tests/test_parser_functions.py::test_dateRange[args8-True]",
"tests/test_parser_functions.py::test_dateRange[args9-True]",
"tests/test_parser_functions.py::test_dateRange[args10-True]",
"tests/test_parser_functions.py::test_dateRange[args11-False]",
"tests/test_parser_functions.py::test_dateRange[args12-True]",
"tests/test_parser_functions.py::test_dateRange[args13-True]",
"tests/test_parser_functions.py::test_dateRange[args14-False]",
"tests/test_parser_functions.py::test_dateRange[args15-False]",
"tests/test_parser_functions.py::test_timeRange[args0-True]",
"tests/test_parser_functions.py::test_timeRange[args1-True]",
"tests/test_parser_functions.py::test_timeRange[args2-True]",
"tests/test_parser_functions.py::test_timeRange[args3-False]",
"tests/test_parser_functions.py::test_timeRange[args4-True]",
"tests/test_parser_functions.py::test_timeRange[args5-True]",
"tests/test_parser_functions.py::test_timeRange[args6-False]",
"tests/test_parser_functions.py::test_alert"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-03-24 09:26:05+00:00 | apache-2.0 | 1,507 |
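A note on the failing case recorded above: the new `isInNet` parameter set `(False, False, False, False)` feeds JavaScript `false` values into the Python helper, where they reach `socket.inet_aton` and raise the quoted `TypeError`. A minimal, hypothetical sketch of the kind of type guard involved (illustrative only, not pypac's actual implementation; the helper name and structure are assumptions):

```python
import socket


def is_in_net(host, pattern, mask):
    """Illustrative isInNet-style helper: True if host's IP falls inside pattern/mask."""
    # PAC scripts can hand over non-string JS values (e.g. false); treat them as "no match"
    # instead of letting socket.inet_aton() raise a TypeError.
    if not all(isinstance(arg, str) for arg in (host, pattern, mask)):
        return False
    try:
        host_int = int.from_bytes(socket.inet_aton(socket.gethostbyname(host)), "big")
        pattern_int = int.from_bytes(socket.inet_aton(pattern), "big")
        mask_int = int.from_bytes(socket.inet_aton(mask), "big")
    except OSError:  # unresolvable host or malformed pattern/mask
        return False
    return (host_int & mask_int) == (pattern_int & mask_int)
```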
|
casbin__pycasbin-53 | diff --git a/README.md b/README.md
index 13ab133..4e329fd 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ production-ready | production-ready | production-ready | production-ready
[](https://github.com/casbin/pycasbin) | [](https://github.com/casbin-net/Casbin.NET) | [](https://github.com/casbin4d/Casbin4D) | [](https://github.com/casbin/casbin-rs)
----|----|----|----
[PyCasbin](https://github.com/casbin/pycasbin) | [Casbin.NET](https://github.com/casbin-net/Casbin.NET) | [Casbin4D](https://github.com/casbin4d/Casbin4D) | [Casbin-RS](https://github.com/casbin/casbin-rs)
-production-ready | production-ready | experimental | WIP
+production-ready | production-ready | experimental | production-ready
## Table of contents
diff --git a/casbin/enforcer.py b/casbin/enforcer.py
index deec151..ee8d0e7 100644
--- a/casbin/enforcer.py
+++ b/casbin/enforcer.py
@@ -1,11 +1,9 @@
-import copy
from casbin.management_enforcer import ManagementEnforcer
-from functools import reduce
-
+from casbin.util import join_slice, set_subtract
class Enforcer(ManagementEnforcer):
"""
- Enforcer = ManagementEnforcer + RBAC
+ Enforcer = ManagementEnforcer + RBAC_API + RBAC_WITH_DOMAIN_API
"""
"""creates an enforcer via file or DB.
@@ -17,142 +15,215 @@ class Enforcer(ManagementEnforcer):
e = casbin.Enforcer("path/to/basic_model.conf", a)
"""
- def get_roles_for_user(self, user):
- """gets the roles that a user has."""
- return self.model.model['g']['g'].rm.get_roles(user)
-
- def get_roles_for_user_in_domain(self, name, domain):
- """gets the roles that a user has inside a domain."""
- return self.model.model['g']['g'].rm.get_roles(name, domain)
+ def get_roles_for_user(self, name):
+ """ gets the roles that a user has. """
+ return self.model.model["g"]["g"].rm.get_roles(name)
- def get_users_for_role(self, role):
- """gets the users that has a role."""
- return self.model.model['g']['g'].rm.get_users(role)
+ def get_users_for_role(self, name):
+ """ gets the users that has a role. """
+ return self.model.model["g"]["g"].rm.get_users(name)
- def get_users_for_role_in_domain(self, name, domain):
- """gets the users that has a role inside a domain."""
- return self.model.model['g']['g'].rm.get_users(name, domain)
+ def has_role_for_user(self, name, role):
+ """ determines whether a user has a role. """
+ roles = self.get_roles_for_user(name)
- def has_role_for_user(self, user, role):
- """determines whether a user has a role."""
- roles = self.get_roles_for_user(user)
+ hasRole = False
+ for r in roles:
+ if r == role:
+ hasRole = True
+ break
- return role in roles
+ return hasRole
def add_role_for_user(self, user, role):
- """adds a role for a user."""
- """Returns false if the user already has the role (aka not affected)."""
+ """
+ adds a role for a user.
+ Returns false if the user already has the role (aka not affected).
+ """
return self.add_grouping_policy(user, role)
- def add_role_for_user_in_domain(self, user, role, domain):
- """adds a role for a user inside a domain."""
- """Returns false if the user already has the role (aka not affected)."""
- return self.add_grouping_policy(user, role, domain)
-
def delete_role_for_user(self, user, role):
- """deletes a role for a user."""
- """Returns false if the user does not have the role (aka not affected)."""
+ """
+ deletes a role for a user.
+ Returns false if the user does not have the role (aka not affected).
+ """
return self.remove_grouping_policy(user, role)
def delete_roles_for_user(self, user):
- """deletes all roles for a user."""
- """Returns false if the user does not have any roles (aka not affected)."""
+ """
+ deletes all roles for a user.
+ Returns false if the user does not have any roles (aka not affected).
+ """
return self.remove_filtered_grouping_policy(0, user)
- def delete_roles_for_user_in_domain(self, user, role, domain):
- """deletes a role for a user inside a domain."""
- """Returns false if the user does not have any roles (aka not affected)."""
- return self.remove_filtered_grouping_policy(0, user, role, domain)
-
def delete_user(self, user):
- """deletes a user."""
- """Returns false if the user does not exist (aka not affected)."""
- return self.remove_filtered_grouping_policy(0, user)
+ """
+ deletes a user.
+ Returns false if the user does not exist (aka not affected).
+ """
+ res1 = self.remove_filtered_grouping_policy(0, user)
+
+ res2 = self.remove_filtered_policy(0, user)
+ return res1 or res2
def delete_role(self, role):
- """deletes a role."""
- self.remove_filtered_grouping_policy(1, role)
- self.remove_filtered_policy(0, role)
+ """
+ deletes a role.
+ Returns false if the role does not exist (aka not affected).
+ """
+ res1 = self.remove_filtered_grouping_policy(1, role)
+
+ res2 = self.remove_filtered_policy(0, role)
+ return res1 or res2
def delete_permission(self, *permission):
- """deletes a permission."""
- """Returns false if the permission does not exist (aka not affected)."""
+ """
+ deletes a permission.
+ Returns false if the permission does not exist (aka not affected).
+ """
return self.remove_filtered_policy(1, *permission)
def add_permission_for_user(self, user, *permission):
- """adds a permission for a user or role."""
- """Returns false if the user or role already has the permission (aka not affected)."""
- params = [user]
- params.extend(permission)
-
- return self.add_policy(*params)
+ """
+ adds a permission for a user or role.
+ Returns false if the user or role already has the permission (aka not affected).
+ """
+ return self.add_policy(join_slice(user, *permission))
def delete_permission_for_user(self, user, *permission):
- """adds a permission for a user or role."""
- """Returns false if the user or role already has the permission (aka not affected)."""
- params = [user]
- params.extend(permission)
-
- return self.remove_policy(*params)
+ """
+ deletes a permission for a user or role.
+ Returns false if the user or role does not have the permission (aka not affected).
+ """
+ return self.remove_policy(join_slice(user, *permission))
def delete_permissions_for_user(self, user):
- """deletes permissions for a user or role."""
- """Returns false if the user or role does not have any permissions (aka not affected)."""
+ """
+ deletes permissions for a user or role.
+ Returns false if the user or role does not have any permissions (aka not affected).
+ """
return self.remove_filtered_policy(0, user)
def get_permissions_for_user(self, user):
- """gets permissions for a user or role."""
+ """
+ gets permissions for a user or role.
+ """
return self.get_filtered_policy(0, user)
- def get_permissions_for_user_in_domain(self, user, domain):
- """gets permissions for a user or role inside domain."""
- return self.get_filtered_policy(0, user, domain)
-
def has_permission_for_user(self, user, *permission):
- """determines whether a user has a permission."""
- params = [user]
- params.extend(permission)
-
- return self.has_policy(*params)
-
- def get_implicit_roles_for_user(self, user, domain=None):
"""
- get_implicit_roles_for_user gets implicit roles that a user has.
- Compared to get_roles_for_user(), this function retrieves indirect roles besides direct roles.
- For example:
- g, alice, role:admin
- g, role:admin, role:user
+ determines whether a user has a permission.
+ """
+ return self.has_policy(join_slice(user, *permission))
- get_roles_for_user("alice") can only get: ["role:admin"].
- But get_implicit_roles_for_user("alice") will get: ["role:admin", "role:user"].
+ def get_implicit_roles_for_user(self, name, *domain):
"""
- roles = self.get_roles_for_user_in_domain(user, domain) if domain else self.get_roles_for_user(user)
- res = copy.copy(roles)
- for r in roles:
- _roles = self.get_roles_for_user_in_domain(r, domain) if domain else self.get_roles_for_user(r)
- res.extend(_roles)
+ gets implicit roles that a user has.
+ Compared to get_roles_for_user(), this function retrieves indirect roles besides direct roles.
+ For example:
+ g, alice, role:admin
+ g, role:admin, role:user
+
+ get_roles_for_user("alice") can only get: ["role:admin"].
+ But get_implicit_roles_for_user("alice") will get: ["role:admin", "role:user"].
+ """
+ res = list()
+ roleSet = dict()
+ roleSet[name] = True
+
+ q = list()
+ q.append(name)
+
+ while len(q) > 0:
+ name = q[0]
+ q = q[1:]
+
+ roles = self.rm.get_roles(name, *domain)
+ for r in roles:
+ if r not in roleSet:
+ res.append(r)
+ q.append(r)
+ roleSet[r] = True
+
return res
- def get_implicit_permissions_for_user(self, user, domain=None):
+ def get_implicit_permissions_for_user(self, user, *domain):
"""
- gets implicit permissions for a user or role.
- Compared to get_permissions_for_user(), this function retrieves permissions for inherited roles.
- For example:
- p, admin, data1, read
- p, alice, data2, read
- g, alice, admin
-
- get_permissions_for_user("alice") can only get: [["alice", "data2", "read"]].
- But get_implicit_permissions_for_user("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
+ gets implicit permissions for a user or role.
+ Compared to get_permissions_for_user(), this function retrieves permissions for inherited roles.
+ For example:
+ p, admin, data1, read
+ p, alice, data2, read
+ g, alice, admin
+
+ get_permissions_for_user("alice") can only get: [["alice", "data2", "read"]].
+ But get_implicit_permissions_for_user("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
"""
- roles = self.get_implicit_roles_for_user(user, domain)
- permissions = self.get_permissions_for_user_in_domain(user,
- domain) if domain else self.get_permissions_for_user(user)
+ roles = self.get_implicit_roles_for_user(user, *domain)
+
+ roles.insert(0, user)
+
+ withDomain = False
+ if len(domain) == 1:
+ withDomain = True
+ elif len(domain) > 1:
+ return None
+
+ res = []
+ permissions = [list()]
for role in roles:
- _permissions = self.get_permissions_for_user_in_domain(role,
- domain) if domain else self.get_permissions_for_user(
- role)
- for item in _permissions:
- if item not in permissions:
- permissions.append(item)
- return permissions
+ if withDomain:
+ permissions = self.get_permissions_for_user_in_domain(role, domain[0])
+ else:
+ permissions = self.get_permissions_for_user(role)
+ res.extend(permissions)
+
+ return res
+
+ def get_implicit_users_for_permission(self, *permission):
+ """
+ gets implicit users for a permission.
+ For example:
+ p, admin, data1, read
+ p, bob, data1, read
+ g, alice, admin
+
+ get_implicit_users_for_permission("data1", "read") will get: ["alice", "bob"].
+ Note: only users will be returned, roles (2nd arg in "g") will be excluded.
+ """
+ subjects = self.get_all_subjects()
+ roles = self.get_all_roles()
+
+ users = set_subtract(subjects, roles)
+
+ res = list()
+ for user in users:
+ req = join_slice(user, *permission)
+ allowed = self.enforce(*req)
+
+ if allowed:
+ res.append(user)
+
+ return res
+
+ def get_roles_for_user_in_domain(self, name, domain):
+ """gets the roles that a user has inside a domain."""
+ return self.model.model['g']['g'].rm.get_roles(name, domain)
+
+ def get_users_for_role_in_domain(self, name, domain):
+ """gets the users that has a role inside a domain."""
+ return self.model.model['g']['g'].rm.get_users(name, domain)
+
+ def add_role_for_user_in_domain(self, user, role, domain):
+ """adds a role for a user inside a domain."""
+ """Returns false if the user already has the role (aka not affected)."""
+ return self.add_grouping_policy(user, role, domain)
+
+ def delete_roles_for_user_in_domain(self, user, role, domain):
+ """deletes a role for a user inside a domain."""
+ """Returns false if the user does not have any roles (aka not affected)."""
+ return self.remove_filtered_grouping_policy(0, user, role, domain)
+
+ def get_permissions_for_user_in_domain(self, user, domain):
+ """gets permissions for a user or role inside domain."""
+ return self.get_filtered_policy(0, user, domain)
\ No newline at end of file
diff --git a/casbin/util/util.py b/casbin/util/util.py
index eb001b2..96aefc5 100644
--- a/casbin/util/util.py
+++ b/casbin/util/util.py
@@ -35,3 +35,25 @@ def params_to_string(*s):
"""gets a printable string for variable number of parameters."""
return ", ".join(s)
+
+def join_slice(a, *b):
+ ''' joins a string and a slice into a new slice.'''
+ res = [a]
+
+ res.extend(b)
+
+ return res
+
+def set_subtract(a, b):
+ ''' returns the elements in `a` that aren't in `b`. '''
+ mb = dict()
+
+ for x in b:
+ mb[x] = True
+
+ diff = list()
+ for x in a:
+ if x not in mb:
+ diff.append(x)
+
+ return diff
| casbin/pycasbin | 4d228a319768ea38657d5f1a37835aee8a2ec5c1 | diff --git a/tests/test_rbac_api.py b/tests/test_rbac_api.py
index 5a82984..90bdf9f 100644
--- a/tests/test_rbac_api.py
+++ b/tests/test_rbac_api.py
@@ -205,3 +205,11 @@ class TestRbacApi(TestCase):
self.assertEqual(e.get_roles_for_user_in_domain('bob', 'domain2'), ['admin'])
self.assertEqual(e.get_roles_for_user_in_domain('admin', 'domain2'), [])
self.assertEqual(e.get_roles_for_user_in_domain('non_exist', 'domain2'), [])
+
+ def test_implicit_user_api(self):
+ e = get_enforcer(get_examples("rbac_model.conf"),get_examples("rbac_with_hierarchy_policy.csv"))
+
+ self.assertEqual(["alice"], e.get_implicit_users_for_permission("data1", "read"))
+ self.assertEqual(["alice"], e.get_implicit_users_for_permission("data1", "write"))
+ self.assertEqual(["alice"], e.get_implicit_users_for_permission("data2", "read"))
+ self.assertEqual(["alice", "bob"], e.get_implicit_users_for_permission("data2", "write"))
\ No newline at end of file
 | Bringing pycasbin in line with casbin-core.
There are likely features and functionality in casbin-core that pycasbin does not yet track; we should identify them, bring them over, and implement them here. | 0.0 | 4d228a319768ea38657d5f1a37835aee8a2ec5c1 | [
"tests/test_rbac_api.py::TestRbacApi::test_implicit_user_api"
]
| [
"tests/test_rbac_api.py::TestRbacApi::test_add_permission_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_add_role_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_delete_permission",
"tests/test_rbac_api.py::TestRbacApi::test_delete_permission_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_delete_permissions_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_delete_role",
"tests/test_rbac_api.py::TestRbacApi::test_delete_role_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_delete_roles_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_delete_user",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_get_roles_with_domain",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_get_users_in_domain",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_implicit_permissions_api",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_implicit_permissions_api_with_domain",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_implicit_roles_api",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_implicit_roles_with_domain",
"tests/test_rbac_api.py::TestRbacApi::test_enforce_user_api_with_domain",
"tests/test_rbac_api.py::TestRbacApi::test_get_permissions_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_get_roles_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_get_users_for_role",
"tests/test_rbac_api.py::TestRbacApi::test_has_permission_for_user",
"tests/test_rbac_api.py::TestRbacApi::test_has_role_for_user"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-04-15 14:18:51+00:00 | apache-2.0 | 1,508 |
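A short usage sketch of the `get_implicit_users_for_permission` API added in the casbin__pycasbin-53 patch above, mirroring its test; the model/policy file names are assumed to follow the repo's examples directory used by `get_examples`:

```python
import casbin

e = casbin.Enforcer("examples/rbac_model.conf", "examples/rbac_with_hierarchy_policy.csv")

# In that example policy, alice reaches every permission through roles while bob only
# ends up allowed (data2, write); roles from "g" rules are excluded from the result.
print(e.get_implicit_users_for_permission("data2", "write"))  # expected: ['alice', 'bob']
print(e.get_implicit_users_for_permission("data1", "read"))   # expected: ['alice']
```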
|
castle__castle-python-29 | diff --git a/HISTORY.md b/HISTORY.md
index c705403..e66e5a4 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,3 +1,8 @@
+## master
+
+### Breaking Changes:
+- switched configuration request_timeout from seconds to milliseconds
+
## 2.0.0 (2018-02-09)
### Features:
diff --git a/README.rst b/README.rst
index 5fc110f..1436405 100644
--- a/README.rst
+++ b/README.rst
@@ -29,8 +29,8 @@ import and configure the library with your Castle API secret.
# For authenticate method you can set failover strategies: allow(default), deny, challenge, throw
configuration.failover_strategy = 'deny'
- # Castle::RequestError is raised when timing out in seconds (default: 0.5 of the second)
- configuration.request_timeout = 1
+ # Castle::RequestError is raised when timing out in milliseconds (default: 500 milliseconds)
+ configuration.request_timeout = 1000
# Whitelisted and Blacklisted headers are case insensitive and allow to use _ and - as a separator, http prefixes are removed
# Whitelisted headers
diff --git a/castle/configuration.py b/castle/configuration.py
index 1a9a6dc..c406191 100644
--- a/castle/configuration.py
+++ b/castle/configuration.py
@@ -19,7 +19,7 @@ WHITELISTED = [
BLACKLISTED = ['HTTP_COOKIE']
# 500 milliseconds
-REQUEST_TIMEOUT = 0.5
+REQUEST_TIMEOUT = 500
FAILOVER_STRATEGIES = ['allow', 'deny', 'challenge', 'throw']
diff --git a/castle/request.py b/castle/request.py
index 7e38c79..230787c 100644
--- a/castle/request.py
+++ b/castle/request.py
@@ -13,7 +13,7 @@ class Request(object):
method,
self.build_url(path),
auth=('', configuration.api_secret),
- timeout=configuration.request_timeout,
+ timeout=configuration.request_timeout / 1000.0,
headers=self.headers,
verify=Request.verify(),
data=None if params is None else json.dumps(params)
| castle/castle-python | 288780af56d67af5757c6b0d17a74a24c29a9a60 | diff --git a/castle/test/configuration_test.py b/castle/test/configuration_test.py
index 67fd056..6c80f05 100644
--- a/castle/test/configuration_test.py
+++ b/castle/test/configuration_test.py
@@ -15,7 +15,7 @@ class ConfigurationTestCase(unittest.TestCase):
HeadersFormatter.call(v) for v in WHITELISTED])
self.assertEqual(config.blacklisted, [
HeadersFormatter.call(v) for v in BLACKLISTED])
- self.assertEqual(config.request_timeout, 0.5)
+ self.assertEqual(config.request_timeout, 500)
self.assertEqual(config.failover_strategy, 'allow')
def test_api_secret_setter(self):
@@ -70,8 +70,8 @@ class ConfigurationTestCase(unittest.TestCase):
def test_request_timeout_setter(self):
config = Configuration()
- config.request_timeout = 5
- self.assertEqual(config.request_timeout, 5.0)
+ config.request_timeout = 5000
+ self.assertEqual(config.request_timeout, 5000)
def test_failover_strategy_setter_valid(self):
config = Configuration()
 | change timeout configuration from seconds to milliseconds | 0.0 | 288780af56d67af5757c6b0d17a74a24c29a9a60 | [
"castle/test/configuration_test.py::ConfigurationTestCase::test_default_values"
]
| [
"castle/test/configuration_test.py::ConfigurationTestCase::test_api_secret_setter",
"castle/test/configuration_test.py::ConfigurationTestCase::test_blacklisted_setter_empty",
"castle/test/configuration_test.py::ConfigurationTestCase::test_blacklisted_setter_list",
"castle/test/configuration_test.py::ConfigurationTestCase::test_blacklisted_setter_none",
"castle/test/configuration_test.py::ConfigurationTestCase::test_failover_strategy_setter_invalid",
"castle/test/configuration_test.py::ConfigurationTestCase::test_failover_strategy_setter_valid",
"castle/test/configuration_test.py::ConfigurationTestCase::test_host_setter",
"castle/test/configuration_test.py::ConfigurationTestCase::test_port_setter",
"castle/test/configuration_test.py::ConfigurationTestCase::test_request_timeout_setter",
"castle/test/configuration_test.py::ConfigurationTestCase::test_url_prefix_setter",
"castle/test/configuration_test.py::ConfigurationTestCase::test_whitelisted_setter_empty",
"castle/test/configuration_test.py::ConfigurationTestCase::test_whitelisted_setter_list",
"castle/test/configuration_test.py::ConfigurationTestCase::test_whitelisted_setter_none"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-02-09 13:40:37+00:00 | mit | 1,509 |
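A small configuration sketch of the seconds-to-milliseconds change recorded above (illustrative; values follow the updated README snippet and tests):

```python
from castle.configuration import Configuration

config = Configuration()
config.request_timeout = 1000  # now interpreted as milliseconds (new default is 500)

# The request layer converts back to the seconds expected by `requests`:
timeout_in_seconds = config.request_timeout / 1000.0
assert timeout_in_seconds == 1.0
```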
|
catalystneuro__roiextractors-127 | diff --git a/src/roiextractors/multiimagingextractor.py b/src/roiextractors/multiimagingextractor.py
new file mode 100644
index 0000000..8dbc144
--- /dev/null
+++ b/src/roiextractors/multiimagingextractor.py
@@ -0,0 +1,112 @@
+from collections import defaultdict
+from typing import Tuple, List, Iterable, Optional
+
+import numpy as np
+
+from .extraction_tools import ArrayType, NumpyArray, check_get_frames_args
+from .imagingextractor import ImagingExtractor
+
+
+class MultiImagingExtractor(ImagingExtractor):
+ """
+ This class is used to combine multiple ImagingExtractor objects by frames.
+ """
+
+ extractor_name = "MultiImagingExtractor"
+ installed = True
+ installation_mesg = ""
+
+ def __init__(self, imaging_extractors: List[ImagingExtractor]):
+ """
+ Parameters
+ ----------
+ imaging_extractors: list of ImagingExtractor
+ list of imaging extractor objects
+ """
+ super().__init__()
+ assert isinstance(imaging_extractors, list), "Enter a list of ImagingExtractor objects as argument"
+ assert all(isinstance(IX, ImagingExtractor) for IX in imaging_extractors)
+ self._imaging_extractors = imaging_extractors
+
+ # Checks that properties are consistent between extractors
+ self._check_consistency_between_imaging_extractors()
+
+ self._start_frames, self._end_frames = [], []
+ num_frames = 0
+ for imaging_extractor in self._imaging_extractors:
+ self._start_frames.append(num_frames)
+ num_frames = num_frames + imaging_extractor.get_num_frames()
+ self._end_frames.append(num_frames)
+ self._num_frames = num_frames
+
+ if any((getattr(imaging_extractor, "_times") is not None for imaging_extractor in self._imaging_extractors)):
+ times = self._get_times()
+ self.set_times(times=times)
+
+ def _check_consistency_between_imaging_extractors(self):
+ properties_to_check = dict(
+ get_sampling_frequency="The sampling frequency",
+ get_image_size="The size of a frame",
+ get_num_channels="The number of channels",
+ get_channel_names="The name of the channels",
+ )
+ for method, property_message in properties_to_check.items():
+ values = [getattr(extractor, method, None)() for extractor in self._imaging_extractors]
+ unique_values = set(tuple(v) if isinstance(v, Iterable) else v for v in values)
+ assert (
+ len(unique_values) == 1
+ ), f"{property_message} is not consistent over the files (found {unique_values})."
+
+ def _get_times(self):
+ frame_indices = np.array([*range(self._start_frames[0], self._end_frames[-1])])
+ times = self.frame_to_time(frames=frame_indices)
+
+ for extractor_index, extractor in enumerate(self._imaging_extractors):
+ if getattr(extractor, "_times") is not None:
+ to_replace = [*range(self._start_frames[extractor_index], self._end_frames[extractor_index])]
+ times[to_replace] = extractor._times
+
+ return times
+
+ @check_get_frames_args
+ def get_frames(self, frame_idxs: ArrayType, channel: Optional[int] = 0) -> NumpyArray:
+ extractor_indices = np.searchsorted(self._end_frames, frame_idxs, side="right")
+ relative_frame_indices = frame_idxs - np.array(self._start_frames)[extractor_indices]
+ # Match frame_idxs to imaging extractors
+ extractors_dict = defaultdict(list)
+ for extractor_index, frame_index in zip(extractor_indices, relative_frame_indices):
+ extractors_dict[extractor_index].append(frame_index)
+
+ frames_to_concatenate = []
+ # Extract frames for each extractor and concatenate
+ for extractor_index, frame_indices in extractors_dict.items():
+ frames_for_each_extractor = self._get_frames_from_an_imaging_extractor(
+ extractor_index=extractor_index,
+ frame_idxs=frame_indices,
+ )
+ if len(frame_indices) == 1:
+ frames_for_each_extractor = frames_for_each_extractor[np.newaxis, ...]
+ frames_to_concatenate.append(frames_for_each_extractor)
+
+ frames = np.concatenate(frames_to_concatenate, axis=0).squeeze()
+ return frames
+
+ def _get_frames_from_an_imaging_extractor(self, extractor_index: int, frame_idxs: ArrayType) -> NumpyArray:
+ imaging_extractor = self._imaging_extractors[extractor_index]
+ frames = imaging_extractor.get_frames(frame_idxs=frame_idxs)
+ return frames
+
+ def get_image_size(self) -> Tuple:
+ return self._imaging_extractors[0].get_image_size()
+
+ def get_num_frames(self) -> int:
+ return self._num_frames
+
+ def get_sampling_frequency(self) -> float:
+ return self._imaging_extractors[0].get_sampling_frequency()
+
+ def get_channel_names(self) -> list:
+ return self._imaging_extractors[0].get_channel_names()
+
+ def get_num_channels(self) -> int:
+ return self._imaging_extractors[0].get_num_channels()
| catalystneuro/roiextractors | cd818a22e540060633683fbc6d7d36e39c593dd7 | diff --git a/tests/test_internals/test_multiimagingextractor.py b/tests/test_internals/test_multiimagingextractor.py
new file mode 100644
index 0000000..e1d4dbc
--- /dev/null
+++ b/tests/test_internals/test_multiimagingextractor.py
@@ -0,0 +1,150 @@
+import unittest
+
+import numpy as np
+from hdmf.testing import TestCase
+from numpy.testing import assert_array_equal
+from parameterized import parameterized, param
+
+from roiextractors.multiimagingextractor import MultiImagingExtractor
+from roiextractors.testing import generate_dummy_imaging_extractor
+
+
+class TestMultiImagingExtractor(TestCase):
+ extractors = None
+
+ @classmethod
+ def setUpClass(cls):
+ cls.extractors = [
+ generate_dummy_imaging_extractor(num_frames=10, num_rows=3, num_columns=4, sampling_frequency=20.0)
+ for _ in range(3)
+ ]
+ cls.multi_imaging_extractor = MultiImagingExtractor(imaging_extractors=cls.extractors)
+
+ def test_get_image_size(self):
+ assert self.multi_imaging_extractor.get_image_size() == self.extractors[0].get_image_size()
+
+ def test_get_num_frames(self):
+ assert self.multi_imaging_extractor.get_num_frames() == 30
+
+ def test_get_sampling_frequency(self):
+ assert self.multi_imaging_extractor.get_sampling_frequency() == 20.0
+
+ def test_get_channel_names(self):
+ assert self.multi_imaging_extractor.get_channel_names() == ["channel_num_0"]
+
+ def test_get_num_channels(self):
+ assert self.multi_imaging_extractor.get_num_channels() == 1
+
+ def test_get_frames_assertion(self):
+ with self.assertRaisesWith(exc_type=AssertionError, exc_msg="'frame_idxs' exceed number of frames"):
+ self.multi_imaging_extractor.get_frames(frame_idxs=[31])
+
+ def test_get_non_consecutive_frames(self):
+ test_frames = self.multi_imaging_extractor.get_frames(frame_idxs=[8, 10, 12, 15, 20, 29])
+ expected_frames = np.concatenate(
+ (
+ self.extractors[0].get_frames(frame_idxs=[8])[np.newaxis, ...],
+ self.extractors[1].get_frames(frame_idxs=[0, 2, 5]),
+ self.extractors[2].get_frames(frame_idxs=[0, 9]),
+ ),
+ axis=0,
+ )
+ assert_array_equal(test_frames, expected_frames)
+
+ def test_get_consecutive_frames(self):
+ test_frames = self.multi_imaging_extractor.get_frames(frame_idxs=np.arange(16, 22))
+ expected_frames = np.concatenate(
+ (
+ self.extractors[1].get_frames(frame_idxs=np.arange(6, 10)),
+ self.extractors[2].get_frames(frame_idxs=[0, 1]),
+ ),
+ axis=0,
+ )
+
+ assert_array_equal(test_frames, expected_frames)
+
+ def test_get_all_frames(self):
+ test_frames = self.multi_imaging_extractor.get_frames(frame_idxs=np.arange(0, 30))
+ expected_frames = np.concatenate(
+ [extractor.get_frames(np.arange(0, 10)) for extractor in self.extractors],
+ axis=0,
+ )
+
+ assert_array_equal(test_frames, expected_frames)
+
+ def test_get_video(self):
+ test_frames = self.multi_imaging_extractor.get_video()
+ expected_frames = np.concatenate(
+ [self.extractors[i].get_video() for i in range(3)],
+ axis=0,
+ )
+ assert_array_equal(test_frames, expected_frames)
+
+ def test_set_incorrect_times(self):
+ with self.assertRaisesWith(
+ exc_type=AssertionError,
+ exc_msg="'times' should have the same length of the number of frames!",
+ ):
+ self.multi_imaging_extractor.set_times(times=np.arange(0, 10) / 30.0)
+
+ self.assertEqual(self.multi_imaging_extractor._times, None)
+
+ def test_set_times(self):
+ self.extractors[1].set_times(np.arange(0, 10) / 30.0)
+ multi_imaging_extractor = MultiImagingExtractor(imaging_extractors=self.extractors)
+
+ dummy_times = np.arange(0, 30) / 20.0
+ to_replace = [*range(multi_imaging_extractor._start_frames[1], multi_imaging_extractor._end_frames[1])]
+
+ dummy_times[to_replace] = self.extractors[1]._times
+ assert_array_equal(multi_imaging_extractor._times, dummy_times)
+
+ self.multi_imaging_extractor.set_times(times=dummy_times)
+
+ assert_array_equal(self.multi_imaging_extractor._times, dummy_times)
+
+ @parameterized.expand(
+ [
+ param(
+ rows=3,
+ columns=4,
+ sampling_frequency=15.0,
+ num_channels=1,
+ expected_error_msg="The sampling frequency is not consistent over the files (found {20.0, 15.0}).",
+ ),
+ param(
+ rows=3,
+ columns=5,
+ sampling_frequency=20.0,
+ num_channels=1,
+ expected_error_msg="The size of a frame is not consistent over the files (found {(3, 4), (3, 5)}).",
+ ),
+ param(
+ rows=3,
+ columns=4,
+ sampling_frequency=20.0,
+ num_channels=2,
+ expected_error_msg="The number of channels is not consistent over the files (found {1, 2}).",
+ ),
+ ],
+ )
+ def test_inconsistent_property_assertion(self, rows, columns, sampling_frequency, num_channels, expected_error_msg):
+ inconsistent_extractors = [
+ self.extractors[0],
+ generate_dummy_imaging_extractor(
+ num_frames=1,
+ num_rows=rows,
+ num_columns=columns,
+ sampling_frequency=sampling_frequency,
+ num_channels=num_channels,
+ ),
+ ]
+ with self.assertRaisesWith(
+ exc_type=AssertionError,
+ exc_msg=expected_error_msg,
+ ):
+ MultiImagingExtractor(imaging_extractors=inconsistent_extractors)
+
+
+if __name__ == "__main__":
+ unittest.main()
| Add MultiImagingExtractor
We have a MultiSegmentationExtractor, but no MultiImagingExtractor. @weiglszonja needs this for the Fee lab project in order to splice together a series of separate ImagingExtractors into one continuous ImagingExtractor to be written to the same TwoPhotonSeries | 0.0 | cd818a22e540060633683fbc6d7d36e39c593dd7 | [
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_all_frames",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_channel_names",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_consecutive_frames",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_frames_assertion",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_image_size",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_non_consecutive_frames",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_num_channels",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_num_frames",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_sampling_frequency",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_get_video",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_inconsistent_property_assertion_0",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_inconsistent_property_assertion_1",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_inconsistent_property_assertion_2",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_set_incorrect_times",
"tests/test_internals/test_multiimagingextractor.py::TestMultiImagingExtractor::test_set_times"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2022-06-01 12:51:30+00:00 | bsd-3-clause | 1,511 |
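A brief usage sketch of the new `MultiImagingExtractor`, mirroring the test setup in this record (the dummy-extractor helper comes from `roiextractors.testing`):

```python
from roiextractors.multiimagingextractor import MultiImagingExtractor
from roiextractors.testing import generate_dummy_imaging_extractor

parts = [
    generate_dummy_imaging_extractor(num_frames=10, num_rows=3, num_columns=4, sampling_frequency=20.0)
    for _ in range(3)
]
imaging = MultiImagingExtractor(imaging_extractors=parts)

assert imaging.get_num_frames() == 30                          # frames are concatenated across the three parts
assert imaging.get_image_size() == parts[0].get_image_size()   # size/rate/channel consistency is enforced at __init__
frames = imaging.get_frames(frame_idxs=[8, 10, 12])            # indices are mapped back to the owning sub-extractor
```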
|
catalystneuro__roiextractors-128 | diff --git a/src/roiextractors/imagingextractor.py b/src/roiextractors/imagingextractor.py
index e65a618..059d9d4 100644
--- a/src/roiextractors/imagingextractor.py
+++ b/src/roiextractors/imagingextractor.py
@@ -1,5 +1,6 @@
+"""Base class definitions for all ImagingExtractors."""
from abc import ABC, abstractmethod
-from typing import Union
+from typing import Union, Optional, Tuple
import numpy as np
from copy import deepcopy
@@ -9,17 +10,13 @@ from .extraction_tools import (
ArrayType,
PathType,
DtypeType,
- IntType,
FloatType,
check_get_videos_args,
)
-from typing import Tuple
class ImagingExtractor(ABC, BaseExtractor):
- """An abstract class that contains all the meta-data and input data from
- the imaging data.
- """
+ """Abstract class that contains all the meta-data and input data from the imaging data."""
def __init__(self) -> None:
BaseExtractor.__init__(self)
@@ -27,7 +24,7 @@ class ImagingExtractor(ABC, BaseExtractor):
self._memmapped = False
@abstractmethod
- def get_frames(self, frame_idxs: ArrayType, channel: int = 0) -> np.ndarray:
+ def get_frames(self, frame_idxs: ArrayType, channel: Optional[int] = 0) -> np.ndarray:
pass
@abstractmethod
@@ -117,8 +114,8 @@ class ImagingExtractor(ABC, BaseExtractor):
times: array-like
The times in seconds for each frame
"""
- assert len(times) == self.get_num_frames(), "'times' should have the same length of the number of frames"
- self._times = times.astype("float64")
+ assert len(times) == self.get_num_frames(), "'times' should have the same length of the number of frames!"
+ self._times = np.array(times).astype("float64")
def copy_times(self, extractor: BaseExtractor) -> None:
"""This function copies times from another extractor.
@@ -131,6 +128,10 @@ class ImagingExtractor(ABC, BaseExtractor):
if extractor._times is not None:
self.set_times(deepcopy(extractor._times))
+ def frame_slice(self, start_frame, end_frame):
+ """Return a new ImagingExtractor ranging from the start_frame to the end_frame."""
+ return FrameSliceImagingExtractor(parent_imaging=self, start_frame=start_frame, end_frame=end_frame)
+
@staticmethod
def write_imaging(imaging, save_path: PathType, overwrite: bool = False):
"""
@@ -147,3 +148,77 @@ class ImagingExtractor(ABC, BaseExtractor):
If True and save_path is existing, it is overwritten
"""
raise NotImplementedError
+
+
+class FrameSliceImagingExtractor(ImagingExtractor):
+ """
+ Class to get a lazy frame slice.
+
+ Do not use this class directly but use `.frame_slice(...)` on an ImagingExtractor object.
+ """
+
+ extractor_name = "FrameSliceImagingExtractor"
+ installed = True
+ is_writable = True
+ installation_mesg = ""
+
+ def __init__(
+ self, parent_imaging: ImagingExtractor, start_frame: Optional[int] = None, end_frame: Optional[int] = None
+ ):
+ """
+ Initialize an ImagingExtractor whose frames subset the parent.
+
+ Subset is exclusive on the right bound, that is, the indexes of this ImagingExtractor range over
+ [0, ..., end_frame-start_frame-1], which is used to resolve the index mapping in `get_frames(frame_idxs=[...])`.
+
+ Parameters
+ ----------
+ parent_imaging : ImagingExtractor
+            The ImagingExtractor object to subset the frames of.
+ start_frame : int, optional
+ The left bound of the frames to subset.
+ The default is the start frame of the parent.
+ end_frame : int, optional
+            The right bound of the frames, exclusively, to subset.
+ The default is end frame of the parent.
+
+ """
+ self._parent_imaging = parent_imaging
+ self._start_frame = start_frame
+ self._end_frame = end_frame
+ self._num_frames = self._end_frame - self._start_frame
+
+ parent_size = self._parent_imaging.get_num_frames()
+ if start_frame is None:
+ start_frame = 0
+ else:
+ assert 0 <= start_frame < parent_size
+ if end_frame is None:
+ end_frame = parent_size
+ else:
+ assert 0 < end_frame <= parent_size
+ assert end_frame > start_frame, "'start_frame' must be smaller than 'end_frame'!"
+
+ super().__init__()
+ if getattr(self._parent_imaging, "_times") is not None:
+ self._times = self._parent_imaging._times[start_frame:end_frame]
+
+ def get_frames(self, frame_idxs: ArrayType, channel: Optional[int] = 0) -> np.ndarray:
+ assert max(frame_idxs) < self._num_frames, "'frame_idxs' range beyond number of available frames!"
+ mapped_frame_idxs = np.array(frame_idxs) + self._start_frame
+ return self._parent_imaging.get_frames(frame_idxs=mapped_frame_idxs, channel=channel)
+
+ def get_image_size(self) -> Tuple[int, int]:
+ return tuple(self._parent_imaging.get_image_size())
+
+ def get_num_frames(self) -> int:
+ return self._num_frames
+
+ def get_sampling_frequency(self) -> float:
+ return self._parent_imaging.get_sampling_frequency()
+
+ def get_channel_names(self) -> list:
+ return self._parent_imaging.get_channel_names()
+
+ def get_num_channels(self) -> int:
+ return self._parent_imaging.get_num_channels()
| catalystneuro/roiextractors | 71e2b527366194e42b559cff766d160344863a8d | diff --git a/tests/test_internals/test_frame_slice_imaging.py b/tests/test_internals/test_frame_slice_imaging.py
new file mode 100644
index 0000000..de797d3
--- /dev/null
+++ b/tests/test_internals/test_frame_slice_imaging.py
@@ -0,0 +1,64 @@
+import unittest
+
+import numpy as np
+from hdmf.testing import TestCase
+from numpy.testing import assert_array_equal
+
+from roiextractors.testing import generate_dummy_imaging_extractor
+
+
+def test_frame_slicing_imaging_times():
+ num_frames = 10
+ timestamp_shift = 7.1
+ times = np.array(range(num_frames)) + timestamp_shift
+ start_frame, end_frame = 2, 7
+
+ toy_imaging_example = generate_dummy_imaging_extractor(num_frames=num_frames, rows=5, columns=4, num_channels=1)
+ toy_imaging_example.set_times(times=times)
+
+ frame_sliced_imaging = toy_imaging_example.frame_slice(start_frame=start_frame, end_frame=end_frame)
+ assert_array_equal(
+ frame_sliced_imaging.frame_to_time(
+ frames=np.array([idx for idx in range(frame_sliced_imaging.get_num_frames())])
+ ),
+ times[start_frame:end_frame],
+ )
+
+
+class TestFrameSliceImaging(TestCase):
+ @classmethod
+ def setUpClass(cls):
+ """Use a toy example of ten frames of a 5 x 4 grayscale image."""
+ cls.toy_imaging_example = generate_dummy_imaging_extractor(num_frames=10, rows=5, columns=4, num_channels=1)
+ cls.frame_sliced_imaging = cls.toy_imaging_example.frame_slice(start_frame=2, end_frame=7)
+
+ def test_get_image_size(self):
+ assert self.frame_sliced_imaging.get_image_size() == (5, 4)
+
+ def test_get_num_frames(self):
+ assert self.frame_sliced_imaging.get_num_frames() == 5
+
+ def test_get_sampling_frequency(self):
+ assert self.frame_sliced_imaging.get_sampling_frequency() == 30.0
+
+ def test_get_channel_names(self):
+ assert self.frame_sliced_imaging.get_channel_names() == ["channel_num_0"]
+
+ def test_get_num_channels(self):
+ assert self.frame_sliced_imaging.get_num_channels() == 1
+
+ def test_get_frames_assertion(self):
+ with self.assertRaisesWith(
+ exc_type=AssertionError, exc_msg="'frame_idxs' range beyond number of available frames!"
+ ):
+ self.frame_sliced_imaging.get_frames(frame_idxs=[6])
+
+ def test_get_frames(self):
+ assert_array_equal(
+ self.frame_sliced_imaging.get_frames(frame_idxs=[2, 4]),
+ self.toy_imaging_example.get_frames(frame_idxs=[4, 6]),
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
| add stub option
A stub option should limit the conversion to the first ~100 frames so that conversions can be tested quickly without reading/writing all of the data. | 0.0 | 71e2b527366194e42b559cff766d160344863a8d | [
"tests/test_internals/test_frame_slice_imaging.py::test_frame_slicing_imaging_times",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_channel_names",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_frames",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_frames_assertion",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_image_size",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_num_channels",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_num_frames",
"tests/test_internals/test_frame_slice_imaging.py::TestFrameSliceImaging::test_get_sampling_frequency"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2022-06-03 18:55:19+00:00 | bsd-3-clause | 1,512 |
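A brief usage sketch of the lazy `frame_slice` added above, mirroring the accompanying tests:

```python
from numpy.testing import assert_array_equal

from roiextractors.testing import generate_dummy_imaging_extractor

imaging = generate_dummy_imaging_extractor(num_frames=10, rows=5, columns=4, num_channels=1)
sub_imaging = imaging.frame_slice(start_frame=2, end_frame=7)  # lazily wraps parent frames 2..6

assert sub_imaging.get_num_frames() == 5
# Indices are relative to the slice, so slice frames [2, 4] map to parent frames [4, 6].
assert_array_equal(sub_imaging.get_frames(frame_idxs=[2, 4]), imaging.get_frames(frame_idxs=[4, 6]))
```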
|
catalystneuro__roiextractors-248 | diff --git a/src/roiextractors/extractorlist.py b/src/roiextractors/extractorlist.py
index 091265c..7241de8 100644
--- a/src/roiextractors/extractorlist.py
+++ b/src/roiextractors/extractorlist.py
@@ -25,6 +25,7 @@ from .extractors.memmapextractors import MemmapImagingExtractor
from .extractors.miniscopeimagingextractor import MiniscopeImagingExtractor
from .multisegmentationextractor import MultiSegmentationExtractor
from .multiimagingextractor import MultiImagingExtractor
+from .volumetricimagingextractor import VolumetricImagingExtractor
imaging_extractor_full_list = [
NumpyImagingExtractor,
@@ -39,6 +40,7 @@ imaging_extractor_full_list = [
SbxImagingExtractor,
NumpyMemmapImagingExtractor,
MemmapImagingExtractor,
+ VolumetricImagingExtractor,
]
segmentation_extractor_full_list = [
diff --git a/src/roiextractors/volumetricimagingextractor.py b/src/roiextractors/volumetricimagingextractor.py
new file mode 100644
index 0000000..2abf0c1
--- /dev/null
+++ b/src/roiextractors/volumetricimagingextractor.py
@@ -0,0 +1,169 @@
+"""Base class definition for volumetric imaging extractors."""
+
+from typing import Tuple, List, Iterable, Optional
+import numpy as np
+
+from .extraction_tools import ArrayType, DtypeType
+from .imagingextractor import ImagingExtractor
+
+
+class VolumetricImagingExtractor(ImagingExtractor):
+ """Class to combine multiple ImagingExtractor objects by depth plane."""
+
+ extractor_name = "VolumetricImaging"
+ installed = True
+    installation_mesg = ""
+
+ def __init__(self, imaging_extractors: List[ImagingExtractor]):
+ """Initialize a VolumetricImagingExtractor object from a list of ImagingExtractors.
+
+ Parameters
+ ----------
+ imaging_extractors: list of ImagingExtractor
+ list of imaging extractor objects
+ """
+ super().__init__()
+ assert isinstance(imaging_extractors, list), "Enter a list of ImagingExtractor objects as argument"
+ assert all(isinstance(imaging_extractor, ImagingExtractor) for imaging_extractor in imaging_extractors)
+ self._check_consistency_between_imaging_extractors(imaging_extractors)
+ self._imaging_extractors = imaging_extractors
+ self._num_planes = len(imaging_extractors)
+
+ def _check_consistency_between_imaging_extractors(self, imaging_extractors: List[ImagingExtractor]):
+ """Check that essential properties are consistent between extractors so that they can be combined appropriately.
+
+ Parameters
+ ----------
+ imaging_extractors: list of ImagingExtractor
+ list of imaging extractor objects
+
+ Raises
+ ------
+ AssertionError
+ If any of the properties are not consistent between extractors.
+
+ Notes
+ -----
+ This method checks the following properties:
+ - sampling frequency
+ - image size
+ - number of channels
+ - channel names
+ - data type
+ - num_frames
+ """
+ properties_to_check = dict(
+ get_sampling_frequency="The sampling frequency",
+ get_image_size="The size of a frame",
+ get_num_channels="The number of channels",
+ get_channel_names="The name of the channels",
+ get_dtype="The data type",
+ get_num_frames="The number of frames",
+ )
+ for method, property_message in properties_to_check.items():
+ values = [getattr(extractor, method)() for extractor in imaging_extractors]
+ unique_values = set(tuple(v) if isinstance(v, Iterable) else v for v in values)
+ assert (
+ len(unique_values) == 1
+ ), f"{property_message} is not consistent over the files (found {unique_values})."
+
+ def get_video(self, start_frame: Optional[int] = None, end_frame: Optional[int] = None) -> np.ndarray:
+ """Get the video frames.
+
+ Parameters
+ ----------
+ start_frame: int, optional
+ Start frame index (inclusive).
+ end_frame: int, optional
+ End frame index (exclusive).
+
+ Returns
+ -------
+ video: numpy.ndarray
+ The 3D video frames (num_frames, num_rows, num_columns, num_planes).
+ """
+ if start_frame is None:
+ start_frame = 0
+ elif start_frame < 0:
+ start_frame = self.get_num_frames() + start_frame
+ elif start_frame >= self.get_num_frames():
+ raise ValueError(
+ f"start_frame {start_frame} is greater than or equal to the number of frames {self.get_num_frames()}"
+ )
+ if end_frame is None:
+ end_frame = self.get_num_frames()
+ elif end_frame < 0:
+ end_frame = self.get_num_frames() + end_frame
+ elif end_frame > self.get_num_frames():
+ raise ValueError(f"end_frame {end_frame} is greater than the number of frames {self.get_num_frames()}")
+ if end_frame <= start_frame:
+ raise ValueError(f"end_frame {end_frame} is less than or equal to start_frame {start_frame}")
+
+ video = np.zeros((end_frame - start_frame, *self.get_image_size()), self.get_dtype())
+ for i, imaging_extractor in enumerate(self._imaging_extractors):
+ video[..., i] = imaging_extractor.get_video(start_frame, end_frame)
+ return video
+
+ def get_frames(self, frame_idxs: ArrayType) -> np.ndarray:
+ """Get specific video frames from indices (not necessarily continuous).
+
+ Parameters
+ ----------
+ frame_idxs: array-like
+ Indices of frames to return.
+
+ Returns
+ -------
+ frames: numpy.ndarray
+ The 3D video frames (num_rows, num_columns, num_planes).
+ """
+ if isinstance(frame_idxs, int):
+ frame_idxs = [frame_idxs]
+ for frame_idx in frame_idxs:
+ if frame_idx < -1 * self.get_num_frames() or frame_idx >= self.get_num_frames():
+ raise ValueError(f"frame_idx {frame_idx} is out of bounds")
+
+ # Note np.all([]) returns True so not all(np.diff(frame_idxs) == 1) returns False if frame_idxs is a single int
+ if not all(np.diff(frame_idxs) == 1):
+ frames = np.zeros((len(frame_idxs), *self.get_image_size()), self.get_dtype())
+ for i, imaging_extractor in enumerate(self._imaging_extractors):
+ frames[..., i] = imaging_extractor.get_frames(frame_idxs)
+ return frames
+ else:
+ return self.get_video(start_frame=frame_idxs[0], end_frame=frame_idxs[-1] + 1)
+
+ def get_image_size(self) -> Tuple:
+ """Get the size of a single frame.
+
+ Returns
+ -------
+ image_size: tuple
+ The size of a single frame (num_rows, num_columns, num_planes).
+ """
+ image_size = (*self._imaging_extractors[0].get_image_size(), self.get_num_planes())
+ return image_size
+
+ def get_num_planes(self) -> int:
+ """Get the number of depth planes.
+
+ Returns
+ -------
+ _num_planes: int
+ The number of depth planes.
+ """
+ return self._num_planes
+
+ def get_num_frames(self) -> int:
+ return self._imaging_extractors[0].get_num_frames()
+
+ def get_sampling_frequency(self) -> float:
+ return self._imaging_extractors[0].get_sampling_frequency()
+
+ def get_channel_names(self) -> list:
+ return self._imaging_extractors[0].get_channel_names()
+
+ def get_num_channels(self) -> int:
+ return self._imaging_extractors[0].get_num_channels()
+
+ def get_dtype(self) -> DtypeType:
+ return self._imaging_extractors[0].get_dtype()
| catalystneuro/roiextractors | 5bc32937a4c59356353f9cdfde813594778789e0 | diff --git a/src/roiextractors/testing.py b/src/roiextractors/testing.py
index d172089..47d694d 100644
--- a/src/roiextractors/testing.py
+++ b/src/roiextractors/testing.py
@@ -53,6 +53,7 @@ def generate_dummy_imaging_extractor(
num_channels: int = 1,
sampling_frequency: float = 30,
dtype: DtypeType = "uint16",
+ channel_names: Optional[list] = None,
):
"""Generate a dummy imaging extractor for testing.
@@ -78,7 +79,8 @@ def generate_dummy_imaging_extractor(
ImagingExtractor
An imaging extractor with random data fed into `NumpyImagingExtractor`.
"""
- channel_names = [f"channel_num_{num}" for num in range(num_channels)]
+ if channel_names is None:
+ channel_names = [f"channel_num_{num}" for num in range(num_channels)]
size = (num_frames, num_rows, num_columns, num_channels)
video = generate_dummy_video(size=size, dtype=dtype)
diff --git a/tests/test_volumetricimagingextractor.py b/tests/test_volumetricimagingextractor.py
new file mode 100644
index 0000000..62424da
--- /dev/null
+++ b/tests/test_volumetricimagingextractor.py
@@ -0,0 +1,121 @@
+import pytest
+import numpy as np
+from roiextractors.testing import generate_dummy_imaging_extractor
+from roiextractors import VolumetricImagingExtractor
+
+num_frames = 10
+
+
[email protected](scope="module", params=[1, 2])
+def imaging_extractors(request):
+ num_channels = request.param
+ return [generate_dummy_imaging_extractor(num_channels=num_channels, num_frames=num_frames) for _ in range(3)]
+
+
[email protected](scope="module")
+def volumetric_imaging_extractor(imaging_extractors):
+ return VolumetricImagingExtractor(imaging_extractors)
+
+
[email protected](
+ "params",
+ [
+ [dict(sampling_frequency=1), dict(sampling_frequency=2)],
+ [dict(num_rows=1), dict(num_rows=2)],
+ [dict(num_channels=1), dict(num_channels=2)],
+ [dict(channel_names=["a"], num_channels=1), dict(channel_names=["b"], num_channels=1)],
+ [dict(dtype=np.int16), dict(dtype=np.float32)],
+ [dict(num_frames=1), dict(num_frames=2)],
+ ],
+)
+def test_check_consistency_between_imaging_extractors(params):
+ imaging_extractors = [generate_dummy_imaging_extractor(**param) for param in params]
+ with pytest.raises(AssertionError):
+ VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+
+
[email protected]("start_frame, end_frame", [(None, None), (0, num_frames), (3, 7), (-2, -1)])
+def test_get_video(volumetric_imaging_extractor, start_frame, end_frame):
+ video = volumetric_imaging_extractor.get_video(start_frame=start_frame, end_frame=end_frame)
+ expected_video = []
+ for extractor in volumetric_imaging_extractor._imaging_extractors:
+ expected_video.append(extractor.get_video(start_frame=start_frame, end_frame=end_frame))
+ expected_video = np.array(expected_video)
+ expected_video = np.moveaxis(expected_video, 0, -1)
+ assert np.all(video == expected_video)
+
+
[email protected]("start_frame, end_frame", [(num_frames + 1, None), (None, num_frames + 1), (2, 1)])
+def test_get_video_invalid(volumetric_imaging_extractor, start_frame, end_frame):
+ with pytest.raises(ValueError):
+ volumetric_imaging_extractor.get_video(start_frame=start_frame, end_frame=end_frame)
+
+
[email protected]("frame_idxs", [0, [0, 1, 2], [0, num_frames - 1], [-3, -1]])
+def test_get_frames(volumetric_imaging_extractor, frame_idxs):
+ frames = volumetric_imaging_extractor.get_frames(frame_idxs=frame_idxs)
+ expected_frames = []
+ for extractor in volumetric_imaging_extractor._imaging_extractors:
+ expected_frames.append(extractor.get_frames(frame_idxs=frame_idxs))
+ expected_frames = np.array(expected_frames)
+ expected_frames = np.moveaxis(expected_frames, 0, -1)
+ assert np.all(frames == expected_frames)
+
+
[email protected]("frame_idxs", [num_frames, [0, num_frames], [-num_frames - 1, -1]])
+def test_get_frames_invalid(volumetric_imaging_extractor, frame_idxs):
+ with pytest.raises(ValueError):
+ volumetric_imaging_extractor.get_frames(frame_idxs=frame_idxs)
+
+
[email protected]("num_rows, num_columns, num_planes", [(1, 2, 3), (2, 1, 3), (3, 2, 1)])
+def test_get_image_size(num_rows, num_columns, num_planes):
+ imaging_extractors = [
+ generate_dummy_imaging_extractor(num_rows=num_rows, num_columns=num_columns) for _ in range(num_planes)
+ ]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_image_size() == (num_rows, num_columns, num_planes)
+
+
[email protected]("num_planes", [1, 2, 3])
+def test_get_num_planes(num_planes):
+ imaging_extractors = [generate_dummy_imaging_extractor() for _ in range(num_planes)]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_num_planes() == num_planes
+
+
[email protected]("num_frames", [1, 2, 3])
+def test_get_num_frames(num_frames):
+ imaging_extractors = [generate_dummy_imaging_extractor(num_frames=num_frames)]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_num_frames() == num_frames
+
+
[email protected]("sampling_frequency", [1, 2, 3])
+def test_get_sampling_frequency(sampling_frequency):
+ imaging_extractors = [generate_dummy_imaging_extractor(sampling_frequency=sampling_frequency)]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_sampling_frequency() == sampling_frequency
+
+
[email protected]("channel_names", [["Channel 1"], [" Channel 1 ", "Channel 2"]])
+def test_get_channel_names(channel_names):
+ imaging_extractors = [
+ generate_dummy_imaging_extractor(channel_names=channel_names, num_channels=len(channel_names))
+ ]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_channel_names() == channel_names
+
+
[email protected]("num_channels", [1, 2, 3])
+def test_get_num_channels(num_channels):
+ imaging_extractors = [generate_dummy_imaging_extractor(num_channels=num_channels)]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_num_channels() == num_channels
+
+
[email protected]("dtype", [np.float64, np.int16, np.uint8])
+def test_get_dtype(dtype):
+ imaging_extractors = [generate_dummy_imaging_extractor(dtype=dtype)]
+ volumetric_imaging_extractor = VolumetricImagingExtractor(imaging_extractors=imaging_extractors)
+ assert volumetric_imaging_extractor.get_dtype() == dtype
| Unify Volumetric/MultiPlane Imaging API
# Current Behavior
Right now, the only imaging extractor that supports 3D imaging is BrukerTiff. It does so by taking a list of `SinglePlaneImagingExtractors` and combines them a la `MultiImagingExtractor`. Note that each `SinglePlaneImagingExtractor` is itself a `MultiImagingExtractor` that combines multiple 'chunked' files corresponding to a single depth plane. Each `SinglePlaneImagingExtractor` instance only supports 1 channel and 1 plane, which are passed via `stream_name` during the `__init__`. Each `MultiPlaneImagingExtractor` instance supports 1 channel (but all planes) which is passed via `stream_name` during the `__init__`.
# Issues
- 'streams' are a low-cohesion concept to represent optical channels and depth planes.
- They are referred to by string names, which can be different in the `MultiPlaneImagingExtractor` (which only needs to specify the channel) and the `SinglePlaneImagingExtractor` (which needs to specify some formatted combination of channel and plane).
- They are supposed to be analogous to the 'AP', 'LF', etc. streams in spikeinterface, but those streams refer to collections of channels that undergo different types of pre-processing. In contrast, each stream as implemented in BrukerTiff just refers to 1 channel and 1 plane. There may be a relevant parallel in ophys (e.g. functional vs anatomical streams), but specifying a plane/channel combo is not that parallel.
- optical channels and depth planes are fully independent concepts (1 plane + many channels, 1 channel + many planes, many channels + many planes are all possible) with rich associated metadata that can guide the API: channel name, channel wavelength, plane depth, etc. By lumping these two axes together as 'streams' we lose the ability to interact with them separately.
- Combining multiple `SinglePlaneImagingExtractors` with `MultiImagingExtractor` is a bit hacky and confusing.
- `MultiImagingExtractor` was designed to concatenate `ImagingExtractors` along the `frames` axis, but here we want to combine multiple `IEs` along a separate depth axis.
- Using `MultiImagingExtractor` for this purpose leads to wonky stuff like `start_frames = [0, 0, ..., 0]`.
- BrukerTiff specifies channel during the `__init__` but all other IE's specify channel during the call to `get_video`.
- Not sure which is better, but we should be consistent.
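To make the inconsistency concrete, here is a minimal self-contained sketch of the two patterns; the class names, the stream-name format, and the array shapes below are stand-ins for illustration, not the real roiextractors API:

```python
import numpy as np


class StreamBasedExtractor:
    """Stand-in for the current BrukerTiff pattern: channel and plane are fixed at __init__ via a stream name."""

    def __init__(self, folder_path, stream_name):
        self.folder_path = folder_path
        self.stream_name = stream_name  # hypothetical name encoding both channel and plane

    def get_video(self, start_frame, end_frame):
        # no channel argument: this instance can only ever return its one stream
        return np.zeros((end_frame - start_frame, 512, 512))


class ChannelAtCallExtractor:
    """Stand-in for most other ImagingExtractors: the channel is chosen per get_video call."""

    def __init__(self, file_path):
        self.file_path = file_path

    def get_video(self, start_frame, end_frame, channel=0):
        return np.zeros((end_frame - start_frame, 512, 512))


StreamBasedExtractor("bruker_session", stream_name="Ch2_000002").get_video(0, 10)
ChannelAtCallExtractor("session.tif").get_video(0, 10, channel=0)
```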
# Proposed Solutions
- Specify channel and plane separately by index (or ID?) and then the extractor handles the logic of how to load that channel/plane combo in their file structure (Bruker, ScanImage, etc.)
- Add a `MultiPlaneImagingExtractor` base class to handle the combination of IE's along the depth axis. Could come with extra metadata properties like `get_plane_depth`.
- We need to specify plane at `__init__` time so that SinglePlaneImagingExtractors can be appropriately combined into MultiPlaneImagingExtractors. So, to keep things consistent, we should also specify channel at `__init__` time. This will require refactoring all the existing IE's, but I think it is the most convenient implementation agnostic to file structure. It also simplifies `get_video` to only take `start_frame` and `end_frame`, which I think is a good thing.
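For concreteness, a rough sketch of what the depth-combining extractor could look like, consistent with the usage exercised by the new tests above (an illustration only, not the final implementation; the child extractors' `get_image_size`/`get_video` calls are assumed to be the standard per-plane `ImagingExtractor` methods):

```python
from typing import List, Optional, Tuple

import numpy as np


class VolumetricImagingExtractor:
    """Sketch: combine one single-plane extractor per depth plane along a new depth axis."""

    def __init__(self, imaging_extractors: List):
        # each child extractor handles exactly one plane (plane/channel fixed at __init__)
        self._imaging_extractors = imaging_extractors

    def get_num_planes(self) -> int:
        return len(self._imaging_extractors)

    def get_image_size(self) -> Tuple[int, int, int]:
        num_rows, num_columns = self._imaging_extractors[0].get_image_size()
        return num_rows, num_columns, self.get_num_planes()

    def get_video(self, start_frame: Optional[int] = None, end_frame: Optional[int] = None) -> np.ndarray:
        # no channel/plane argument any more; stack the per-plane videos on a trailing depth axis
        videos = [ie.get_video(start_frame, end_frame) for ie in self._imaging_extractors]
        return np.stack(videos, axis=-1)  # shape: (frames, rows, columns, planes)
```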
### Do you have any interest in helping implement the feature?
Yes.
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/catalystneuro/roiextractors/blob/master/.github/CODE_OF_CONDUCT.rst)
- [X] Have you ensured this bug was not already [reported](https://github.com/catalystneuro/roiextractors/issues)? | 0.0 | 5bc32937a4c59356353f9cdfde813594778789e0 | [
"tests/test_volumetricimagingextractor.py::test_check_consistency_between_imaging_extractors[params0]",
"tests/test_volumetricimagingextractor.py::test_check_consistency_between_imaging_extractors[params1]",
"tests/test_volumetricimagingextractor.py::test_check_consistency_between_imaging_extractors[params2]",
"tests/test_volumetricimagingextractor.py::test_check_consistency_between_imaging_extractors[params3]",
"tests/test_volumetricimagingextractor.py::test_check_consistency_between_imaging_extractors[params4]",
"tests/test_volumetricimagingextractor.py::test_check_consistency_between_imaging_extractors[params5]",
"tests/test_volumetricimagingextractor.py::test_get_video[1-None-None]",
"tests/test_volumetricimagingextractor.py::test_get_video[1-0-10]",
"tests/test_volumetricimagingextractor.py::test_get_video[1-3-7]",
"tests/test_volumetricimagingextractor.py::test_get_video[1--2--1]",
"tests/test_volumetricimagingextractor.py::test_get_video_invalid[1-11-None]",
"tests/test_volumetricimagingextractor.py::test_get_video_invalid[1-None-11]",
"tests/test_volumetricimagingextractor.py::test_get_video_invalid[1-2-1]",
"tests/test_volumetricimagingextractor.py::test_get_frames[1-0]",
"tests/test_volumetricimagingextractor.py::test_get_frames[1-frame_idxs1]",
"tests/test_volumetricimagingextractor.py::test_get_frames[1-frame_idxs2]",
"tests/test_volumetricimagingextractor.py::test_get_frames[1-frame_idxs3]",
"tests/test_volumetricimagingextractor.py::test_get_frames_invalid[1-10]",
"tests/test_volumetricimagingextractor.py::test_get_frames_invalid[1-frame_idxs1]",
"tests/test_volumetricimagingextractor.py::test_get_frames_invalid[1-frame_idxs2]",
"tests/test_volumetricimagingextractor.py::test_get_video[2-None-None]",
"tests/test_volumetricimagingextractor.py::test_get_video[2-0-10]",
"tests/test_volumetricimagingextractor.py::test_get_video[2-3-7]",
"tests/test_volumetricimagingextractor.py::test_get_video[2--2--1]",
"tests/test_volumetricimagingextractor.py::test_get_video_invalid[2-11-None]",
"tests/test_volumetricimagingextractor.py::test_get_video_invalid[2-None-11]",
"tests/test_volumetricimagingextractor.py::test_get_video_invalid[2-2-1]",
"tests/test_volumetricimagingextractor.py::test_get_frames[2-0]",
"tests/test_volumetricimagingextractor.py::test_get_frames[2-frame_idxs1]",
"tests/test_volumetricimagingextractor.py::test_get_frames[2-frame_idxs2]",
"tests/test_volumetricimagingextractor.py::test_get_frames[2-frame_idxs3]",
"tests/test_volumetricimagingextractor.py::test_get_frames_invalid[2-10]",
"tests/test_volumetricimagingextractor.py::test_get_frames_invalid[2-frame_idxs1]",
"tests/test_volumetricimagingextractor.py::test_get_frames_invalid[2-frame_idxs2]",
"tests/test_volumetricimagingextractor.py::test_get_image_size[1-2-3]",
"tests/test_volumetricimagingextractor.py::test_get_image_size[2-1-3]",
"tests/test_volumetricimagingextractor.py::test_get_image_size[3-2-1]",
"tests/test_volumetricimagingextractor.py::test_get_num_planes[1]",
"tests/test_volumetricimagingextractor.py::test_get_num_planes[2]",
"tests/test_volumetricimagingextractor.py::test_get_num_planes[3]",
"tests/test_volumetricimagingextractor.py::test_get_num_frames[1]",
"tests/test_volumetricimagingextractor.py::test_get_num_frames[2]",
"tests/test_volumetricimagingextractor.py::test_get_num_frames[3]",
"tests/test_volumetricimagingextractor.py::test_get_sampling_frequency[1]",
"tests/test_volumetricimagingextractor.py::test_get_sampling_frequency[2]",
"tests/test_volumetricimagingextractor.py::test_get_sampling_frequency[3]",
"tests/test_volumetricimagingextractor.py::test_get_channel_names[channel_names0]",
"tests/test_volumetricimagingextractor.py::test_get_channel_names[channel_names1]",
"tests/test_volumetricimagingextractor.py::test_get_num_channels[1]",
"tests/test_volumetricimagingextractor.py::test_get_num_channels[2]",
"tests/test_volumetricimagingextractor.py::test_get_num_channels[3]",
"tests/test_volumetricimagingextractor.py::test_get_dtype[float64]",
"tests/test_volumetricimagingextractor.py::test_get_dtype[int16]",
"tests/test_volumetricimagingextractor.py::test_get_dtype[uint8]"
]
| []
| {
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-09-20 20:48:28+00:00 | bsd-3-clause | 1,513 |
|
catkin__catkin_tools-728 | diff --git a/catkin_tools/context.py b/catkin_tools/context.py
index c6ac7c7..f035063 100644
--- a/catkin_tools/context.py
+++ b/catkin_tools/context.py
@@ -191,6 +191,8 @@ class Context(object):
# Get the active profile
profile = profile or opts_vars.get('profile', None) or metadata.get_active_profile(workspace)
+ if not metadata.active_profile_set(workspace):
+ metadata.set_active_profile(workspace, profile)
opts_vars['profile'] = profile
# Initialize empty metadata/args
diff --git a/catkin_tools/metadata.py b/catkin_tools/metadata.py
index 0545236..5337878 100644
--- a/catkin_tools/metadata.py
+++ b/catkin_tools/metadata.py
@@ -243,6 +243,7 @@ def init_metadata_root(workspace_path, reset=False):
else:
# Create a new .catkin_tools directory
os.mkdir(metadata_root_path)
+ os.mkdir(os.path.join(metadata_root_path, 'profiles'))
# Write the README file describing the directory
with open(os.path.join(metadata_root_path, 'README'), 'w') as metadata_readme:
@@ -357,6 +358,12 @@ def get_active_profile(workspace_path):
return DEFAULT_PROFILE_NAME
+def active_profile_set(workspace_path):
+ """Check if the active profile is set in profiles.yml"""
+ profiles_data = get_profiles_data(workspace_path)
+ return 'active' in profiles_data
+
+
def get_profiles_data(workspace_path):
"""Get the contents of the profiles file.
diff --git a/catkin_tools/verbs/catkin_build/cli.py b/catkin_tools/verbs/catkin_build/cli.py
index c1ef811..99696ee 100644
--- a/catkin_tools/verbs/catkin_build/cli.py
+++ b/catkin_tools/verbs/catkin_build/cli.py
@@ -45,6 +45,7 @@ from catkin_tools.jobs.utils import CommandMissing
from catkin_tools.jobs.utils import loadenv
from catkin_tools.metadata import find_enclosing_workspace
from catkin_tools.metadata import get_metadata
+from catkin_tools.metadata import init_metadata_root
from catkin_tools.metadata import update_metadata
from catkin_tools.resultspace import load_resultspace_environment
from catkin_tools.terminal_color import set_color
@@ -252,14 +253,15 @@ def main(opts):
sys.exit(clr("[build] @!@{rf}Error:@| With --no-deps, you must specify packages to build."))
# Load the context
- if opts.build_this or opts.start_with_this:
- ctx = Context.load(opts.workspace, opts.profile, opts, append=True, strict=True)
- else:
- ctx = Context.load(opts.workspace, opts.profile, opts, append=True)
+ ctx = Context.load(opts.workspace, opts.profile, opts, append=True, strict=True)
# Handle no workspace
- if ctx is None:
+ if ctx is None and (opts.build_this or opts.start_with_this):
sys.exit(clr("[build] @!@{rf}Error:@| The current folder is not part of a catkin workspace."))
+ elif ctx is None:
+ init_metadata_root(opts.workspace or os.getcwd())
+ ctx = Context.load(opts.workspace, opts.profile, opts, append=True)
+ log(clr('@!@{cf}Initialized new catkin workspace in `{}`@|').format(ctx.workspace))
# Initialize the build configuration
make_args, makeflags, cli_flags, jobserver = configure_make_args(
diff --git a/catkin_tools/verbs/catkin_config/cli.py b/catkin_tools/verbs/catkin_config/cli.py
index 4d46840..0fd729e 100644
--- a/catkin_tools/verbs/catkin_config/cli.py
+++ b/catkin_tools/verbs/catkin_config/cli.py
@@ -19,6 +19,7 @@ import sys
from catkin_tools.argument_parsing import add_cmake_and_make_and_catkin_make_args
from catkin_tools.argument_parsing import add_context_args
from catkin_tools.context import Context
+from catkin_tools.metadata import init_metadata_root
from catkin_tools.terminal_color import ColorMapper
from catkin_tools.terminal_color import fmt
@@ -179,21 +180,35 @@ def main(opts):
opts.profile,
opts,
append=opts.append_args,
- remove=opts.remove_args)
+ remove=opts.remove_args,
+ strict=True)
do_init = opts.init or not no_action
summary_notes = []
- if not context.initialized() and do_init:
- summary_notes.append(clr('@!@{cf}Initialized new catkin workspace in `{}`@|').format(context.workspace))
+ if not context and not do_init:
+ # Don't initialize a new workspace
+ print(clr('@!@{rf}WARNING:@| Workspace is not yet initialized. '
+ 'Use catkin init or run catkin config with --init.'))
+
+ else:
+ # Either initialize it or it already exists
+ if not context:
+ init_metadata_root(opts.workspace or os.getcwd())
+ context = Context.load(
+ opts.workspace,
+ opts.profile,
+ opts,
+ append=opts.append_args,
+ remove=opts.remove_args)
+ summary_notes.append(clr('@!@{cf}Initialized new catkin workspace in `{}`@|').format(context.workspace))
- if context.initialized() or do_init:
Context.save(context)
- if opts.mkdirs and not context.source_space_exists():
- os.makedirs(context.source_space_abs)
+ if opts.mkdirs and not context.source_space_exists():
+ os.makedirs(context.source_space_abs)
- print(context.summary(notes=summary_notes))
+ print(context.summary(notes=summary_notes))
except IOError as exc:
# Usually happens if workspace is already underneath another catkin_tools workspace
| catkin/catkin_tools | c1707309479138b22c97d966b9095dd3ac844197 | diff --git a/tests/system/verbs/catkin_config/test_config.py b/tests/system/verbs/catkin_config/test_config.py
index 649d597..d214679 100644
--- a/tests/system/verbs/catkin_config/test_config.py
+++ b/tests/system/verbs/catkin_config/test_config.py
@@ -14,7 +14,7 @@ from ...workspace_factory import workspace_factory
def test_config_no_ws():
with redirected_stdio() as (out, err):
assert catkin_success(['config'])
- assert_warning_message(out, 'Workspace .+ is not yet initialized')
+ assert_warning_message(out, 'Workspace is not yet initialized')
@in_temporary_directory
diff --git a/tests/system/verbs/catkin_profile/test_profile.py b/tests/system/verbs/catkin_profile/test_profile.py
index 480b6a7..f25c60a 100644
--- a/tests/system/verbs/catkin_profile/test_profile.py
+++ b/tests/system/verbs/catkin_profile/test_profile.py
@@ -1,3 +1,7 @@
+import os
+
+import yaml
+
from ....utils import assert_cmd_success
from ....utils import catkin_success
from ....utils import in_temporary_directory
@@ -7,6 +11,18 @@ from ...workspace_factory import workspace_factory
BUILD = ['build', '--no-notify', '--no-status']
+BUILD = ['build', '--no-notify', '--no-status']
+
+
+def assert_active_profile(workspace, profile):
+ profile_file = os.path.join(workspace, '.catkin_tools', 'profiles', 'profiles.yaml')
+ if not os.path.exists(profile_file):
+ assert profile == 'default'
+ else:
+ with open(profile_file) as f:
+ profiles = yaml.safe_load(f)
+ assert profiles.get('active', 'default') == profile
+
@in_temporary_directory
def test_profile_list():
@@ -15,6 +31,7 @@ def test_profile_list():
assert catkin_success(['init'])
assert catkin_success(BUILD)
assert catkin_success(['profile', 'list'])
+ assert_active_profile('.', 'default')
@in_temporary_directory
@@ -24,6 +41,10 @@ def test_profile_set():
assert catkin_success(['init'])
assert catkin_success(BUILD)
assert catkin_success(['profile', 'set', 'default'])
+ assert catkin_success(['profile', 'add', 'second'])
+ assert_active_profile('.', 'default')
+ assert catkin_success(['profile', 'set', 'second'])
+ assert_active_profile('.', 'second')
def test_profile_copy():
@@ -33,6 +54,7 @@ def test_profile_copy():
assert catkin_success(['config', '--make-args', 'test'])
assert catkin_success(['profile', 'add', '--copy', 'default', 'mycopy'])
assert_in_config('.', 'mycopy', 'make_args', ['test'])
+ assert_active_profile('.', 'default')
def test_profile_extend():
@@ -44,3 +66,10 @@ def test_profile_extend():
assert catkin_success(['config', '--profile', 'myextend', '--skiplist', 'mypackage'])
assert_in_config('.', 'default', 'make_args', ['test'])
assert_in_config('.', 'myextend', 'blacklist', ['mypackage'])
+
+
+def test_different_first_profile():
+ with workspace_factory() as wf:
+ wf.build()
+ assert catkin_success(BUILD + ['--profile', 'release'])
+ assert_active_profile(wf.workspace, 'release')
| catkin locate --build does not find 'profile'_build
### System Info
* Operating System: Ubuntu 14.04
* Python Version: Python 2.7.6
* Version of catkin_tools: catkin_tools 0.4.4
* ROS Distro: indigo
### Build / Run Issue
* [ ] Works with `catkin_make`
* [ ] Works with `catkin_make_isolated --merge`
* [ ] Works with `catkin build`
* [ ] Works with `catkin build -p1`
* [x] I did not `read this`
### Expected Behavior
Given the following workspace:
```terminal
~/workspace$ l
build_release/ devel_release/ logs_release/ src/
```
I was expecting the `locate` command to return the following:
```terminal
~/workspace$ catkin locate --workspace $(pwd) --build
/home/user/workspace/build_release
```
### Actual Behavior
It returns the default `build` directory even though that directory does not exist:
```terminal
~/workspace$ catkin locate --workspace $(pwd) --build
/home/user/workspace/build
```
### Steps to Reproduce the Issue
```bash
~$ mkdir -p dummy_ws/src && cd dummy_ws
~/dummy_ws$ catkin config --profile release -x _release --cmake-args -DCMAKE_BUILD_TYPE=Release
~/dummy_ws$ catkin build --profile release
~/dummy_ws$ catkin locate --workspace $(pwd) --build
/home/user/dummy_ws/build
```
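A small diagnostic that may explain what is happening (hypothetical sketch: the `profiles.yaml` path and the `active` key below are assumptions about where catkin_tools stores its profile metadata): the `catkin build --profile release` call apparently never records `release` as the active profile, so a later `catkin locate` without `--profile` resolves the spaces of the `default` profile and reports the unsuffixed `build/` directory.

```python
# Hypothetical check of which profile the workspace currently considers active.
import os

import yaml  # PyYAML

workspace = os.path.expanduser("~/dummy_ws")
profiles_path = os.path.join(workspace, ".catkin_tools", "profiles", "profiles.yaml")

active = "default"
if os.path.exists(profiles_path):
    with open(profiles_path) as f:
        active = (yaml.safe_load(f) or {}).get("active", "default")

print(active)  # still 'default' here, even after `catkin build --profile release`
```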
| 0.0 | c1707309479138b22c97d966b9095dd3ac844197 | [
"tests/system/verbs/catkin_config/test_config.py::test_config_no_ws"
]
| [
"tests/system/verbs/catkin_config/test_config.py::test_config_non_bare",
"tests/system/verbs/catkin_config/test_config.py::test_config_unchanged",
"tests/system/verbs/catkin_config/test_config.py::test_config_no_args_flags",
"tests/system/verbs/catkin_config/test_config.py::test_config_no_buildlist",
"tests/system/verbs/catkin_config/test_config.py::test_config_no_skiplist",
"tests/system/verbs/catkin_profile/test_profile.py::test_profile_copy",
"tests/system/verbs/catkin_profile/test_profile.py::test_profile_extend"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-04-19 20:25:57+00:00 | apache-2.0 | 1,514 |
|
catmaid__catpy-31 | diff --git a/HISTORY.rst b/HISTORY.rst
index 6a0161c..1d261d2 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -2,11 +2,13 @@
History
=======
+
In progress
-----------
* Allow networkx 2+ to be used
* Add NameResolver application
+* Add enum for connector relation types
0.2.0 (2018-05-30)
diff --git a/catpy/__init__.py b/catpy/__init__.py
index 7365394..b05b812 100644
--- a/catpy/__init__.py
+++ b/catpy/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-from catpy.client import CatmaidClient, CoordinateTransformer, CatmaidUrl
+from catpy.client import CatmaidClient, CoordinateTransformer, CatmaidUrl, ConnectorRelation # noqa
from catpy.version import __version__, __version_info__ # noqa
from catpy.author import __author__, __email__ # noqa
from catpy import image
diff --git a/catpy/applications/export.py b/catpy/applications/export.py
index 7067a36..bfb42d6 100644
--- a/catpy/applications/export.py
+++ b/catpy/applications/export.py
@@ -7,6 +7,8 @@ import networkx as nx
from networkx.readwrite import json_graph
from catpy.applications.base import CatmaidClientApplication
+from catpy.applications.relation_identifier import RelationIdentifier
+from catpy.client import ConnectorRelation
NX_VERSION_INFO = tuple(int(i) for i in nx.__version__.split('.'))
@@ -189,8 +191,10 @@ class ExportWidget(CatmaidClientApplication):
-------
dict
"""
+ rel_id = self.get_relation_identifier()
skeletons = dict()
+ warnings = set()
for skeleton_id in skeleton_ids:
@@ -207,19 +211,36 @@ class ExportWidget(CatmaidClientApplication):
}
for connector in data[1]:
- if connector[2] not in [0, 1]:
+ try:
+ relation = rel_id.from_id(connector[2])
+ except ValueError as e:
+ msg = str(e)
+ if " is not a valid " in msg:
+ warnings.add(str(e))
+ continue
+ else:
+ raise e
+
+ if not relation.is_synaptic:
continue
conn_id = int(connector[1])
if conn_id not in skeleton["connectors"]:
skeleton["connectors"][conn_id] = {
- "presynaptic_to": [], "postsynaptic_to": []
+ r.name: [] for r in ConnectorRelation if r.is_synaptic
}
skeleton["connectors"][conn_id]["location"] = connector[3:6]
- relation = "postsynaptic_to" if connector[2] == 1 else "presynaptic_to"
- skeleton["connectors"][conn_id][relation].append(connector[0])
+ skeleton["connectors"][conn_id][relation.name].append(connector[0])
skeletons[int(skeleton_id)] = skeleton
+ warn(
+ "Skeleton representations contained some unknown treenode->connector relation IDs:\n\t"
+ "\n\t".join(sorted(warnings))
+ )
+
return {"skeletons": skeletons}
+
+ def get_relation_identifier(self):
+ return RelationIdentifier(self._catmaid)
diff --git a/catpy/applications/relation_identifier.py b/catpy/applications/relation_identifier.py
new file mode 100644
index 0000000..376fa61
--- /dev/null
+++ b/catpy/applications/relation_identifier.py
@@ -0,0 +1,88 @@
+from collections import defaultdict
+
+from catpy.client import ConnectorRelation
+from catpy.applications import CatmaidClientApplication
+
+
+class RelationIdentifier(CatmaidClientApplication):
+ """Class to convert connector relation IDs to ConnectorRelation enums and back.
+
+ The mappings are cached on the class, and so do not need to be re-fetched for new instances.
+
+ The mappings are retrieved on a per-project basis.
+ """
+ id_to_relation = defaultdict(dict)
+ relation_to_id = defaultdict(dict)
+
+ def _check_pid(self):
+ if self.project_id is None:
+ raise RuntimeError("No project ID defined; cannot get relation name-id mappings")
+ else:
+ return self.project_id
+
+ def _fetch_mappings(self, project_id):
+ return self.get((project_id, 'connectors', 'types'))
+
+ def populate_mappings(self, project_id):
+ """Populate the id-relation mappings cache for the given project"""
+ if isinstance(self, type):
+ raise ValueError("Cannot populate relation ID mappings as a class method")
+
+ id_to_rel = dict()
+ rel_to_id = dict()
+ for obj in self._fetch_mappings(project_id):
+ rel = ConnectorRelation[obj["relation"]]
+ rel_id = obj["relation_id"]
+
+ id_to_rel[rel_id] = rel
+ rel_to_id[rel] = rel_id
+
+ type(self).id_to_relation[project_id] = id_to_rel
+ type(self).relation_to_id[project_id] = rel_to_id
+
+ def _get_dict(self, is_relation, project_id):
+ project_id = project_id or self._check_pid()
+ d = (self.id_to_relation, self.relation_to_id)[is_relation]
+ if project_id not in d:
+ self.populate_mappings(project_id)
+ return d[project_id]
+
+ def from_id(self, relation_id, project_id=None):
+ """
+ Return the ConnectorRelation for the given relation ID.
+ If ``project_id`` is given and you know this project's mappings are already populated
+ (possibly via a different instance),
+ this can be used as a class method.
+
+ Parameters
+ ----------
+ relation_id
+ project_id
+
+ Returns
+ -------
+ ConnectorRelation
+ """
+ if relation_id == -1:
+ return ConnectorRelation.other
+ return self._get_dict(False, project_id)[relation_id]
+
+ def to_id(self, relation, project_id=None):
+ """
+ Return the integer ID for the given ConnectorRelation.
+ If ``project_id`` is given and you know this project's mappings are already populated,
+ (possibly via a different instance
+ this can be used as a class method.
+
+ Parameters
+ ----------
+ relation
+ project_id
+
+ Returns
+ -------
+ int
+ """
+ if relation == ConnectorRelation.other:
+ return -1
+ return self._get_dict(True, project_id)[relation]
diff --git a/catpy/client.py b/catpy/client.py
index 10379e5..385a9ce 100644
--- a/catpy/client.py
+++ b/catpy/client.py
@@ -8,22 +8,97 @@ from abc import ABCMeta, abstractmethod
from warnings import warn
from six import string_types, add_metaclass
-from enum import IntEnum
+from enum import IntEnum, Enum
import requests
import numpy as np
+class ConnectorRelationType(Enum):
+ SYNAPTIC = "Synaptic"
+ GAP_JUNCTION = "Gap junction"
+ ABUTTING = "Abutting"
+ ATTACHMENT = "Attachment"
+ SPATIAL = "Spatial"
+ OTHER = ""
+
+ @classmethod
+ def from_relation(cls, relation):
+ return {
+ ConnectorRelation.presynaptic_to: cls.SYNAPTIC,
+ ConnectorRelation.postsynaptic_to: cls.SYNAPTIC,
+ ConnectorRelation.gapjunction_with: cls.GAP_JUNCTION,
+ ConnectorRelation.abutting: cls.ABUTTING,
+ ConnectorRelation.attached_to: cls.ATTACHMENT,
+ ConnectorRelation.close_to: cls.SPATIAL,
+ ConnectorRelation.other: cls.OTHER
+ }[relation]
+
+
+class ConnectorRelation(Enum):
+ """Enum describing the link between a treenode and connector, i.e. the treenode is ____ to the connector.
+
+ The enum's ``name`` is CATMAID's concept of "relation name":
+ what is returned in the ``relation`` field of the <pid>/connectors/types/ response.
+
+ The enum's ``value`` is the ``name`` field of the <pid>/connectors/types/ response.
+
+ The mappings from relation name to relation ID are project-specific and must be fetched from CATMAID.
+ """
+ other = ""
+ presynaptic_to = "Presynaptic"
+ postsynaptic_to = "Postsynaptic"
+ gapjunction_with = "Gap junction"
+ abutting = "Abutting"
+ attached_to = "Attachment"
+ close_to = "Close to"
+
+ @property
+ def type(self):
+ return ConnectorRelationType.from_relation(self)
+
+ @property
+ def is_synaptic(self):
+ return self.type == ConnectorRelationType.SYNAPTIC
+
+ def __str__(self):
+ return self.value
+
+
class StackOrientation(IntEnum):
+ """Can be iterated over or indexed like the lower-case string representation of the orientation"""
XY = 0
XZ = 1
ZY = 2
+ def __str__(self):
+ return self.name.lower()
+
+ @classmethod
+ def from_str(cls, s):
+ return {o.name: o for o in StackOrientation}[s.upper()]
-orientation_strs = {
- StackOrientation.XY: 'xy',
- StackOrientation.XZ: 'xz',
- StackOrientation.ZY: 'zy'
-}
+ @classmethod
+ def from_value(cls, value, default='xy'):
+ """Convert an int, str or StackOrientation into a StackOrientation.
+ A NoneType ``value`` will use the default orientation."""
+ if value is None:
+ value = default
+
+ if isinstance(value, string_types):
+ return cls.from_str(value)
+ elif isinstance(value, int):
+ return cls(value)
+ else:
+ raise TypeError("Cannot create a StackOrientation from {}".format(type(value).__name__))
+
+ def __iter__(self):
+ return iter(str(self))
+
+ def __getitem__(self, item):
+ return str(self)[item]
+
+ def __contains__(self, item):
+ return item in str(self)
def make_url(base_url, *args):
@@ -334,7 +409,7 @@ class CoordinateTransformer(object):
StackOrientation
int corresponding to StackOrientation
'xy', 'xz', or 'zy'
- None (reverts to default)
+ None (reverts to default 'xy')
Default StackOrientation.XY
scale_z : bool
Whether or not to scale z coordinates when using stack_to_scaled* methods. Default False is recommended, but
@@ -349,7 +424,7 @@ class CoordinateTransformer(object):
self.translation = {dim: translation.get(dim, 0) for dim in 'zyx'}
self.scale_z = scale_z
- self.orientation = self._validate_orientation(orientation)
+ self.orientation = StackOrientation.from_value(orientation)
self.depth_dim = [dim for dim in 'zyx' if dim not in self.orientation][0]
# mapping of project dimension to stack dimension, based on orientation
@@ -361,16 +436,6 @@ class CoordinateTransformer(object):
# mapping of stack dimension to project dimension, based on orientation
self._p2s = {value: key for key, value in self._s2p.items()}
- def _validate_orientation(self, orientation):
- if orientation is None:
- orientation = StackOrientation.XY
- orientation = orientation_strs.get(orientation, orientation)
- lower = orientation.lower()
- if lower not in orientation_strs.values():
- raise ValueError("orientation must be a StackOrientation, 'xy', 'xz', or 'zy'")
-
- return lower
-
@classmethod
def from_catmaid(cls, catmaid_client, stack_id):
"""
diff --git a/catpy/image.py b/catpy/image.py
index cd25378..fe3e27c 100644
--- a/catpy/image.py
+++ b/catpy/image.py
@@ -23,7 +23,7 @@ import requests
from requests_futures.sessions import FuturesSession
from catpy import CoordinateTransformer
-
+from catpy.client import StackOrientation
logger = logging.getLogger()
@@ -96,11 +96,28 @@ DEFAULT_ROI_MODE = ROIMode.STACK
class TileSourceType(IntEnum):
+ """https://catmaid.readthedocs.io/en/stable/tile_sources.html"""
FILE_BASED = 1
+ REQUEST_QUERY = 2
+ HDF5 = 3
FILE_BASED_WITH_ZOOM_DIRS = 4
DIR_BASED = 5
+ DVID_IMAGEBLK = 6
RENDER_SERVICE = 7
+ DVID_IMAGETILE = 8
FLIXSERVER = 9
+ H2N5_TILES = 10
+
+ def format(self, **kwargs):
+ try:
+ format_url = format_urls[self]
+ except KeyError:
+ raise ValueError(
+ "{} is not supported by TileFetcher, supported types are below:\n\t{}".format(
+ self, '\n\t'.join(str(k) for k in sorted(format_urls))
+ )
+ )
+ return format_url.format(**kwargs)
format_urls = {
@@ -267,7 +284,7 @@ class StackMirror(object):
self.title = str(title)
self.position = int(position)
- self.format_url = format_urls[self.tile_source_type].format(**self.__dict__)
+ self.format_url = self.tile_source_type.format(**self.__dict__)
def generate_url(self, tile_index):
"""
@@ -421,7 +438,7 @@ class ProjectStack(Stack):
super(ProjectStack, self).__init__(dimension, broken_slices, canary_location)
self.translation = translation
self.resolution = resolution
- self.orientation = orientation
+ self.orientation = StackOrientation.from_value(orientation)
@classmethod
def from_stack_info(cls, stack_info):
@@ -438,7 +455,7 @@ class ProjectStack(Stack):
"""
stack = cls(
stack_info['dimension'], stack_info['translation'], stack_info['resolution'],
- cls.orientation_choices[stack_info['orientation']], stack_info['broken_slices'],
+ stack_info['orientation'], stack_info['broken_slices'],
stack_info['canary_location']
)
mirrors = [StackMirror.from_dict(d) for d in stack_info['mirrors']]
@@ -865,7 +882,7 @@ class ImageFetcher(object):
if roi_mode == ROIMode.PROJECT:
if not isinstance(self.stack, ProjectStack):
raise ValueError("ImageFetcher's stack is not related to a project, cannot use ROIMode.PROJECT")
- if self.stack.orientation.lower() != 'xy':
+ if self.stack.orientation != StackOrientation.XY:
warn("Stack orientation differs from project: returned array's orientation will reflect"
"stack orientation, not project orientation")
roi_tgt = self.coord_trans.project_to_stack_array(roi_tgt, dims=self.target_orientation)
| catmaid/catpy | 474b667314160b8aeaaf13933a686237ebf03f2d | diff --git a/tests/common.py b/tests/common.py
new file mode 100644
index 0000000..b2552d2
--- /dev/null
+++ b/tests/common.py
@@ -0,0 +1,54 @@
+try:
+ from mock import Mock
+except ImportError:
+ from unittest.mock import Mock
+
+from catpy.applications.relation_identifier import RelationIdentifier
+
+import pytest
+
+
[email protected]
+def connectors_types():
+ return [
+ {
+ 'name': 'Presynaptic',
+ 'type': 'Synaptic',
+ 'relation': 'presynaptic_to',
+ 'relation_id': 0
+ }, {
+ 'name': 'Postsynaptic',
+ 'type': 'Synaptic',
+ 'relation': 'postsynaptic_to',
+ 'relation_id': 1
+ }, {
+ 'name': 'Abutting',
+ 'type': 'Abutting',
+ 'relation': 'abutting',
+ 'relation_id': 3
+ }, {
+ 'name': 'Gap junction',
+ 'type': 'Gap junction',
+ 'relation': 'gapjunction_with',
+ 'relation_id': 2
+ }, {
+ 'name': 'Attachment',
+ 'type': 'Attachment',
+ 'relation': 'attached_to',
+ 'relation_id': 4
+ }, {
+ 'name': 'Close to',
+ 'type': 'Spatial',
+ 'relation': 'close_to',
+ 'relation_id': 5
+ }
+ ]
+
+
[email protected]
+def relation_identifier(connectors_types):
+ catmaid = Mock()
+ catmaid.project_id = 1
+ relid = RelationIdentifier(catmaid)
+ relid._fetch_mappings = Mock(return_value=connectors_types)
+ return relid
diff --git a/tests/test_export.py b/tests/test_export.py
index 5e49e1d..aa19a66 100644
--- a/tests/test_export.py
+++ b/tests/test_export.py
@@ -15,6 +15,7 @@ except ImportError:
from mock import Mock
from tests.constants import FIXTURE_ROOT
+from tests.common import connectors_types, relation_identifier # noqa
nx_version = tuple(int(i) for i in nx.__version__.split('.'))
@@ -46,11 +47,13 @@ def expected_graph():
return json_graph.node_link_graph(json.load(f))
[email protected]
-def export_widget():
[email protected] # noqa
+def export_widget(relation_identifier):
catmaid = Mock()
catmaid.project_id = 1
- return ExportWidget(catmaid)
+ exp = ExportWidget(catmaid)
+ exp.get_relation_identifier = Mock(return_value=relation_identifier)
+ return exp
def test_reads_nodelinks(nodelink_json, export_widget, expected_graph):
diff --git a/tests/test_image.py b/tests/test_image.py
index b73fa41..eac5673 100644
--- a/tests/test_image.py
+++ b/tests/test_image.py
@@ -237,7 +237,7 @@ def test_stackmirror_corrects_file_extension():
assert mirror_dot.file_extension == mirror_no_dot.file_extension == 'png'
[email protected]('tile_source_type', list(TileSourceType))
[email protected]('tile_source_type', format_urls.keys())
def test_stackmirror_formats_url(tile_source_type):
mirror = StackMirror(IMAGE_BASE, 256, 256, tile_source_type, 'png')
tile_idx = TileIndex(0, 0, 0, 0, 256, 256)
diff --git a/tests/test_relation_identifier.py b/tests/test_relation_identifier.py
new file mode 100644
index 0000000..7370a6c
--- /dev/null
+++ b/tests/test_relation_identifier.py
@@ -0,0 +1,11 @@
+from catpy.client import ConnectorRelation
+
+from tests.common import relation_identifier, connectors_types # noqa
+
+
+def test_from_id(relation_identifier): # noqa
+ assert relation_identifier.from_id(0) == ConnectorRelation.presynaptic_to
+
+
+def test_to_id(relation_identifier): # noqa
+ assert relation_identifier.to_id(ConnectorRelation.presynaptic_to) == 0
diff --git a/tests/test_utils/test_coordinate_transformer.py b/tests/test_utils/test_coordinate_transformer.py
index 8d8128c..8a0f5c1 100644
--- a/tests/test_utils/test_coordinate_transformer.py
+++ b/tests/test_utils/test_coordinate_transformer.py
@@ -225,14 +225,14 @@ def test_stack_to_scaled_array(coordinate_generator, default_coord_transformer,
])
def test_can_validate_orientation_valid(orientation):
trans = CoordinateTransformer(orientation=orientation)
- assert trans.orientation == 'xy'
+ assert trans.orientation == StackOrientation.XY
assert trans.depth_dim == 'z'
@pytest.mark.parametrize('orientation,expected_exception', [
- [3, AttributeError],
- ['xyz', ValueError],
- ['xc', ValueError]
+ [3, ValueError],
+ ['xyz', KeyError],
+ ['xc', KeyError]
])
def test_can_validate_orientation_invalid(orientation, expected_exception):
with pytest.raises(expected_exception):
| IntEnums
For connector relation types, because I'm bored of writing it in dependent projects.
Should probably also fill out the catpy.image.TileSourceType one, for completeness. | 0.0 | 474b667314160b8aeaaf13933a686237ebf03f2d | [
"tests/test_export.py::test_reads_nodelinks[1-11]",
"tests/test_export.py::test_converts",
"tests/test_export.py::test_fails_to_convert",
"tests/test_image.py::test_vol_maker[0-shape0-1]",
"tests/test_image.py::test_vol_maker[1-shape1-2]",
"tests/test_image.py::test_vol_maker[0-shape2-10]",
"tests/test_image.py::test_vol_maker[0-shape3-46]",
"tests/test_image.py::test_vol_maker[1-shape4-47]",
"tests/test_image.py::test_response_to_array_png[L]",
"tests/test_image.py::test_response_to_array_png[RGB]",
"tests/test_image.py::test_response_to_array_png[RGBA]",
"tests/test_image.py::test_response_to_array_jpeg[L]",
"tests/test_image.py::test_response_to_array_jpeg[RGB]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS-{image_base}{{depth}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.DIR_BASED-{image_base}{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.RENDER_SERVICE-{image_base}largeDataTileSource/{tile_width}/{tile_height}/{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FLIXSERVER-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]",
"tests/test_image.py::test_as_future_for_not_future",
"tests/test_image.py::test_as_future_for_future",
"tests/test_image.py::test_fill_tiled_cuboid",
"tests/test_image.py::test_fill_tiled_cuboid_raises",
"tests/test_image.py::test_dict_subtract_mismatched_keys",
"tests/test_image.py::test_dict_subtract",
"tests/test_image.py::test_tile_index_coords",
"tests/test_image.py::test_tile_index_comparable[zoom_level]",
"tests/test_image.py::test_tile_index_comparable[height]",
"tests/test_image.py::test_tile_index_comparable[width]",
"tests/test_image.py::test_tile_index_url_kwargs",
"tests/test_image.py::test_stackmirror_corrects_image_base",
"tests/test_image.py::test_stackmirror_corrects_file_extension",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.DIR_BASED]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.RENDER_SERVICE]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FLIXSERVER]",
"tests/test_image.py::test_stackmirror_raises_on_incompatible_tile_index",
"tests/test_image.py::test_stackmirror_get_tile_index",
"tests/test_image.py::test_stack_sets_broken_slices_canary",
"tests/test_image.py::test_stack_fastest_mirror_calls_get",
"tests/test_image.py::test_stack_fastest_mirror_raises",
"tests/test_image.py::test_tilecache_can_set",
"tests/test_image.py::test_tilecache_set_refreshes_old",
"tests/test_image.py::test_tilecache_can_get",
"tests/test_image.py::test_tilecache_lru",
"tests/test_image.py::test_tilecache_can_clear",
"tests/test_image.py::test_tilecache_can_constrain_len",
"tests/test_image.py::test_tilecache_can_constrain_bytes",
"tests/test_image.py::test_imagefetcher_can_instantiate",
"tests/test_image.py::test_imagefetcher_mirror_fallback_warning",
"tests/test_image.py::test_imagefetcher_set_mirror_none",
"tests/test_image.py::test_imagefetcher_set_mirror_mirror",
"tests/test_image.py::test_imagefetcher_set_mirror_mirror_raises",
"tests/test_image.py::test_imagefetcher_set_mirror_int",
"tests/test_image.py::test_imagefetcher_set_mirror_int_as_str",
"tests/test_image.py::test_imagefetcher_set_mirror_position_warns_no_match",
"tests/test_image.py::test_imagefetcher_set_mirror_position_warns_too_many",
"tests/test_image.py::test_imagefetcher_set_mirror_title",
"tests/test_image.py::test_imagefetcher_set_mirror_title_warns_no_match",
"tests/test_image.py::test_imagefetcher_set_mirror_title_warns_too_many",
"tests/test_image.py::test_imagefetcher_get_auth_default",
"tests/test_image.py::test_imagefetcher_get_auth_from_mirror",
"tests/test_image.py::test_imagefetcher_get_auth_fallback",
"tests/test_image.py::test_imagefetcher_clear_cache",
"tests/test_image.py::test_imagefetcher_map_dimensions",
"tests/test_image.py::test_imagefetcher_reorient",
"tests/test_image.py::test_imagefetcher_reorient_expands",
"tests/test_image.py::test_imagefetcher_reorient_throws",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi0-expected_drc0-expected_yx_minmax0]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi1-expected_drc1-expected_yx_minmax1]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi2-expected_drc2-expected_yx_minmax2]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi3-expected_drc3-expected_yx_minmax3]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi4-expected_drc4-expected_yx_minmax4]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-scaled-0-expected0]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-0-expected1]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack--2-expected2]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-1-expected3]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-0-expected4]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project--2-expected5]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-1-expected6]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-scaled-0-expected0]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-0-expected1]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack--2-expected2]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-1-expected3]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-0-expected4]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project--2-expected5]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-1-expected6]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi0-1]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi1-2]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi2-1]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi3-2]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi4-2]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi5-4]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi6-12]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi0-1]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi1-2]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi2-1]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi3-2]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi4-2]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi5-4]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi6-12]",
"tests/test_image.py::test_imagefetcher_get_into_array[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_into_array[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_cache[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_cache[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_fetch",
"tests/test_image.py::test_imagefetcher_fetch",
"tests/test_image.py::test_imagefetcher_get_wrappers[stack]",
"tests/test_image.py::test_imagefetcher_get_wrappers[scaled]",
"tests/test_image.py::test_imagefetcher_get_wrappers[project]",
"tests/test_image.py::test_404_handled_correctly",
"tests/test_relation_identifier.py::test_from_id",
"tests/test_relation_identifier.py::test_to_id",
"tests/test_utils/test_coordinate_transformer.py::test_instantiate[<lambda>-<lambda>]",
"tests/test_utils/test_coordinate_transformer.py::test_from_catmaid",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_coord[x]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_coord[y]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_coord[z]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_project_coord[x]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_project_coord[y]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_project_coord[z]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_project_and_project_to_stack[stack_to_project]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_project_and_project_to_stack[project_to_stack]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[stack_to_project-dims0]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[stack_to_project-dims1]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[stack_to_project-dims2]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[stack_to_project-dims3]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[stack_to_project-dims4]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[stack_to_project-dims5]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[project_to_stack-dims0]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[project_to_stack-dims1]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[project_to_stack-dims2]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[project_to_stack-dims3]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[project_to_stack-dims4]",
"tests/test_utils/test_coordinate_transformer.py::test_arrays[project_to_stack-dims5]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_coord[x]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_coord[y]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_coord[z]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_coord_z",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled[True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled[False]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims0-True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims0-False]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims1-True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims1-False]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims2-True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims2-False]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims3-True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims3-False]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims4-True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims4-False]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims5-True]",
"tests/test_utils/test_coordinate_transformer.py::test_stack_to_scaled_array[dims5-False]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_valid[XY]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_valid[xy0]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_valid[0]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_valid[xy1]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_valid[None]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_invalid[3-ValueError]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_invalid[xyz-KeyError]",
"tests/test_utils/test_coordinate_transformer.py::test_can_validate_orientation_invalid[xc-KeyError]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_orientation_xy[XY-project_to_stack-expected0]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_orientation_xy[XY-stack_to_project-expected1]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_orientation_xy[XZ-project_to_stack-expected2]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_orientation_xy[XZ-stack_to_project-expected3]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_orientation_xy[ZY-project_to_stack-expected4]",
"tests/test_utils/test_coordinate_transformer.py::test_project_to_stack_orientation_xy[ZY-stack_to_project-expected5]"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-10-29 21:11:14+00:00 | mit | 1,515 |
|
cclib__cclib-1320 | diff --git a/cclib/bridge/cclib2ase.py b/cclib/bridge/cclib2ase.py
index e81c0a15..d0f8de86 100644
--- a/cclib/bridge/cclib2ase.py
+++ b/cclib/bridge/cclib2ase.py
@@ -7,6 +7,8 @@
"""Bridge for using cclib data in ASE (https://wiki.fysik.dtu.dk/ase/)."""
+from typing import Optional, Sequence
+
from cclib.parser.data import ccData
from cclib.parser.utils import find_package
@@ -19,12 +21,12 @@ if _found_ase:
from ase.io.trajectory import Trajectory
-def _check_ase(found_ase):
+def _check_ase(found_ase: bool) -> None:
if not found_ase:
raise ImportError("You must install `ase` to use this function")
-def makease(atomcoords, atomnos, atomcharges=None, atomspins=None, atommasses=None):
+def makease(atomcoords, atomnos, atomcharges=None, atomspins=None, atommasses=None) -> "Atoms":
"""Create an ASE Atoms object from cclib attributes.
ASE requires atomic partial charges and atomic spin densities rather than
@@ -49,7 +51,9 @@ def makease(atomcoords, atomnos, atomcharges=None, atomspins=None, atommasses=No
)
-def write_trajectory(filename, ccdata, popname="mulliken", index=None):
+def write_trajectory(
+ filename, ccdata: ccData, popname: str = "mulliken", index: Optional[Sequence[int]] = None
+) -> None:
"""Write an ASE Trajectory object from a ccData object.
We try to write the following properties: atomcoords, atomnos, atomcharges,
@@ -106,13 +110,13 @@ def write_trajectory(filename, ccdata, popname="mulliken", index=None):
if i == len(ccdata.atomcoords) - 1: # last geometry
if hasattr(ccdata, "moments"):
properties.update({"dipole": ccdata.moments[1] * units.Bohr})
- if hasattr(ccdata, "free_energy"):
+ if hasattr(ccdata, "freeenergy"):
properties.update({"free_energy": ccdata.freeenergy * units.Hartree})
traj.write(atoms, **properties)
-def read_trajectory(filename):
+def read_trajectory(filename) -> ccData:
"""Read an ASE Trajectory object and return a ccData object.
The returned object has everything write_trajectory writes, plus natom,
@@ -167,7 +171,7 @@ def read_trajectory(filename):
return ccData(attributes)
-def makecclib(atoms, popname="mulliken"):
+def makecclib(atoms: "Atoms", popname: str = "mulliken") -> ccData:
"""Create cclib attributes and return a ccData from an ASE Atoms object.
Available data (such as forces/gradients and potential energy/free
| cclib/cclib | a4831f9c25327f7a49a263da275c9268c58a2f27 | diff --git a/test/bridge/testase.py b/test/bridge/testase.py
index 91bab0f5..610bbe33 100644
--- a/test/bridge/testase.py
+++ b/test/bridge/testase.py
@@ -168,9 +168,9 @@ class ASETest(unittest.TestCase):
assert np.allclose(trajdata.charge, data.charge, atol=1e-5)
assert np.allclose(trajdata.mult, data.mult)
assert np.allclose(trajdata.moments, data.moments)
+ assert np.allclose(trajdata.freeenergy, data.freeenergy)
# No temperature here.
- # No freeenergy here.
assert np.allclose(trajdata.atomcharges["mulliken"], data.atomcharges["mulliken"])
# No atomspins here.
| `free_energy` vs. `freeenergy` issue in `cclib2ase.py`
https://github.com/cclib/cclib/blob/880210c18d4cfd7a40e8af6ce50d40a748ee2d7c/cclib/bridge/cclib2ase.py#L109-L110
I believe the cclib property is named `freeenergy` ([source](https://cclib.github.io/data.html)) rather than `free_energy`. I don't have a test case --- just accidentally stumbled upon it when code diving and wanted to report it. | 0.0 | a4831f9c25327f7a49a263da275c9268c58a2f27 | [
"test/bridge/testase.py::ASETest::test_write_and_read_trivial_trajectories"
]
| [
"test/bridge/testase.py::ASETest::test_makease_allows_optimization",
"test/bridge/testase.py::ASETest::test_makease_works_with_closedshells",
"test/bridge/testase.py::ASETest::test_makease_works_with_openshells",
"test/bridge/testase.py::ASETest::test_makecclib_retrieves_optimization",
"test/bridge/testase.py::ASETest::test_read_ase_native_trajectory",
"test/bridge/testase.py::ASETest::test_write_and_read_opt_trajectories"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-12-15 21:44:50+00:00 | bsd-3-clause | 1,516 |
|
cdent__gabbi-153 | diff --git a/gabbi/driver.py b/gabbi/driver.py
index 33c0a98..49088fa 100644
--- a/gabbi/driver.py
+++ b/gabbi/driver.py
@@ -39,7 +39,8 @@ from gabbi import utils
def build_tests(path, loader, host=None, port=8001, intercept=None,
test_loader_name=None, fixture_module=None,
- response_handlers=None, prefix='', require_ssl=False):
+ response_handlers=None, prefix='', require_ssl=False,
+ url=None):
"""Read YAML files from a directory to create tests.
Each YAML file represents an ordered sequence of HTTP requests.
@@ -54,6 +55,7 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
:param response_handers: ResponseHandler classes.
:type response_handlers: List of ResponseHandler classes.
:param prefix: A URL prefix for all URLs that are not fully qualified.
+ :param url: A full URL to test against. Replaces host, port and prefix.
:param require_ssl: If ``True``, make all tests default to using SSL.
:rtype: TestSuite containing multiple TestSuites (one for each YAML file).
"""
@@ -63,6 +65,12 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
if not bool(host) ^ bool(intercept):
raise AssertionError('must specify exactly one of host or intercept')
+ # If url is being used, reset host, port and prefix.
+ if url:
+ host, port, prefix, force_ssl = utils.host_info_from_target(url)
+ if force_ssl and not require_ssl:
+ require_ssl = force_ssl
+
if test_loader_name is None:
test_loader_name = inspect.stack()[1]
test_loader_name = os.path.splitext(os.path.basename(
@@ -97,7 +105,7 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
def py_test_generator(test_dir, host=None, port=8001, intercept=None,
prefix=None, test_loader_name=None,
fixture_module=None, response_handlers=None,
- require_ssl=False):
+ require_ssl=False, url=None):
"""Generate tests cases for py.test
This uses build_tests to create TestCases and then yields them in
@@ -110,7 +118,8 @@ def py_test_generator(test_dir, host=None, port=8001, intercept=None,
test_loader_name=test_loader_name,
fixture_module=fixture_module,
response_handlers=response_handlers,
- prefix=prefix, require_ssl=require_ssl)
+ prefix=prefix, require_ssl=require_ssl,
+ url=url)
for test in tests:
if hasattr(test, '_tests'):
diff --git a/gabbi/runner.py b/gabbi/runner.py
index 3411dbe..d4e79d5 100644
--- a/gabbi/runner.py
+++ b/gabbi/runner.py
@@ -17,8 +17,6 @@ from importlib import import_module
import sys
import unittest
-from six.moves.urllib import parse as urlparse
-
from gabbi import case
from gabbi import handlers
from gabbi.reporter import ConciseTestRunner
@@ -93,7 +91,7 @@ def run():
)
args = parser.parse_args()
- host, port, prefix, force_ssl = process_target_args(
+ host, port, prefix, force_ssl = utils.host_info_from_target(
args.target, args.prefix)
# Initialize response handlers.
@@ -113,31 +111,6 @@ def run():
sys.exit(not result.wasSuccessful())
-def process_target_args(target, prefix):
- """Turn the argparse args into a host, port and prefix."""
- force_ssl = False
- split_url = urlparse.urlparse(target)
-
- if split_url.scheme:
- if split_url.scheme == 'https':
- force_ssl = True
- return split_url.hostname, split_url.port, split_url.path, force_ssl
- else:
- target = target
- prefix = prefix
-
- if ':' in target and '[' not in target:
- host, port = target.rsplit(':', 1)
- elif ']:' in target:
- host, port = target.rsplit(':', 1)
- else:
- host = target
- port = None
- host = host.replace('[', '').replace(']', '')
-
- return host, port, prefix, force_ssl
-
-
def initialize_handlers(response_handlers):
custom_response_handlers = []
for import_path in response_handlers or []:
diff --git a/gabbi/utils.py b/gabbi/utils.py
index 3de040d..172b4bf 100644
--- a/gabbi/utils.py
+++ b/gabbi/utils.py
@@ -126,6 +126,31 @@ def not_binary(content_type):
content_type.startswith('application/json'))
+def host_info_from_target(target, prefix=None):
+ """Turn url or host:port and target into test destination."""
+ force_ssl = False
+ split_url = urlparse.urlparse(target)
+
+ if split_url.scheme:
+ if split_url.scheme == 'https':
+ force_ssl = True
+ return split_url.hostname, split_url.port, split_url.path, force_ssl
+ else:
+ target = target
+ prefix = prefix
+
+ if ':' in target and '[' not in target:
+ host, port = target.rsplit(':', 1)
+ elif ']:' in target:
+ host, port = target.rsplit(':', 1)
+ else:
+ host = target
+ port = None
+ host = host.replace('[', '').replace(']', '')
+
+ return host, port, prefix, force_ssl
+
+
def _colorize(color, message):
"""Add a color to the message."""
try:
| cdent/gabbi | 0a8a3b8faf9a900fd132d9b147f67a851d52f178 | diff --git a/gabbi/tests/test_driver.py b/gabbi/tests/test_driver.py
index 0b2ce0a..8f6bca0 100644
--- a/gabbi/tests/test_driver.py
+++ b/gabbi/tests/test_driver.py
@@ -70,3 +70,20 @@ class DriverTest(unittest.TestCase):
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
self.assertEqual('http://localhost:8001/', full_url)
+
+ def test_build_url_target(self):
+ suite = driver.build_tests(self.test_dir, self.loader,
+ host='localhost', port='999',
+ url='https://example.com:1024/theend')
+ first_test = suite._tests[0]._tests[0]
+ full_url = first_test._parse_url(first_test.test_data['url'])
+ self.assertEqual('https://example.com:1024/theend/', full_url)
+
+ def test_build_url_target_forced_ssl(self):
+ suite = driver.build_tests(self.test_dir, self.loader,
+ host='localhost', port='999',
+ url='http://example.com:1024/theend',
+ require_ssl=True)
+ first_test = suite._tests[0]._tests[0]
+ full_url = first_test._parse_url(first_test.test_data['url'])
+ self.assertEqual('https://example.com:1024/theend/', full_url)
diff --git a/gabbi/tests/test_runner.py b/gabbi/tests/test_runner.py
index 3c132b1..a854cf9 100644
--- a/gabbi/tests/test_runner.py
+++ b/gabbi/tests/test_runner.py
@@ -229,93 +229,6 @@ class RunnerTest(unittest.TestCase):
self._stderr.write(sys.stderr.read())
-class RunnerHostArgParse(unittest.TestCase):
-
- def _test_hostport(self, url_or_host, expected_host,
- provided_prefix=None, expected_port=None,
- expected_prefix=None, expected_ssl=False):
- host, port, prefix, ssl = runner.process_target_args(
- url_or_host, provided_prefix)
-
- # normalize hosts, they are case insensitive
- self.assertEqual(expected_host.lower(), host.lower())
- # port can be a string or int depending on the inputs
- self.assertEqual(expected_port, port)
- self.assertEqual(expected_prefix, prefix)
- self.assertEqual(expected_ssl, ssl)
-
- def test_plain_url_no_port(self):
- self._test_hostport('http://foobar.com/news',
- 'foobar.com',
- expected_port=None,
- expected_prefix='/news')
-
- def test_plain_url_with_port(self):
- self._test_hostport('http://foobar.com:80/news',
- 'foobar.com',
- expected_port=80,
- expected_prefix='/news')
-
- def test_ssl_url(self):
- self._test_hostport('https://foobar.com/news',
- 'foobar.com',
- expected_prefix='/news',
- expected_ssl=True)
-
- def test_ssl_port80_url(self):
- self._test_hostport('https://foobar.com:80/news',
- 'foobar.com',
- expected_prefix='/news',
- expected_port=80,
- expected_ssl=True)
-
- def test_ssl_port_url(self):
- self._test_hostport('https://foobar.com:999/news',
- 'foobar.com',
- expected_prefix='/news',
- expected_port=999,
- expected_ssl=True)
-
- def test_simple_hostport(self):
- self._test_hostport('foobar.com:999',
- 'foobar.com',
- expected_port='999')
-
- def test_simple_hostport_with_prefix(self):
- self._test_hostport('foobar.com:999',
- 'foobar.com',
- provided_prefix='/news',
- expected_port='999',
- expected_prefix='/news')
-
- def test_ipv6_url_long(self):
- self._test_hostport(
- 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/news',
- 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210',
- expected_port=999,
- expected_prefix='/news')
-
- def test_ipv6_url_localhost(self):
- self._test_hostport(
- 'http://[::1]:999/news',
- '::1',
- expected_port=999,
- expected_prefix='/news')
-
- def test_ipv6_host_localhost(self):
- # If a user wants to use the hostport form, then they need
- # to hack it with the brackets.
- self._test_hostport(
- '[::1]',
- '::1')
-
- def test_ipv6_hostport_localhost(self):
- self._test_hostport(
- '[::1]:999',
- '::1',
- expected_port='999')
-
-
class HTMLResponseHandler(handlers.ResponseHandler):
test_key_suffix = 'html'
diff --git a/gabbi/tests/test_utils.py b/gabbi/tests/test_utils.py
index 1754dad..d5b8b50 100644
--- a/gabbi/tests/test_utils.py
+++ b/gabbi/tests/test_utils.py
@@ -158,3 +158,90 @@ class CreateURLTest(unittest.TestCase):
'/foo', 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210', port=999)
self.assertEqual(
'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/foo', url)
+
+
+class UtilsHostInfoFromTarget(unittest.TestCase):
+
+ def _test_hostport(self, url_or_host, expected_host,
+ provided_prefix=None, expected_port=None,
+ expected_prefix=None, expected_ssl=False):
+ host, port, prefix, ssl = utils.host_info_from_target(
+ url_or_host, provided_prefix)
+
+ # normalize hosts, they are case insensitive
+ self.assertEqual(expected_host.lower(), host.lower())
+ # port can be a string or int depending on the inputs
+ self.assertEqual(expected_port, port)
+ self.assertEqual(expected_prefix, prefix)
+ self.assertEqual(expected_ssl, ssl)
+
+ def test_plain_url_no_port(self):
+ self._test_hostport('http://foobar.com/news',
+ 'foobar.com',
+ expected_port=None,
+ expected_prefix='/news')
+
+ def test_plain_url_with_port(self):
+ self._test_hostport('http://foobar.com:80/news',
+ 'foobar.com',
+ expected_port=80,
+ expected_prefix='/news')
+
+ def test_ssl_url(self):
+ self._test_hostport('https://foobar.com/news',
+ 'foobar.com',
+ expected_prefix='/news',
+ expected_ssl=True)
+
+ def test_ssl_port80_url(self):
+ self._test_hostport('https://foobar.com:80/news',
+ 'foobar.com',
+ expected_prefix='/news',
+ expected_port=80,
+ expected_ssl=True)
+
+ def test_ssl_port_url(self):
+ self._test_hostport('https://foobar.com:999/news',
+ 'foobar.com',
+ expected_prefix='/news',
+ expected_port=999,
+ expected_ssl=True)
+
+ def test_simple_hostport(self):
+ self._test_hostport('foobar.com:999',
+ 'foobar.com',
+ expected_port='999')
+
+ def test_simple_hostport_with_prefix(self):
+ self._test_hostport('foobar.com:999',
+ 'foobar.com',
+ provided_prefix='/news',
+ expected_port='999',
+ expected_prefix='/news')
+
+ def test_ipv6_url_long(self):
+ self._test_hostport(
+ 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/news',
+ 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210',
+ expected_port=999,
+ expected_prefix='/news')
+
+ def test_ipv6_url_localhost(self):
+ self._test_hostport(
+ 'http://[::1]:999/news',
+ '::1',
+ expected_port=999,
+ expected_prefix='/news')
+
+ def test_ipv6_host_localhost(self):
+ # If a user wants to use the hostport form, then they need
+ # to hack it with the brackets.
+ self._test_hostport(
+ '[::1]',
+ '::1')
+
+ def test_ipv6_hostport_localhost(self):
+ self._test_hostport(
+ '[::1]:999',
+ '::1',
+ expected_port='999')
| In 'live' testing scenarios argument passing to build_tests is convoluted and SSL may not work
If you want to use `build_tests` to create real TestCases against a live server it's likely you know the URL, and that would be the most convenient thing to pass instead of having to parse out the host, port and prefix (script_name) and then pass those.
In addition, if you have a URL you already know whether your server is SSL, but the tests may not have been written to do SSL (with an `ssl: true` entry). Because of the test building process this is a bit awkward at the moment. It would be better to be able to say "yeah, this is SSL" for the whole run.
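With the `url` and `require_ssl` arguments added by the patch above, a live run can hand over the full URL; a rough sketch of what that might look like (the directory name and URL are invented; a host is still passed to satisfy the host-or-intercept check, but the url takes over):

```python
import os

from gabbi import driver

TESTS_DIR = 'gabbits_live'  # assumed directory of YAML test files


def load_tests(loader, tests, pattern):
    # url overrides host/port/prefix for the whole run; require_ssl
    # upgrades every test to https even without an `ssl: true` entry.
    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    return driver.build_tests(test_dir, loader,
                              host='localhost',
                              url='https://example.com:1024/theend',
                              require_ssl=True)
```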
"gabbi/tests/test_driver.py::DriverTest::test_build_url_target",
"gabbi/tests/test_driver.py::DriverTest::test_build_url_target_forced_ssl",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_host_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_hostport_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_url_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_url_long",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_plain_url_no_port",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_plain_url_with_port",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_port80_url",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_port_url",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_url"
]
| [
"gabbi/tests/test_driver.py::DriverTest::test_build_require_ssl",
"gabbi/tests/test_driver.py::DriverTest::test_build_requires_host_or_intercept",
"gabbi/tests/test_driver.py::DriverTest::test_driver_loads_two_tests",
"gabbi/tests/test_driver.py::DriverTest::test_driver_prefix",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing_standard_port",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_binary",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_not_binary",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_bad_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_both",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_charset",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_multiple_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_with_charset",
"gabbi/tests/test_utils.py::ColorizeTest::test_colorize_missing_color",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_already_bracket",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_full",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl_weird_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_no_double_colon",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_not_ssl_on_443",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port_and_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_prefix",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_preserve_query",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_simple",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl_on_80"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2016-06-12 20:11:11+00:00 | apache-2.0 | 1,517 |
|
cdent__gabbi-167 | diff --git a/gabbi/handlers.py b/gabbi/handlers.py
index e4ed7f5..5302f3e 100644
--- a/gabbi/handlers.py
+++ b/gabbi/handlers.py
@@ -110,6 +110,8 @@ class JSONResponseHandler(ResponseHandler):
if (hasattr(expected, 'startswith') and expected.startswith('/')
and expected.endswith('/')):
expected = expected.strip('/').rstrip('/')
+ # match may be a number so stringify
+ match = str(match)
test.assertRegexpMatches(
match, expected,
'Expect jsonpath %s to match /%s/, got %s' %
| cdent/gabbi | 2bd1c803b71bd26be7822be87e249a811467b863 | diff --git a/gabbi/tests/__init__.py b/gabbi/tests/__init__.py
index e69de29..d17f718 100644
--- a/gabbi/tests/__init__.py
+++ b/gabbi/tests/__init__.py
@@ -0,0 +1,16 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+six.add_move(six.MovedModule('mock', 'mock', 'unittest.mock'))
diff --git a/gabbi/tests/gabbits_intercept/regex.yaml b/gabbi/tests/gabbits_intercept/regex.yaml
index 4416be9..9a0c055 100644
--- a/gabbi/tests/gabbits_intercept/regex.yaml
+++ b/gabbi/tests/gabbits_intercept/regex.yaml
@@ -14,6 +14,8 @@ tests:
data:
alpha: cow
beta: pig
+ gamma: 1
response_json_paths:
$.alpha: /ow$/
$.beta: /(?!cow).*/
+ $.gamma: /\d+/
diff --git a/gabbi/tests/test_fixtures.py b/gabbi/tests/test_fixtures.py
index 19a8747..6e75a53 100644
--- a/gabbi/tests/test_fixtures.py
+++ b/gabbi/tests/test_fixtures.py
@@ -13,7 +13,7 @@
"""Use mocks to confirm that fixtures operate as context managers.
"""
-import mock
+from six.moves import mock
import unittest
from gabbi import fixture
diff --git a/gabbi/tests/test_handlers.py b/gabbi/tests/test_handlers.py
index 67bd30c..3f8b72f 100644
--- a/gabbi/tests/test_handlers.py
+++ b/gabbi/tests/test_handlers.py
@@ -128,6 +128,34 @@ class HandlersTest(unittest.TestCase):
with self.assertRaises(AssertionError):
self._assert_handler(handler)
+ def test_response_json_paths_regex(self):
+ handler = handlers.JSONResponseHandler(self.test_class)
+ self.test.content_type = "application/json"
+ self.test.test_data = {'response_json_paths': {
+ '$.objects[0].name': '/ow/',
+ }}
+ self.test.json_data = {
+ 'objects': [{'name': 'cow',
+ 'location': 'barn'},
+ {'name': 'chris',
+ 'location': 'house'}]
+ }
+ self._assert_handler(handler)
+
+ def test_response_json_paths_regex_number(self):
+ handler = handlers.JSONResponseHandler(self.test_class)
+ self.test.content_type = "application/json"
+ self.test.test_data = {'response_json_paths': {
+ '$.objects[0].name': '/\d+/',
+ }}
+ self.test.json_data = {
+ 'objects': [{'name': 99,
+ 'location': 'barn'},
+ {'name': 'chris',
+ 'location': 'house'}]
+ }
+ self._assert_handler(handler)
+
def test_response_headers(self):
handler = handlers.HeadersResponseHandler(self.test_class)
self.test.response = {'content-type': 'text/plain'}
diff --git a/test-requirements.txt b/test-requirements.txt
index c6439fa..3dd01e6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,4 +1,4 @@
-mock
+mock ; python_version < '3.3'
testrepository
coverage
hacking
| regex matching in JSONResponseHandler doesn't deal with numbers
If the match is a number, the regex matching errors out. We need to cast to a string before doing the regex test. | 0.0 | 2bd1c803b71bd26be7822be87e249a811467b863 | [
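A quick illustration of the failure and the proposed cast, using only the stdlib `re` module:

```python
import re

# re.search wants a string; handing it a bare number raises TypeError
try:
    re.search(r'\d+', 99)
except TypeError as exc:
    print('regex against a number fails:', exc)

# casting to str first, as the fix does, lets the match succeed
print(bool(re.search(r'\d+', str(99))))  # True
```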
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex_number"
]
| [
"gabbi/tests/test_fixtures.py::FixtureTest::test_fixture_informs_on_exception",
"gabbi/tests/test_fixtures.py::FixtureTest::test_fixture_starts_and_stop",
"gabbi/tests/test_handlers.py::HandlersTest::test_resonse_headers_stringify",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_header",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail_big_output",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail_big_payload"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2016-09-02 12:17:35+00:00 | apache-2.0 | 1,518 |
|
cdent__gabbi-186 | diff --git a/gabbi/httpclient.py b/gabbi/httpclient.py
index 22869e5..88f873d 100644
--- a/gabbi/httpclient.py
+++ b/gabbi/httpclient.py
@@ -19,6 +19,7 @@ import sys
import urllib3
+from gabbi.handlers import jsonhandler
from gabbi import utils
@@ -138,11 +139,20 @@ class VerboseHttp(Http):
def _print_body(self, headers, content):
"""Output body if not binary."""
- if self._show_body and utils.not_binary(
- utils.extract_content_type(headers)[0]):
+ content_type = utils.extract_content_type(headers)[0]
+ if self._show_body and utils.not_binary(content_type):
+ content = utils.decode_response_content(headers, content)
+ # TODO(cdent): Using the JSONHandler here instead of
+ # just the json module to make it clear that eventually
+ # we could pretty print any printable output by using a
+ # handler's loads() and dumps(). Not doing that now
+ # because it would be pointless (no other interesting
+ # handlers) and this approach may be entirely wrong.
+ if jsonhandler.JSONHandler.accepts(content_type):
+ data = jsonhandler.JSONHandler.loads(content)
+ content = jsonhandler.JSONHandler.dumps(data, pretty=True)
self._verbose_output('')
- self._verbose_output(
- utils.decode_response_content(headers, content))
+ self._verbose_output(content)
def _print_header(self, name, value, prefix='', stream=None):
"""Output one single header."""
| cdent/gabbi | f27b9aba8590dbdb16749f917cdcf3fffc6218e3 | diff --git a/gabbi/tests/gabbits_runner/test_verbose.yaml b/gabbi/tests/gabbits_runner/test_verbose.yaml
new file mode 100644
index 0000000..99b0b0f
--- /dev/null
+++ b/gabbi/tests/gabbits_runner/test_verbose.yaml
@@ -0,0 +1,18 @@
+tests:
+
+- name: POST data with verbose true
+ verbose: true
+ POST: /
+ request_headers:
+ content-type: application/json
+ data:
+ - our text
+
+- name: structured data
+ verbose: true
+ POST: /
+ request_headers:
+ content-type: application/json
+ data:
+ cow: moo
+ dog: bark
diff --git a/gabbi/tests/test_runner.py b/gabbi/tests/test_runner.py
index 2f313a4..bf882ab 100644
--- a/gabbi/tests/test_runner.py
+++ b/gabbi/tests/test_runner.py
@@ -228,6 +228,27 @@ class RunnerTest(unittest.TestCase):
except SystemExit as err:
self.assertSuccess(err)
+ def test_verbose_output_formatting(self):
+ """Confirm that a verbose test handles output properly."""
+ sys.argv = ['gabbi-run', 'http://%s:%s/foo' % (self.host, self.port)]
+
+ sys.argv.append('--')
+ sys.argv.append('gabbi/tests/gabbits_runner/test_verbose.yaml')
+ with self.server():
+ try:
+ runner.run()
+ except SystemExit as err:
+ self.assertSuccess(err)
+
+ sys.stdout.seek(0)
+ output = sys.stdout.read()
+ self.assertIn('"our text"', output)
+ self.assertIn('"cow": "moo"', output)
+ self.assertIn('"dog": "bark"', output)
+ # confirm pretty printing
+ self.assertIn('{\n', output)
+ self.assertIn('}\n', output)
+
def assertSuccess(self, exitError):
errors = exitError.args[0]
if errors:
diff --git a/gabbi/tests/test_verbose.yaml b/gabbi/tests/test_verbose.yaml
deleted file mode 100644
index 29a6c64..0000000
--- a/gabbi/tests/test_verbose.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-tests:
-
- - name: POST data with verbose true
- verbose: true
- POST: /
- request_headers:
- content-type: application/json
- data:
- 'text'
| If verbose: True and response content-type is json may as well pretty print the output
It kinda seems like if we know the content-type when being verbose about bodies, we may as well pretty print if it is json. It's not much effort to do so (there's already pretty printing happening in some test failure messages) and it is nice.
Is it too nice?
/cc @FND
| 0.0 | f27b9aba8590dbdb16749f917cdcf3fffc6218e3 | [
"gabbi/tests/test_runner.py::RunnerTest::test_verbose_output_formatting"
]
| [
"gabbi/tests/test_runner.py::RunnerTest::test_input_files",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing_standard_port"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2016-11-10 17:05:40+00:00 | apache-2.0 | 1,519 |
|
cdent__gabbi-191 | diff --git a/docs/source/format.rst b/docs/source/format.rst
index cc1b129..51dcf24 100644
--- a/docs/source/format.rst
+++ b/docs/source/format.rst
@@ -269,8 +269,9 @@ flexibility when doing a ``POST`` or ``PUT``. If the value is not a
string (that is, it is a sequence or structure) it is treated as a
data structure which is turned into a JSON string. If the value is a
string that begins with ``<@`` then the rest of the string is treated
-as the name of a file to be loaded from the same directory as the YAML
-file. If the value is an undecorated string, that's the value.
+as a filepath to be loaded. The path is relative to the test directory
+and may not traverse up into parent directories. If the value is an
+undecorated string, that's the value.
When reading from a file care should be taken to ensure that a
reasonable content-type is set for the data as this will control if any
diff --git a/gabbi/runner.py b/gabbi/runner.py
index ac98dea..9f45351 100644
--- a/gabbi/runner.py
+++ b/gabbi/runner.py
@@ -14,6 +14,7 @@
import argparse
from importlib import import_module
+import os
import sys
import unittest
@@ -84,8 +85,9 @@ def run():
else:
for input_file in input_files:
with open(input_file, 'r') as fh:
+ data_dir = os.path.dirname(input_file)
success = run_suite(fh, handler_objects, host, port,
- prefix, force_ssl, failfast)
+ prefix, force_ssl, failfast, data_dir)
if not failure: # once failed, this is considered immutable
failure = not success
if failure and failfast:
@@ -95,7 +97,7 @@ def run():
def run_suite(handle, handler_objects, host, port, prefix, force_ssl=False,
- failfast=False):
+ failfast=False, data_dir='.'):
"""Run the tests from the YAML in handle."""
data = utils.load_yaml(handle)
if force_ssl:
@@ -106,7 +108,7 @@ def run_suite(handle, handler_objects, host, port, prefix, force_ssl=False,
loader = unittest.defaultTestLoader
test_suite = suitemaker.test_suite_from_dict(
- loader, 'input', data, '.', host, port, None, None, prefix=prefix,
+ loader, 'input', data, data_dir, host, port, None, None, prefix=prefix,
handlers=handler_objects)
result = ConciseTestRunner(
| cdent/gabbi | 07dc4913eb980dd4a4a6130abfd708a39235d7f2 | diff --git a/gabbi/tests/gabbits_runner/subdir/sample.json b/gabbi/tests/gabbits_runner/subdir/sample.json
new file mode 100644
index 0000000..ddbce20
--- /dev/null
+++ b/gabbi/tests/gabbits_runner/subdir/sample.json
@@ -0,0 +1,1 @@
+{"items": {"house": "blue"}}
diff --git a/gabbi/tests/gabbits_runner/test_data.yaml b/gabbi/tests/gabbits_runner/test_data.yaml
new file mode 100644
index 0000000..35d056a
--- /dev/null
+++ b/gabbi/tests/gabbits_runner/test_data.yaml
@@ -0,0 +1,8 @@
+tests:
+
+- name: POST data from file
+ verbose: true
+ POST: /
+ request_headers:
+ content-type: application/json
+ data: <@subdir/sample.json
diff --git a/gabbi/tests/test_runner.py b/gabbi/tests/test_runner.py
index bf882ab..1b86235 100644
--- a/gabbi/tests/test_runner.py
+++ b/gabbi/tests/test_runner.py
@@ -22,6 +22,7 @@ from wsgi_intercept.interceptor import Urllib3Interceptor
from gabbi import exception
from gabbi.handlers import base
+from gabbi.handlers.jsonhandler import JSONHandler
from gabbi import runner
from gabbi.tests.simple_wsgi import SimpleWsgi
@@ -249,6 +250,28 @@ class RunnerTest(unittest.TestCase):
self.assertIn('{\n', output)
self.assertIn('}\n', output)
+ def test_data_dir_good(self):
+ """Confirm that data dir is the test file's dir."""
+ sys.argv = ['gabbi-run', 'http://%s:%s/foo' % (self.host, self.port)]
+
+ sys.argv.append('--')
+ sys.argv.append('gabbi/tests/gabbits_runner/test_data.yaml')
+ with self.server():
+ try:
+ runner.run()
+ except SystemExit as err:
+ self.assertSuccess(err)
+
+ # Compare the verbose output of tests with pretty printed
+ # data.
+ with open('gabbi/tests/gabbits_runner/subdir/sample.json') as data:
+ data = JSONHandler.loads(data.read())
+ expected_string = JSONHandler.dumps(data, pretty=True)
+
+ sys.stdout.seek(0)
+ output = sys.stdout.read()
+ self.assertIn(expected_string, output)
+
def assertSuccess(self, exitError):
errors = exitError.args[0]
if errors:
| Data <@filename isn't relative to the YAML file
The [docs say](https://gabbi.readthedocs.io/en/latest/format.html#data):
> If the value is a string that begins with <@ then the rest of the string is treated as the name of a file to be loaded from the same directory as the YAML file.
But I haven't found it works like this unless I cd into the directory containing the yaml file.
`_load_data_file` says:
```python
path = os.path.join(self.test_directory, os.path.basename(filename))
```
this does a few things:
- makes the path begin with `self.test_directory` (defaults to `.`, the current working directory)
- discards any folders in the <@ `filename` path
- appends `filename`
This means, if I'm in `/`, and I have a test that says `data: <@cake.jpg`, I cannot run my tests as advised in the docs:
`gabbi-run -- /my/test.yaml /my/other.yaml`
`FileNotFoundError: [Errno 2] No such file or directory: './cake.jpg'`
So in our test running script, we have to start with `cd tests/`.
My preference, would be to make the path relative to the location of the yaml file. That way, I can run my tests from whatever directory.
Obviously this isn't possible when redirecting with `<` as the filename is never seen by gabbi. So I'm happy is discuss other ideas.
---
Furthermore I can't keep my test data in a child directory, unless I cd into that directory like:
```shell
cd yaml_tests/test_data
gabbi-run -- ../test_things.yaml
```
So for that reason, I'd like to allow directories like `<@test_data/filename.txt` to be included in the path.
---
Happy to write the patch, if we come up with an approach here. | 0.0 | 07dc4913eb980dd4a4a6130abfd708a39235d7f2 | [
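One possible shape for the "relative to the YAML file" behaviour, as a standalone sketch; the helper name and the traversal guard are illustrative, not gabbi's actual API:

```python
import os


def resolve_data_path(yaml_file, data_ref):
    """Hypothetical helper: resolve a '<@' reference relative to the YAML file."""
    filename = data_ref.replace('<@', '', 1)
    base = os.path.dirname(os.path.abspath(yaml_file))
    path = os.path.normpath(os.path.join(base, filename))
    if not path.startswith(base + os.sep):
        raise ValueError('data files must stay under the test directory')
    return path


print(resolve_data_path('/my/test.yaml', '<@test_data/cake.jpg'))
# /my/test_data/cake.jpg
```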
"gabbi/tests/test_runner.py::RunnerTest::test_data_dir_good"
]
| [
"gabbi/tests/test_runner.py::RunnerTest::test_input_files",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing_standard_port",
"gabbi/tests/test_runner.py::RunnerTest::test_verbose_output_formatting"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2016-11-27 14:39:00+00:00 | apache-2.0 | 1,520 |
|
cdent__gabbi-200 | diff --git a/gabbi/suite.py b/gabbi/suite.py
index 765a929..f6d3868 100644
--- a/gabbi/suite.py
+++ b/gabbi/suite.py
@@ -16,6 +16,7 @@ This suite has two features: the contained tests are ordered and there
are suite-level fixtures that operate as context managers.
"""
+import sys
import unittest
from wsgi_intercept import interceptor
@@ -58,6 +59,26 @@ class GabbiSuite(unittest.TestSuite):
except unittest.SkipTest as exc:
for test in self._tests:
result.addSkip(test, str(exc))
+ # If we have an exception in the nested fixtures, that means
+ # there's been an exception somewhere in the cycle other
+ # than a specific test (as that would have been caught
+ # already), thus from a fixture. If that exception were to
+ # continue to raise here, then some test runners would
+ # swallow it and the traceback of the failure would be
+ # undiscoverable. To ensure the traceback is reported (via
+ # the testrunner) to a human, the first test in the suite is
+ # marked as having an error (it's fixture failed) and then
+ # the entire suite is skipped, and the result stream told
+ # we're done. If there are no tests (an empty suite) the
+ # exception is re-raised.
+ except Exception as exc:
+ if self._tests:
+ result.addError(self._tests[0], sys.exc_info())
+ for test in self._tests:
+ result.addSkip(test, 'fixture failure')
+ result.stop()
+ else:
+ raise
return result
| cdent/gabbi | 3e450b1d375c2ee8f18a7c6798cfca211ba2fa98 | diff --git a/gabbi/tests/test_suite.py b/gabbi/tests/test_suite.py
new file mode 100644
index 0000000..eb1bd22
--- /dev/null
+++ b/gabbi/tests/test_suite.py
@@ -0,0 +1,54 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit tests for the gabbi.suite.
+"""
+
+import sys
+import unittest
+
+from gabbi import fixture
+from gabbi import suitemaker
+
+VALUE_ERROR = 'value error sentinel'
+
+
+class FakeFixture(fixture.GabbiFixture):
+
+ def start_fixture(self):
+ raise ValueError(VALUE_ERROR)
+
+
+class SuiteTest(unittest.TestCase):
+
+ def test_suite_catches_fixture_fail(self):
+ """When a fixture fails in start_fixture it should fail
+ the first test in the suite and skip the others.
+ """
+ loader = unittest.defaultTestLoader
+ result = unittest.TestResult()
+ test_data = {'fixtures': ['FakeFixture'],
+ 'tests': [{'name': 'alpha', 'GET': '/'},
+ {'name': 'beta', 'GET': '/'}]}
+ test_suite = suitemaker.test_suite_from_dict(
+ loader, 'foo', test_data, '.', 'localhost',
+ 80, sys.modules[__name__], None)
+
+ test_suite.run(result)
+
+ self.assertEqual(2, len(result.skipped))
+ self.assertEqual(1, len(result.errors))
+
+ errored_test, trace = result.errors[0]
+
+ self.assertIn('foo_alpha', str(errored_test))
+ self.assertIn(VALUE_ERROR, trace)
| exceptions that happen in start_fixture() are swallowed during the test discovery process
The discovery will fail and end up producing a seemingly unrelated error like `math domain error`, and the real exception is nowhere to be found.
This needs to be narrowed to a minimal test case and then figured out.
What seems likely is that something about how the test suites are being processed is not following unittest/testtools rules of failure.
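A minimal reproducer is a fixture that raises in `start_fixture`; listing something like the sketch below in a YAML file's `fixtures` is enough to make the real traceback disappear (the class name and message are made up):

```python
from gabbi import fixture


class ExplodingFixture(fixture.GabbiFixture):
    """Any exception raised here vanishes during discovery."""

    def start_fixture(self):
        raise ValueError('boom from start_fixture')
```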
"gabbi/tests/test_suite.py::SuiteTest::test_suite_catches_fixture_fail"
]
| []
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2017-01-13 15:18:55+00:00 | apache-2.0 | 1,521 |
|
cdent__gabbi-210 | diff --git a/gabbi/handlers/base.py b/gabbi/handlers/base.py
index 03c37e3..055aff8 100644
--- a/gabbi/handlers/base.py
+++ b/gabbi/handlers/base.py
@@ -13,6 +13,9 @@
"""Base classes for response and content handlers."""
+from gabbi.exception import GabbiFormatError
+
+
class ResponseHandler(object):
"""Add functionality for making assertions about an HTTP response.
@@ -38,6 +41,11 @@ class ResponseHandler(object):
def __call__(self, test):
if test.test_data[self._key]:
self.preprocess(test)
+ if type(self.test_key_value) != type(test.test_data[self._key]):
+ raise GabbiFormatError(
+ "%s in '%s' has incorrect type, must be %s"
+ % (self._key, test.test_data['name'],
+ type(self.test_key_value)))
for item in test.test_data[self._key]:
try:
value = test.test_data[self._key][item]
| cdent/gabbi | 867042bc99a844d37e3eb5e7dd748dbb0734ac69 | diff --git a/gabbi/tests/test_handlers.py b/gabbi/tests/test_handlers.py
index df655b4..3caa086 100644
--- a/gabbi/tests/test_handlers.py
+++ b/gabbi/tests/test_handlers.py
@@ -17,6 +17,7 @@ import json
import unittest
from gabbi import case
+from gabbi.exception import GabbiFormatError
from gabbi.handlers import core
from gabbi.handlers import jsonhandler
from gabbi import suitemaker
@@ -104,6 +105,19 @@ class HandlersTest(unittest.TestCase):
# Check the pprint of the json
self.assertIn(' "location": "house"', msg)
+ def test_response_string_list_type(self):
+ handler = core.StringResponseHandler()
+ self.test.test_data = {
+ 'name': 'omega test',
+ 'response_strings': 'omega'
+ }
+ self.test.output = 'omega\n'
+ with self.assertRaises(GabbiFormatError) as exc:
+ self._assert_handler(handler)
+ self.assertIn('has incorrect type', str(exc))
+ self.assertIn("response_strings in 'omega test'",
+ str(exc))
+
def test_response_json_paths(self):
handler = jsonhandler.JSONHandler()
self.test.content_type = "application/json"
@@ -178,6 +192,19 @@ class HandlersTest(unittest.TestCase):
}
self._assert_handler(handler)
+ def test_response_json_paths_dict_type(self):
+ handler = jsonhandler.JSONHandler()
+ self.test.test_data = {
+ 'name': 'omega test',
+ 'response_json_paths': ['alpha', 'beta']
+ }
+ self.test.output = 'omega\n'
+ with self.assertRaises(GabbiFormatError) as exc:
+ self._assert_handler(handler)
+ self.assertIn('has incorrect type', str(exc))
+ self.assertIn("response_json_paths in 'omega test'",
+ str(exc))
+
def test_response_headers(self):
handler = core.HeadersResponseHandler()
self.test.response = {'content-type': 'text/plain'}
| response_string can lead to false positives if value is a string
The correct way to do the `response_strings` test is to provide a list of strings:
```yaml
response_strings:
- foo
- bar
```
If you forget and provide a bare string, though, the test can sometimes pass because the value is used as an iterable and strings iterate. In the following example, if the response contains the letters `f` and `o` the test will pass:
```yaml
response_strings:
foo
```
Stronger type checking is required. | 0.0 | 867042bc99a844d37e3eb5e7dd748dbb0734ac69 | [
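The false positive is plain Python string iteration; a sketch of the behaviour outside gabbi (not the handler's actual code):

```python
response = 'food fight'

# intended usage: a list of expected substrings
print(all(expected in response for expected in ['foo', 'fight']))  # True

# forgotten list: the string iterates character by character,
# so only 'f' and 'o' are checked and the test passes anyway
print(all(expected in response for expected in 'foo'))  # True
```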
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_dict_type",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_string_list_type"
]
| [
"gabbi/tests/test_handlers.py::HandlersTest::test_empty_response_handler",
"gabbi/tests/test_handlers.py::HandlersTest::test_resonse_headers_stringify",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_header",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex_number",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail_big_output",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail_big_payload"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2017-03-20 19:06:23+00:00 | apache-2.0 | 1,522 |
|
cdent__gabbi-215 | diff --git a/docs/source/format.rst b/docs/source/format.rst
index c12d3b2..de15bbb 100644
--- a/docs/source/format.rst
+++ b/docs/source/format.rst
@@ -274,7 +274,8 @@ All of these variables may be used in all of the following fields:
* ``data``
* ``request_headers``
* ``response_strings``
-* ``response_json_paths`` (on the value side of the key value pair)
+* ``response_json_paths`` (in both the key and value, see
+ :ref:`json path substitution <json-subs>` for more info)
* ``response_headers`` (on the value side of the key value pair)
* ``response_forbidden_headers``
* ``count`` and ``delay`` fields of ``poll``
diff --git a/docs/source/jsonpath.rst b/docs/source/jsonpath.rst
index 15d4e1d..317603b 100644
--- a/docs/source/jsonpath.rst
+++ b/docs/source/jsonpath.rst
@@ -64,8 +64,48 @@ lead to difficult to read tests and it also indicates that your
gabbi tests are being used to test your serializers and data models,
not just your API interactions.
+It is also possible to read raw JSON from disk for either all or
+some of a JSON response::
+
+ response_json_paths:
+ $: <@data.json
+
+or::
+
+ response_json_paths:
+ $.pets: <@pets.json
+ $.pets[0]: <@cat.json
+
+Examples like this can be found in one of gabbi's `own tests`_.
+
There are more JSONPath examples in :doc:`example` and in the
`jsonpath_rw`_ and `jsonpath_rw_ext`_ documentation.
+.. _json-subs:
+
+Substitution
+------------
+
+:ref:`Substitutions <state-substitution>` can be made in both the
+left (query) and right (expected) hand sides of the json path
+expression. When subtitutions are used in the query, care must be
+taken to ensure proper quoting of the resulting value. For example
+if there is a uuid (with hyphens) at ``$RESPONSE['$.id']`` then this
+expression may fail::
+
+ $.nested.structure.$RESPONSE['$.id'].name: foobar
+
+as it will evaluate to something like::
+
+ $.nested.structure.ADC8AAFC-D564-40D1-9724-7680D3C010C2.name: foobar
+
+which may be treated as an arithmetic expression by the json path
+parser. The test author should write::
+
+ $.nested.structure["$RESPONSE['$.id']"].name: foobar
+
+to quote the result of the substitution.
+
.. _jsonpath_rw: http://jsonpath-rw.readthedocs.io/en/latest/
.. _jsonpath_rw_ext: https://python-jsonpath-rw-ext.readthedocs.io/en/latest/
+.. _own tests: https://github.com/cdent/gabbi/blob/master/gabbi/tests/gabbits_intercept/data.yaml
diff --git a/gabbi/case.py b/gabbi/case.py
index d903dd5..79c18c9 100644
--- a/gabbi/case.py
+++ b/gabbi/case.py
@@ -174,6 +174,10 @@ class HTTPTestCase(testtools.TestCase):
return message
+ def load_data_file(self, filename):
+ """Read a file from the current test directory."""
+ return self._load_data_file(filename)
+
def _assert_response(self):
"""Compare the response with expected data."""
self._test_status(self.test_data['status'], self.response['status'])
@@ -505,7 +509,7 @@ class HTTPTestCase(testtools.TestCase):
"""
if isinstance(data, str):
if data.startswith('<@'):
- info = self._load_data_file(data.replace('<@', '', 1))
+ info = self.load_data_file(data.replace('<@', '', 1))
if utils.not_binary(content_type):
data = six.text_type(info, 'UTF-8')
else:
diff --git a/gabbi/handlers/jsonhandler.py b/gabbi/handlers/jsonhandler.py
index 5b45720..2f85360 100644
--- a/gabbi/handlers/jsonhandler.py
+++ b/gabbi/handlers/jsonhandler.py
@@ -14,6 +14,8 @@
import json
+import six
+
from gabbi.handlers import base
from gabbi import json_parser
@@ -73,11 +75,8 @@ class JSONHandler(base.ContentHandler):
def action(self, test, path, value=None):
"""Test json_paths against json data."""
- # NOTE: This process has some advantages over other process that
- # might come along because the JSON data has already been
- # processed (to provided for the magic template replacing).
- # Other handlers that want access to data structures will need
- # to do their own processing.
+ # Do template expansion in the left hand side.
+ path = test.replace_template(path)
try:
match = self.extract_json_path_value(
test.response_data, path)
@@ -86,6 +85,13 @@ class JSONHandler(base.ContentHandler):
except ValueError:
raise AssertionError('json path %s cannot match %s' %
(path, test.response_data))
+
+ # read data from disk if the value starts with '<@'
+ if isinstance(value, str) and value.startswith('<@'):
+ info = test.load_data_file(value.replace('<@', '', 1))
+ info = six.text_type(info, 'UTF-8')
+ value = self.loads(info)
+
expected = test.replace_template(value)
# If expected is a string, check to see if it is a regex.
if (hasattr(expected, 'startswith') and expected.startswith('/')
| cdent/gabbi | 87f389688a5aa7753e6f14a50869eb19fa12e592 | diff --git a/gabbi/tests/gabbits_intercept/cat.json b/gabbi/tests/gabbits_intercept/cat.json
new file mode 100644
index 0000000..d27c320
--- /dev/null
+++ b/gabbi/tests/gabbits_intercept/cat.json
@@ -0,0 +1,4 @@
+{
+ "type": "cat",
+ "sound": "meow"
+}
diff --git a/gabbi/tests/gabbits_intercept/data.yaml b/gabbi/tests/gabbits_intercept/data.yaml
index 7bd1a7f..fbe832d 100644
--- a/gabbi/tests/gabbits_intercept/data.yaml
+++ b/gabbi/tests/gabbits_intercept/data.yaml
@@ -49,3 +49,26 @@ tests:
request_headers:
content-type: text/plain
data: <@utf8.txt
+
+ - name: json value from disk
+ POST: /
+ request_headers:
+ content-type: application/json
+ data: <@data.json
+ response_json_paths:
+ foo['bár']: 1
+ $: <@data.json
+
+ - name: partial json from disk
+ POST: /
+ request_headers:
+ content-type: application/json
+ data:
+ pets:
+ - type: cat
+ sound: meow
+ - type: dog
+ sound: woof
+ response_json_paths:
+ $.pets: <@pets.json
+ $.pets[0]: <@cat.json
diff --git a/gabbi/tests/gabbits_intercept/json-left-side.yaml b/gabbi/tests/gabbits_intercept/json-left-side.yaml
new file mode 100644
index 0000000..6ce342c
--- /dev/null
+++ b/gabbi/tests/gabbits_intercept/json-left-side.yaml
@@ -0,0 +1,37 @@
+defaults:
+ request_headers:
+ content-type: application/json
+ verbose: True
+
+tests:
+- name: left side json one
+ desc: for reuse on the next test
+ POST: /
+ data:
+ alpha: alpha1
+ beta: beta1
+
+- name: expand left side
+ POST: /
+ data:
+ alpha1: alpha
+ beta1: beta
+ response_json_paths:
+ $["$RESPONSE['$.alpha']"]: alpha
+
+- name: expand environ left side
+ POST: /
+ data:
+ alpha1: alpha
+ beta1: beta
+ 1: cow
+ response_json_paths:
+ $.['$ENVIRON['ONE']']: cow
+
+- name: set key and value
+ GET: /jsonator?key=$ENVIRON['ONE']&value=10
+
+- name: check key and value
+ GET: /jsonator?key=$ENVIRON['ONE']&value=10
+ response_json_paths:
+ $.["$ENVIRON['ONE']"]: $RESPONSE['$['1']']
diff --git a/gabbi/tests/gabbits_intercept/pets.json b/gabbi/tests/gabbits_intercept/pets.json
new file mode 100644
index 0000000..3c56db0
--- /dev/null
+++ b/gabbi/tests/gabbits_intercept/pets.json
@@ -0,0 +1,10 @@
+[
+ {
+ "type": "cat",
+ "sound": "meow"
+ },
+ {
+ "type": "dog",
+ "sound": "woof"
+ }
+]
diff --git a/gabbi/tests/simple_wsgi.py b/gabbi/tests/simple_wsgi.py
index fabb7e3..b7528ff 100644
--- a/gabbi/tests/simple_wsgi.py
+++ b/gabbi/tests/simple_wsgi.py
@@ -107,6 +107,11 @@ class SimpleWsgi(object):
# fall through if we've ended the loop
elif path_info == '/cookie':
headers.append(('Set-Cookie', 'session=1234; domain=.example.com'))
+ elif path_info == '/jsonator':
+ json_data = json.dumps({query_data['key'][0]:
+ query_data['value'][0]})
+ start_response('200 OK', [('Content-Type', 'application/json')])
+ return [json_data.encode('utf-8')]
start_response('200 OK', headers)
diff --git a/gabbi/tests/test_load_data_file.py b/gabbi/tests/test_load_data_file.py
index 99536d4..f3304a6 100644
--- a/gabbi/tests/test_load_data_file.py
+++ b/gabbi/tests/test_load_data_file.py
@@ -35,7 +35,7 @@ class DataFileTest(unittest.TestCase):
def _assert_content_read(self, filepath):
self.assertEqual(
- 'dummy content', self.http_case._load_data_file(filepath))
+ 'dummy content', self.http_case.load_data_file(filepath))
def test_load_file(self, m_open):
self.http_case.test_directory = '.'
@@ -52,7 +52,7 @@ class DataFileTest(unittest.TestCase):
filepath = '/top-level.private'
with self.assertRaises(ValueError):
- self.http_case._load_data_file(filepath)
+ self.http_case.load_data_file(filepath)
self.assertFalse(m_open.called)
def test_load_file_in_parent_dir(self, m_open):
@@ -60,7 +60,7 @@ class DataFileTest(unittest.TestCase):
filepath = '../file-in-parent-dir.txt'
with self.assertRaises(ValueError):
- self.http_case._load_data_file(filepath)
+ self.http_case.load_data_file(filepath)
self.assertFalse(m_open.called)
def test_load_file_within_test_directory(self, m_open):
@@ -73,5 +73,5 @@ class DataFileTest(unittest.TestCase):
self.http_case.test_directory = '/a/b/c'
filepath = '../../b/E/file-in-test-dir.txt'
with self.assertRaises(ValueError):
- self.http_case._load_data_file(filepath)
+ self.http_case.load_data_file(filepath)
self.assertFalse(m_open.called)
| substitutions not allowed in left hand side of json paths in response_json_paths
``` yaml
$.allocations.$ENVIRON['RP_UUID'].DISK_GB: 40
```
does not work, maybe it should?
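If this were supported, quoting becomes the author's problem: once a value such as a uuid is substituted into the query side it needs bracket quoting, or the hyphens read like arithmetic to the json path parser. A small, string-only sketch of the two forms (the uuid is invented):

```python
rp_uuid = 'ADC8AAFC-D564-40D1-9724-7680D3C010C2'  # pretend $ENVIRON['RP_UUID']

unquoted = '$.allocations.%s.DISK_GB' % rp_uuid
quoted = '$.allocations["%s"].DISK_GB' % rp_uuid

print(unquoted)  # hyphens look like subtraction to the parser
print(quoted)    # bracket quoting keeps the key as a single string
```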
| 0.0 | 87f389688a5aa7753e6f14a50869eb19fa12e592 | [
"gabbi/tests/test_load_data_file.py::DataFileTest::test_load_file",
"gabbi/tests/test_load_data_file.py::DataFileTest::test_load_file_in_directory",
"gabbi/tests/test_load_data_file.py::DataFileTest::test_load_file_in_parent_dir",
"gabbi/tests/test_load_data_file.py::DataFileTest::test_load_file_in_root",
"gabbi/tests/test_load_data_file.py::DataFileTest::test_load_file_not_within_test_directory",
"gabbi/tests/test_load_data_file.py::DataFileTest::test_load_file_within_test_directory"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2017-07-06 12:35:04+00:00 | apache-2.0 | 1,523 |
|
cdent__gabbi-292 | diff --git a/gabbi/utils.py b/gabbi/utils.py
index cc0285d..82fcb76 100644
--- a/gabbi/utils.py
+++ b/gabbi/utils.py
@@ -147,26 +147,16 @@ def parse_content_type(content_type, default_charset='utf-8'):
def host_info_from_target(target, prefix=None):
"""Turn url or host:port and target into test destination."""
force_ssl = False
+ # If we have a bare host prefix it with a scheme.
+ if '//' not in target and not target.startswith('http'):
+ target = 'http://' + target
+ if prefix:
+ target = target + prefix
split_url = urlparse.urlparse(target)
- if split_url.scheme:
- if split_url.scheme == 'https':
- force_ssl = True
- return split_url.hostname, split_url.port, split_url.path, force_ssl
- else:
- target = target
- prefix = prefix
-
- if ':' in target and '[' not in target:
- host, port = target.rsplit(':', 1)
- elif ']:' in target:
- host, port = target.rsplit(':', 1)
- else:
- host = target
- port = None
- host = host.replace('[', '').replace(']', '')
-
- return host, port, prefix, force_ssl
+ if split_url.scheme == 'https':
+ force_ssl = True
+ return split_url.hostname, split_url.port, split_url.path, force_ssl
def _colorize(color, message):
diff --git a/tox.ini b/tox.ini
index 67670a7..3fec99d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion = 3.1.1
skipsdist = True
-envlist = py35,py36,py37,py38,pypy3,pep8,limit,failskip,docs,py37-prefix,py37-limit,py37-verbosity,py37-failskip,py35-pytest,py36-pytest,py37-pytest
+envlist = py35,py36,py37,py38,py39,pypy3,pep8,limit,failskip,docs,py37-prefix,py37-limit,py37-verbosity,py37-failskip,py35-pytest,py36-pytest,py37-pytest
[testenv]
deps = -r{toxinidir}/requirements.txt
| cdent/gabbi | 3e804a7bd35e449c08ef9cb6d47d094691768499 | diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index a0c8abb..395eb0e 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -18,6 +18,8 @@ jobs:
toxenv: py37
- python: 3.8
toxenv: py38
+ - python: 3.9
+ toxenv: py39
- python: 3.5
toxenv: py35-pytest
- python: 3.6
diff --git a/gabbi/tests/test_utils.py b/gabbi/tests/test_utils.py
index 8f2c030..e623bcc 100644
--- a/gabbi/tests/test_utils.py
+++ b/gabbi/tests/test_utils.py
@@ -202,14 +202,14 @@ class UtilsHostInfoFromTarget(unittest.TestCase):
def _test_hostport(self, url_or_host, expected_host,
provided_prefix=None, expected_port=None,
- expected_prefix=None, expected_ssl=False):
+ expected_prefix='', expected_ssl=False):
host, port, prefix, ssl = utils.host_info_from_target(
url_or_host, provided_prefix)
# normalize hosts, they are case insensitive
self.assertEqual(expected_host.lower(), host.lower())
# port can be a string or int depending on the inputs
- self.assertEqual(expected_port, port)
+ self.assertEqual(str(expected_port), str(port))
self.assertEqual(expected_prefix, prefix)
self.assertEqual(expected_ssl, ssl)
| Python 3.7.6 urllib.parse changes break gabbi tests
Python 3.7.6 was released with a change to how urllib.parse will deal with "localhost:80".
3.7.5:
```python
>>> from urllib import parse
>>> parse.urlparse("foobar.com:999")
ParseResult(scheme='', netloc='', path='foobar.com:999', params='', query='', fragment='')
```
3.7.6
```python
>>> from urllib import parse
>>> parse.urlparse("foobar.com:999")
ParseResult(scheme='foobar.com', netloc='', path='999', params='', query='', fragment='')
```
See this bug: https://bugs.python.org/issue27657 and a related pull request: https://github.com/python/cpython/pull/16839
This breaks [gabbi.utils:host_info_from_target](https://github.com/cdent/gabbi/blob/8e02d4d2df99999e0eb8022f8fe99d7796f52151/gabbi/utils.py#L147-L169). It also apparently breaks a whole ton of other software out there in the world. This a) seems like the wrong fix, b) if it is indeed correct, is bad for a minor release.
Since gabbi only talks http, it can probably be fixed by branching on whether the target starts with `http`, but some discussion is probably worth having before going down that route.
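For reference, the patch above takes the scheme-prefixing route; a condensed, runnable sketch of that approach, which behaves the same on old and new `urllib.parse`:

```python
from urllib import parse as urlparse


def host_info_from_target(target, prefix=None):
    # If we have a bare host, prefix it with a scheme before parsing.
    if '//' not in target and not target.startswith('http'):
        target = 'http://' + target
    if prefix:
        target = target + prefix
    split_url = urlparse.urlparse(target)
    force_ssl = split_url.scheme == 'https'
    return split_url.hostname, split_url.port, split_url.path, force_ssl


print(host_info_from_target('foobar.com:999'))
# ('foobar.com', 999, '', False)
```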
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_host_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_hostport_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_simple_hostport",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_simple_hostport_with_prefix"
]
| [
"gabbi/tests/test_utils.py::BinaryTypesTest::test_binary",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_not_binary",
"gabbi/tests/test_utils.py::ParseContentTypeTest::test_parse_default",
"gabbi/tests/test_utils.py::ParseContentTypeTest::test_parse_error_default",
"gabbi/tests/test_utils.py::ParseContentTypeTest::test_parse_extra",
"gabbi/tests/test_utils.py::ParseContentTypeTest::test_parse_nocharset_default",
"gabbi/tests/test_utils.py::ParseContentTypeTest::test_parse_override_default",
"gabbi/tests/test_utils.py::ParseContentTypeTest::test_parse_simple",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_bad_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_both",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_charset",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_multiple_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_with_charset",
"gabbi/tests/test_utils.py::ColorizeTest::test_colorize_missing_color",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_already_bracket",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_full",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl_weird_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_no_double_colon",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_not_ssl_on_443",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port_and_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_prefix",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_preserve_query",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_simple",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl_on_80",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_url_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_url_long",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_plain_url_no_port",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_plain_url_with_port",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_port80_url",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_port_url",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_url"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-12-19 13:56:29+00:00 | apache-2.0 | 1,524 |
|
cdent__gabbi-302 | diff --git a/gabbi/case.py b/gabbi/case.py
index 6b7823c..c1c0dec 100644
--- a/gabbi/case.py
+++ b/gabbi/case.py
@@ -595,8 +595,12 @@ class HTTPTestCase(unittest.TestCase):
data = self.replace_template(data)
data = dumper_class.dumps(data, test=self)
else:
- raise ValueError(
- 'unable to process data to %s' % content_type)
+ if content_type:
+ raise ValueError(
+ 'unable to process data to %s' % content_type)
+ else:
+ raise ValueError(
+ 'no content-type available for processing data')
data = self.replace_template(data)
diff --git a/gabbi/handlers/jsonhandler.py b/gabbi/handlers/jsonhandler.py
index 6647f45..6177779 100644
--- a/gabbi/handlers/jsonhandler.py
+++ b/gabbi/handlers/jsonhandler.py
@@ -38,9 +38,14 @@ class JSONHandler(base.ContentHandler):
@staticmethod
def accepts(content_type):
- content_type = content_type.split(';', 1)[0].strip()
+ content_type = content_type.lower()
+ parameters = ''
+ if ';' in content_type:
+ content_type, parameters = content_type.split(';', 1)
+ content_type = content_type.strip()
return (content_type.endswith('+json') or
- content_type.startswith('application/json'))
+ content_type == 'application/json'
+ and 'stream=' not in parameters)
@classmethod
def replacer(cls, response_data, match):
| cdent/gabbi | a8d200bdfcaabb2351b2ccb8f9a2b4de693b2b81 | diff --git a/gabbi/tests/gabbits_intercept/self.yaml b/gabbi/tests/gabbits_intercept/self.yaml
index bdd0705..14def1c 100644
--- a/gabbi/tests/gabbits_intercept/self.yaml
+++ b/gabbi/tests/gabbits_intercept/self.yaml
@@ -149,22 +149,6 @@ tests:
response_json_paths:
$.data[0]: hello
-- name: json home content type is json
- url: /?data=hello
- method: GET
- request_headers:
- accept: application/json-home
- response_json_paths:
- $.data[0]: hello
-
-- name: json content type detection case insensitive
- url: /?data=hello
- method: GET
- request_headers:
- accept: ApPlIcAtIoN/JsOn-hOmE
- response_json_paths:
- $.data[0]: hello
-
- name: xml derived content type
desc: +xml types should not work for json paths
xfail: true
diff --git a/gabbi/tests/test_data_to_string.py b/gabbi/tests/test_data_to_string.py
new file mode 100644
index 0000000..d3150ba
--- /dev/null
+++ b/gabbi/tests/test_data_to_string.py
@@ -0,0 +1,54 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Test handling of data field in tests.
+"""
+
+import unittest
+
+from gabbi import case
+from gabbi import handlers
+
+
+class TestDataToString(unittest.TestCase):
+
+ def setUp(self):
+ self.case = case.HTTPTestCase('test_request')
+ self.case.content_handlers = []
+ for handler in handlers.RESPONSE_HANDLERS:
+ h = handler()
+ if hasattr(h, 'content_handler') and h.content_handler:
+ self.case.content_handlers.append(h)
+
+ def testHappyPath(self):
+ data = [{"hi": "low"}, {"yes": "no"}]
+ content_type = 'application/json'
+ body = self.case._test_data_to_string(data, content_type)
+ self.assertEqual('[{"hi": "low"}, {"yes": "no"}]', body)
+
+ def testNoContentType(self):
+ data = [{"hi": "low"}, {"yes": "no"}]
+ content_type = ''
+ with self.assertRaises(ValueError) as exc:
+ self.case._test_data_to_string(data, content_type)
+ self.assertEqual(
+ 'no content-type available for processing data',
+ str(exc.exception))
+
+ def testNoHandler(self):
+ data = [{"hi": "low"}, {"yes": "no"}]
+ content_type = 'application/xml'
+ with self.assertRaises(ValueError) as exc:
+ self.case._test_data_to_string(data, content_type)
+ self.assertEqual(
+ 'unable to process data to application/xml',
+ str(exc.exception))
diff --git a/gabbi/tests/test_handlers.py b/gabbi/tests/test_handlers.py
index b07e73f..17c753e 100644
--- a/gabbi/tests/test_handlers.py
+++ b/gabbi/tests/test_handlers.py
@@ -444,3 +444,40 @@ class HandlersTest(unittest.TestCase):
# method and then run its tests to confirm.
test = self.test('test_request')
handler(test)
+
+
+class TestJSONHandlerAccept(unittest.TestCase):
+ """Test that the json handler accepts function.
+
+ We need to confirm that it returns True and False at the right
+ times. This is somewhat tricky as there are a fair number of
+ MIME-types that include the string "JSON" but aren't, as a
+ whole document, capable of being decoded.
+ """
+
+ def _test_content_type(self, content_type, expected):
+ if expected:
+ self.assertTrue(
+ jsonhandler.JSONHandler.accepts(content_type),
+ "expected %s to be accepted but it was not!" % content_type)
+ else:
+ self.assertFalse(
+ jsonhandler.JSONHandler.accepts(content_type),
+ "expected %s to not be accepted but it was!" % content_type)
+
+ def test_many_content_types(self):
+ cases = [
+ ("application/json", True),
+ ("application/JSON", True),
+ ("text/plain", False),
+ ("application/jsonlines", False),
+ ("application/json;stream=true", False),
+ ("application/json;streamable=pony", True),
+ ("application/stream+json", True),
+ ("application/xml", False),
+ ("application/json-seq", False),
+ ("application/json-home", False),
+ ]
+ for test in cases:
+ with self.subTest(test[0]):
+ self._test_content_type(*test)
| Error when response content-type is application/jsonlines
If the response is `application/jsonlines`, the JSON content handler kicks in and attempts to decode the response as JSON, which fails because it treats the whole document as one single JSON object rather than lines of JSON.
This happens because the handler's accepts method matches any content type that starts with 'application/json'.
The narrow fix is straightforward, but it might make better sense to create a jsonlines response handler and find a way to apply jsonpath to those lines (a sketch of a stricter accepts check follows this record). | 0.0 | a8d200bdfcaabb2351b2ccb8f9a2b4de693b2b81 | [
"gabbi/tests/test_data_to_string.py::TestDataToString::testNoContentType",
"gabbi/tests/test_handlers.py::TestJSONHandlerAccept::test_many_content_types"
]
| [
"gabbi/tests/test_data_to_string.py::TestDataToString::testHappyPath",
"gabbi/tests/test_data_to_string.py::TestDataToString::testNoHandler",
"gabbi/tests/test_handlers.py::HandlersTest::test_empty_response_handler",
"gabbi/tests/test_handlers.py::HandlersTest::test_resonse_headers_stringify",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_header",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_noregex_path_match",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_noregex_path_nomatch",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex_path_match",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex_path_nomatch",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_substitute_esc_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_substitute_noregex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_substitute_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_dict_type",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_from_disk_json_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_from_disk_json_path_fail",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex_number",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex_path_match",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_regex_path_nomatch",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_substitution_esc_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_substitution_noregex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_substitution_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_yamlhandler",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_string_list_type",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail_big_output",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail_big_payload"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-02 13:47:57+00:00 | apache-2.0 | 1,525 |
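The record above turns on how narrowly a JSON content handler should match a Content-Type header. The ten (content_type, expected) pairs in its test_patch pin down the intended behaviour; the function below is a hedged, standalone sketch that satisfies those ten cases. It is not gabbi's actual `JSONHandler.accepts` implementation, and the function name is illustrative only.

```
def accepts_json(content_type):
    """Return True when a Content-Type should be decoded as one JSON document.

    A standalone sketch consistent with the test cases above; not
    gabbi's real implementation.
    """
    # Separate the media type from its parameters:
    # "application/json;stream=true" -> ("application/json", "stream=true").
    media_type, _, params = content_type.partition(';')
    media_type = media_type.strip().lower()

    # A streamed JSON response is many documents, not one, so refuse it.
    if 'stream=true' in params.replace(' ', '').lower():
        return False

    # Accept plain JSON and "+json" structured-syntax suffixes
    # (e.g. application/stream+json), but not application/jsonlines,
    # application/json-seq or application/json-home.
    return (media_type == 'application/json'
            or (media_type.startswith('application/')
                and media_type.endswith('+json')))
```

Run against the ten content types listed in test_many_content_types above, this sketch returns the expected value for each.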
|
cdent__gabbi-303 | diff --git a/gabbi/case.py b/gabbi/case.py
index 6b7823c..c1c0dec 100644
--- a/gabbi/case.py
+++ b/gabbi/case.py
@@ -595,8 +595,12 @@ class HTTPTestCase(unittest.TestCase):
data = self.replace_template(data)
data = dumper_class.dumps(data, test=self)
else:
- raise ValueError(
- 'unable to process data to %s' % content_type)
+ if content_type:
+ raise ValueError(
+ 'unable to process data to %s' % content_type)
+ else:
+ raise ValueError(
+ 'no content-type available for processing data')
data = self.replace_template(data)
| cdent/gabbi | a8d200bdfcaabb2351b2ccb8f9a2b4de693b2b81 | diff --git a/gabbi/tests/test_data_to_string.py b/gabbi/tests/test_data_to_string.py
new file mode 100644
index 0000000..d3150ba
--- /dev/null
+++ b/gabbi/tests/test_data_to_string.py
@@ -0,0 +1,54 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Test handling of data field in tests.
+"""
+
+import unittest
+
+from gabbi import case
+from gabbi import handlers
+
+
+class TestDataToString(unittest.TestCase):
+
+ def setUp(self):
+ self.case = case.HTTPTestCase('test_request')
+ self.case.content_handlers = []
+ for handler in handlers.RESPONSE_HANDLERS:
+ h = handler()
+ if hasattr(h, 'content_handler') and h.content_handler:
+ self.case.content_handlers.append(h)
+
+ def testHappyPath(self):
+ data = [{"hi": "low"}, {"yes": "no"}]
+ content_type = 'application/json'
+ body = self.case._test_data_to_string(data, content_type)
+ self.assertEqual('[{"hi": "low"}, {"yes": "no"}]', body)
+
+ def testNoContentType(self):
+ data = [{"hi": "low"}, {"yes": "no"}]
+ content_type = ''
+ with self.assertRaises(ValueError) as exc:
+ self.case._test_data_to_string(data, content_type)
+ self.assertEqual(
+ 'no content-type available for processing data',
+ str(exc.exception))
+
+ def testNoHandler(self):
+ data = [{"hi": "low"}, {"yes": "no"}]
+ content_type = 'application/xml'
+ with self.assertRaises(ValueError) as exc:
+ self.case._test_data_to_string(data, content_type)
+ self.assertEqual(
+ 'unable to process data to application/xml',
+ str(exc.exception))
| If content-type is not set, but data is, the error message is incomplete
If you try to write `data` without setting a content-type header, there's no content handler and the error message is not very useful:
```
File "/mnt/share/cdentsrc/nova/.tox/functional/local/lib/python2.7/site-packages/gabbi/suitemaker.py", line 95, in do_test
return test_method(*args, **kwargs)
File "/mnt/share/cdentsrc/nova/.tox/functional/local/lib/python2.7/site-packages/gabbi/case.py", line 94, in wrapper
func(self)
File "/mnt/share/cdentsrc/nova/.tox/functional/local/lib/python2.7/site-packages/gabbi/case.py", line 148, in test_request
self._run_test()
File "/mnt/share/cdentsrc/nova/.tox/functional/local/lib/python2.7/site-packages/gabbi/case.py", line 524, in _run_test
utils.extract_content_type(headers, default='')[0])
File "/mnt/share/cdentsrc/nova/.tox/functional/local/lib/python2.7/site-packages/gabbi/case.py", line 587, in _test_data_to_string
'unable to process data to %s' % content_type)
ValueError: unable to process data to
```
If content_type is None or empty, we should provide a different message. | 0.0 | a8d200bdfcaabb2351b2ccb8f9a2b4de693b2b81 | [
"gabbi/tests/test_data_to_string.py::TestDataToString::testNoContentType"
]
| [
"gabbi/tests/test_data_to_string.py::TestDataToString::testHappyPath",
"gabbi/tests/test_data_to_string.py::TestDataToString::testNoHandler"
]
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-09-02 17:18:57+00:00 | apache-2.0 | 1,526 |
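The gabbi-303 record shows the error-message branching directly in its patch; the snippet below is only a hedged, self-contained paraphrase of that branching, useful for seeing the two failure modes side by side. The function signature and handler protocol here are illustrative assumptions, not gabbi's actual API.

```
def data_to_string(data, content_type, content_handlers):
    """Serialize request data with a matching handler (illustrative sketch).

    Mirrors the patch above: a missing content-type now raises its own,
    clearer ValueError instead of the truncated 'unable to process data to '.
    """
    for handler in content_handlers:
        if handler.accepts(content_type):
            # Hypothetical handler call; gabbi's real dumps takes more arguments.
            return handler.dumps(data)
    if content_type:
        raise ValueError('unable to process data to %s' % content_type)
    raise ValueError('no content-type available for processing data')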