Dataset Viewer
Auto-converted to Parquet

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| instance_id | string (length) | 15 | 41 |
| patch | string (length) | 349 | 14.1k |
| repo | string (length) | 10 | 37 |
| base_commit | string (length) | 40 | 40 |
| hints_text | string (1 distinct value) | n/a | n/a |
| test_patch | string (length) | 527 | 14.4k |
| problem_statement | string (length) | 69 | 7.21k |
| version | string (1 distinct value) | n/a | n/a |
| FAIL_TO_PASS | sequence (length) | 1 | 69 |
| PASS_TO_PASS | sequence (length) | 0 | 1.07k |
| created_at | string (length) | 25 | 25 |
| __index_level_0__ | int64 (value) | 4.23k | 6.4k |
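Rows like the ones below can be pulled down programmatically with the `datasets` library. This is a minimal sketch; the dataset id is a hypothetical placeholder, since this excerpt does not name the hosting repository.

```python
# Minimal loading sketch. "<org>/<dataset>" is a placeholder id; the excerpt
# does not name the actual dataset repository.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset>", split="train")
row = ds[0]
print(row["instance_id"], row["repo"], row["created_at"])
```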
instance_id: nolanbconaway__binoculars-4
patch:
```diff
diff --git a/lib/binoculars/__init__.py b/lib/binoculars/__init__.py index ab04d17..7a4885d 100644 --- a/lib/binoculars/__init__.py +++ b/lib/binoculars/__init__.py @@ -7,7 +7,7 @@ from scipy import stats def binomial_jeffreys_interval(p: float, n: int, tail: str, z: float = 1.96): """Use compute a jeffrey's interval via beta distirbution CDF.""" - alpha = stats.norm.sf(z) + alpha = stats.norm.sf(z) * 2 a = n * p + 0.5 b = n - n * p + 0.5 if tail == "lower": @@ -41,6 +41,18 @@ def binomial_normal_interval(p: float, n: int, tail: str, z: float = 1.96): raise ValueError("Invalid tail! Choose from: lower, upper") +def binomial_clopper_pearson_interval(p: float, n: int, tail: str, z: float = 1.96): + """Return the clopper-pearson interval for a proportion.""" + alpha = stats.norm.sf(z) * 2 + k = p * n + if tail == "lower": + return stats.beta.ppf(alpha / 2, k, n - k + 1) + elif tail == "upper": + return stats.beta.ppf(1 - alpha / 2, k + 1, n - k) + else: + raise ValueError("Invalid tail! Choose from: lower, upper") + + def binomial_confidence( p: float, n: int, tail: str = None, z: float = 1.96, method="jeffrey" ) -> Union[float, Tuple[float]]: @@ -53,13 +65,14 @@ def binomial_confidence( n : int The n parameter of the binomial for the distributionon, tail : str - Tail of the CI to return, either lower or upper. If not provided, this function returns - a tuple of (lower, upper). if provided, it returns a float value. + Tail of the CI to return, either lower or upper. If not provided, this + function returns a tuple of (lower, upper). if provided, it returns a float + value. z : float Optional Z critical value. Default 1.96 for 95%. method : str - Optional approximation method. By default this uses Jeffrey's interval. Options: - jeffrey, wilson, normal. + Optional approximation method. By default this uses Jeffrey's interval. + Options: jeffrey, wilson, normal, clopper-pearson. Returns A tuple of (lower, upper) confidence interval values, or a single value. @@ -69,10 +82,13 @@ def binomial_confidence( "jeffrey": binomial_jeffreys_interval, "wilson": binomial_wilson_interval, "normal": binomial_normal_interval, + "clopper-pearson": binomial_clopper_pearson_interval, }[method] except KeyError: - raise ValueError("Invalid method! Choose from: jeffrey, wilson, normal") + raise ValueError( + "Invalid method! Choose from: jeffrey, wilson, normal, clopper-pearson" + ) if tail is not None: return func(p=p, n=n, z=z, tail=tail) diff --git a/readme.md b/readme.md index 2f37517..76827e6 100644 --- a/readme.md +++ b/readme.md @@ -14,6 +14,7 @@ Presently, the package implements: - [The Normal Approximation](https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Normal_approximation_interval) - [The Wilson Interval](https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval) (no continuity correction) - [Jeffrey's interval](https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Jeffreys_interval) (via scipy.stats.beta) +- [Clopper-Pearson interval](https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Clopper%E2%80%93Pearson_interval) (also via scipy.stats.beta) If you haven't spent a lot of time thinking about which interval _you_ should use (and why would you want to?), I suggest using the Wilson interval or Jeffrey's interval. Jeffrey's interval is returned by default by the `binomial_confidence` function in this package. 
diff --git a/setup.py b/setup.py index 070b04b..359819e 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ LONG_DESCRIPTION = (THIS_DIRECTORY / "readme.md").read_text() setup( name="binoculars", - version="0.1.2", + version="0.1.3", description="Various calculations for binomial confidence intervals.", long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", @@ -35,5 +35,4 @@ setup( extras_require=dict( test=["black==20.8b1", "pytest==6.2.1", "pytest-cov==2.10.1", "codecov==2.1.11"] ), - package_data={"shabadoo": ["version"]}, )
```
repo: nolanbconaway/binoculars
base_commit: fa6efdd5b2b671668a2be50ea647fc06c156dcaa
test_patch:
```diff
diff --git a/.github/workflows/test_on_push.yml b/.github/workflows/test_on_push.yml index e3e403f..3569dce 100644 --- a/.github/workflows/test_on_push.yml +++ b/.github/workflows/test_on_push.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6, 3.7, 3.8] + python-version: [3.6, 3.7, 3.8, 3.9] steps: - uses: actions/checkout@master diff --git a/test_binoculars.py b/test_binoculars.py index 3300fac..efe4166 100644 --- a/test_binoculars.py +++ b/test_binoculars.py @@ -3,8 +3,10 @@ import pytest import binoculars +METHODS = ["jeffrey", "wilson", "normal", "clopper-pearson"] [email protected]("method", ["jeffrey", "wilson", "normal"]) + [email protected]("method", METHODS) @pytest.mark.parametrize("n", [5, 1e2, 1e3, 1e5, 1e8]) @pytest.mark.parametrize("p", [0.01, 0.5, 0.99]) def test_lower_less_upper(method, n, p): @@ -21,7 +23,7 @@ def test_lower_less_upper(method, n, p): assert u == binoculars.binomial_confidence(p, n, method=method, tail="upper") [email protected]("method", ["jeffrey", "wilson", "normal"]) [email protected]("method", METHODS) @pytest.mark.parametrize("lower_n, greater_n", [(2, 3), (10, 20), (100, 200)]) def test_more_certain_with_n(method, lower_n, greater_n): """Test that certainty diminishes with greater N.""" @@ -32,7 +34,7 @@ def test_more_certain_with_n(method, lower_n, greater_n): assert lower_u > greater_u [email protected]("method", ["jeffrey", "wilson", "normal"]) [email protected]("method", METHODS) @pytest.mark.parametrize("lower_z, greater_z", [(1, 1.01), (1.96, 2.58)]) def test_z_certainty(method, lower_z, greater_z): """Test that the interval tightens with lower Z""" @@ -45,7 +47,7 @@ def test_z_certainty(method, lower_z, greater_z): assert lower_u < greater_u [email protected]("method", ["jeffrey", "wilson", "normal"]) [email protected]("method", METHODS) def test_invalid_tail_error(method): with pytest.raises(ValueError): binoculars.binomial_confidence(0.1, 10, tail="NOPE", method=method)
```
problem_statement:
Clopper-Pearson?

For completeness and elegance it would be interesting to also have this method: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Clopper%E2%80%93Pearson_interval which, if I understood correctly, is the most conservative. What do you think?
version: 0.0
FAIL_TO_PASS:
[ "test_binoculars.py::test_lower_less_upper[0.01-5-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.01-100.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.01-1000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.01-100000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.01-100000000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.5-5-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.5-100.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.5-1000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.5-100000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.5-100000000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.99-5-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.99-100.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.99-1000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.99-100000.0-clopper-pearson]", "test_binoculars.py::test_lower_less_upper[0.99-100000000.0-clopper-pearson]", "test_binoculars.py::test_more_certain_with_n[2-3-clopper-pearson]", "test_binoculars.py::test_more_certain_with_n[10-20-clopper-pearson]", "test_binoculars.py::test_more_certain_with_n[100-200-clopper-pearson]", "test_binoculars.py::test_z_certainty[1-1.01-clopper-pearson]", "test_binoculars.py::test_z_certainty[1.96-2.58-clopper-pearson]" ]
PASS_TO_PASS:
[ "test_binoculars.py::test_lower_less_upper[0.01-5-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.01-5-wilson]", "test_binoculars.py::test_lower_less_upper[0.01-5-normal]", "test_binoculars.py::test_lower_less_upper[0.01-100.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.01-100.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.01-100.0-normal]", "test_binoculars.py::test_lower_less_upper[0.01-1000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.01-1000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.01-1000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.01-100000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.01-100000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.01-100000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.01-100000000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.01-100000000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.01-100000000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.5-5-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.5-5-wilson]", "test_binoculars.py::test_lower_less_upper[0.5-5-normal]", "test_binoculars.py::test_lower_less_upper[0.5-100.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.5-100.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.5-100.0-normal]", "test_binoculars.py::test_lower_less_upper[0.5-1000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.5-1000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.5-1000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.5-100000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.5-100000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.5-100000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.5-100000000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.5-100000000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.5-100000000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.99-5-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.99-5-wilson]", "test_binoculars.py::test_lower_less_upper[0.99-5-normal]", "test_binoculars.py::test_lower_less_upper[0.99-100.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.99-100.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.99-100.0-normal]", "test_binoculars.py::test_lower_less_upper[0.99-1000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.99-1000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.99-1000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.99-100000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.99-100000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.99-100000.0-normal]", "test_binoculars.py::test_lower_less_upper[0.99-100000000.0-jeffrey]", "test_binoculars.py::test_lower_less_upper[0.99-100000000.0-wilson]", "test_binoculars.py::test_lower_less_upper[0.99-100000000.0-normal]", "test_binoculars.py::test_more_certain_with_n[2-3-jeffrey]", "test_binoculars.py::test_more_certain_with_n[2-3-wilson]", "test_binoculars.py::test_more_certain_with_n[2-3-normal]", "test_binoculars.py::test_more_certain_with_n[10-20-jeffrey]", "test_binoculars.py::test_more_certain_with_n[10-20-wilson]", "test_binoculars.py::test_more_certain_with_n[10-20-normal]", "test_binoculars.py::test_more_certain_with_n[100-200-jeffrey]", "test_binoculars.py::test_more_certain_with_n[100-200-wilson]", "test_binoculars.py::test_more_certain_with_n[100-200-normal]", "test_binoculars.py::test_z_certainty[1-1.01-jeffrey]", 
"test_binoculars.py::test_z_certainty[1-1.01-wilson]", "test_binoculars.py::test_z_certainty[1-1.01-normal]", "test_binoculars.py::test_z_certainty[1.96-2.58-jeffrey]", "test_binoculars.py::test_z_certainty[1.96-2.58-wilson]", "test_binoculars.py::test_z_certainty[1.96-2.58-normal]", "test_binoculars.py::test_invalid_tail_error[jeffrey]", "test_binoculars.py::test_invalid_tail_error[wilson]", "test_binoculars.py::test_invalid_tail_error[normal]", "test_binoculars.py::test_invalid_tail_error[clopper-pearson]", "test_binoculars.py::test_invalid_method_error" ]
created_at: 2021-07-03 14:11:51+00:00
__index_level_0__: 4,225
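The patch in this row derives the Clopper-Pearson bounds from the beta distribution. As a minimal standalone sketch (not the library's exact API, and assuming scipy is installed), the same calculation looks like this:

```python
# Sketch of the Clopper-Pearson interval added by the patch above: convert the
# z critical value to a two-sided alpha, then invert the beta CDF at each tail.
from scipy import stats

def clopper_pearson(p, n, z=1.96):
    alpha = stats.norm.sf(z) * 2  # two-sided tail mass for the z value
    k = p * n                     # successes implied by the proportion
    lower = stats.beta.ppf(alpha / 2, k, n - k + 1)
    upper = stats.beta.ppf(1 - alpha / 2, k + 1, n - k)
    return lower, upper

print(clopper_pearson(0.5, 100))  # approximately (0.398, 0.602)
```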
instance_id: online-judge-tools__template-generator-56
patch:
```diff
diff --git a/onlinejudge_template/generator/python.py b/onlinejudge_template/generator/python.py
index 88075ef..5fd6303 100644
--- a/onlinejudge_template/generator/python.py
+++ b/onlinejudge_template/generator/python.py
@@ -114,11 +114,11 @@ def _generate_input_dfs(node: FormatNode, *, declared: Set[VarName], initialized
         elif type_ == VarType.Float:
             return OtherNode(line=f"""{var} = 100.0 * random.random() # TODO: edit here""")
         elif type_ == VarType.String:
-            return OtherNode(line=f"""{var} = ''.join([random.choice('abcde') for range(random.randint(1, 100))]) # TODO: edit here""")
+            return OtherNode(line=f"""{var} = ''.join([random.choice('abcde') for _ in range(random.randint(1, 100))]) # TODO: edit here""")
         elif type_ == VarType.Char:
             return OtherNode(line=f"""{var} = random.choice('abcde') # TODO: edit here""")
         else:
-            return OtherNode(line=f"""{var} = None # TODO: edit here""")
+            return OtherNode(line=f"""{var} = random.randint(1, 10) # TODO: edit here""")
     elif isinstance(node, NewlineNode):
         return SentencesNode(sentences=[])
     elif isinstance(node, SequenceNode):
diff --git a/setup.cfg b/setup.cfg
index ac26f88..ca5af62 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,9 +14,9 @@ classifiers =
 
 [options.extras_require]
 dev =
-    isort == 5.4.1
+    isort == 5.5.2
     mypy == 0.782
-    pylint == 2.5.3
+    pylint == 2.6.0
     yapf == 0.30.0
 doc =
     sphinx >= 2.4
```
repo: online-judge-tools/template-generator
base_commit: f0230727fb9e67eb20d3f5c537d1747bb9fb90b3
test_patch:
```diff
diff --git a/tests/command_template.py b/tests/command_template.py index 28de86f..facea02 100644 --- a/tests/command_template.py +++ b/tests/command_template.py @@ -13,6 +13,7 @@ from onlinejudge_template.main import main class TestOJTemplateCommand(unittest.TestCase): """TestOJTemplateCommand is a class for end-to-end tests about oj-template command. + The tests actually compile and execute the generated code and check them get AC on sample cases. """ def _helper(self, *, url: str, template: str, placeholder: str, code: str, compile: Callable[[pathlib.Path], List[str]], command: Callable[[pathlib.Path], str]): with tempfile.TemporaryDirectory() as tmpdir_: @@ -43,8 +44,8 @@ class TestOJTemplateCommand(unittest.TestCase): y = str(b) * a return int(min(x, y)) """), ' ') - compile = lambda tmpdir: [sys.executable, '--version'] - command = lambda tmpdir: ' '.join([sys.executable, str(tmpdir / 'main.py')]) + compile = lambda tmpdir: [sys.executable, '--version'] # nop + command = lambda tmpdir: ' '.join([sys.executable, str(tmpdir / template)]) self._helper(url=url, template=template, placeholder=placeholder, code=code, compile=compile, command=command) def test_main_cpp_aplusb(self) -> None: @@ -54,6 +55,60 @@ class TestOJTemplateCommand(unittest.TestCase): code = textwrap.indent(textwrap.dedent("""\ return A + B; """), ' ') - compile = lambda tmpdir: ['g++', '-std=c++14', str(tmpdir / 'main.cpp'), '-o', str(tmpdir / 'a.out')] + compile = lambda tmpdir: ['g++', '-std=c++14', str(tmpdir / template), '-o', str(tmpdir / 'a.out')] command = lambda tmpdir: str(tmpdir / 'a.out') self._helper(url=url, template=template, placeholder=placeholder, code=code, compile=compile, command=command) + + +class TestOJTemplateCommandGenerator(unittest.TestCase): + """TestOJTemplateCommandGenerator is a class for end-to-end tests about oj-template command. + The tests actually executes the generator and check the result with a validator. + """ + def _helper(self, *, url: str, template: str, compile: Callable[[pathlib.Path], List[str]], command: Callable[[pathlib.Path], List[str]]): + with tempfile.TemporaryDirectory() as tmpdir_: + tmpdir = pathlib.Path(tmpdir_) + source_file = tmpdir / template + + # generate + with open(source_file, 'w') as fh: + with contextlib.redirect_stdout(fh): + main(['-t', template, url]) + + # test + subprocess.check_call(compile(tmpdir), stdout=sys.stdout, stderr=sys.stderr) + return subprocess.check_output(command(tmpdir), stderr=sys.stderr) + + def test_generate_py_arc088_b(self) -> None: + # arc088_b has a format with a binary string variable. + url = 'https://atcoder.jp/contests/arc088/tasks/arc088_b' + template = 'generate.py' + compile = lambda tmpdir: [sys.executable, '--version'] # nop + command = lambda tmpdir: [sys.executable, str(tmpdir / template)] + + def validate(case: bytes) -> None: + lines = case.splitlines() + self.assertEqual(len(lines), 1) + s, = lines[0].split() + self.assertTrue(s.isalpha()) + + validate(self._helper(url=url, template=template, compile=compile, command=command)) + + def test_generate_py_arc089_b(self) -> None: + # arc089_b has a non-trivial format with char variables. 
+ url = 'https://atcoder.jp/contests/arc089/tasks/arc089_b' + template = 'generate.py' + compile = lambda tmpdir: [sys.executable, '--version'] # nop + command = lambda tmpdir: [sys.executable, str(tmpdir / template)] + + def validate(case: bytes) -> None: + lines = case.splitlines() + n, k = map(int, lines[0].split()) + self.assertEqual(len(lines) - 1, n) + for line in lines[1:]: + x, y, c = line.split() + int(x) + int(y) + self.assertTrue(c.isalpha()) + self.assertEqual(len(c), 1) + + validate(self._helper(url=url, template=template, compile=compile, command=command))
```
problem_statement:
The default template `generate.py` has a syntax error when string variables exist in the input

```shell
oj-template -t generate.py https://atcoder.jp/contests/abc166/tasks/abc166_a
```

The code generated by this command appears to contain a syntax error. The same happened with other problems where a string is given as input. Since I am not familiar with Python, I could not work out what the specific mistake was.
version: 0.0
FAIL_TO_PASS:
[ "tests/command_template.py::TestOJTemplateCommandGenerator::test_generate_py_arc088_b" ]
PASS_TO_PASS:
[ "tests/command_template.py::TestOJTemplateCommand::test_main_py_abc152_b", "tests/command_template.py::TestOJTemplateCommandGenerator::test_generate_py_arc089_b" ]
created_at: 2020-09-19 04:45:29+00:00
__index_level_0__: 4,397
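The one-character nature of this fix is easy to miss in the diff: a list comprehension without a loop target does not parse. A minimal illustration:

```python
import random

# Before the fix the template emitted `for range(...)` with no loop variable,
# which is a SyntaxError:
#   s = ''.join([random.choice('abcde') for range(random.randint(1, 100))])

# The patched template binds a throwaway variable, which parses and runs:
s = ''.join([random.choice('abcde') for _ in range(random.randint(1, 100))])
print(s)
```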
instance_id: openshift__openshift-ansible-4543
patch:
```diff
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py new file mode 100644 index 000000000..1403e9dcc --- /dev/null +++ b/filter_plugins/openshift_version.py @@ -0,0 +1,129 @@ +#!/usr/bin/python + +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +""" +Custom version comparison filters for use in openshift-ansible +""" + +# pylint can't locate distutils.version within virtualenv +# https://github.com/PyCQA/pylint/issues/73 +# pylint: disable=no-name-in-module, import-error +from distutils.version import LooseVersion + + +def legacy_gte_function_builder(name, versions): + """ + Build and return a version comparison function. + + Ex: name = 'oo_version_gte_3_1_or_1_1' + versions = {'enterprise': '3.1', 'origin': '1.1'} + + returns oo_version_gte_3_1_or_1_1, a function which based on the + version and deployment type will return true if the provided + version is greater than or equal to the function's version + """ + enterprise_version = versions['enterprise'] + origin_version = versions['origin'] + + def _gte_function(version, deployment_type): + """ + Dynamic function created by gte_function_builder. + + Ex: version = '3.1' + deployment_type = 'openshift-enterprise' + returns True/False + """ + version_gte = False + if 'enterprise' in deployment_type: + if str(version) >= LooseVersion(enterprise_version): + version_gte = True + elif 'origin' in deployment_type: + if str(version) >= LooseVersion(origin_version): + version_gte = True + return version_gte + _gte_function.__name__ = name + return _gte_function + + +def gte_function_builder(name, gte_version): + """ + Build and return a version comparison function. + + Ex: name = 'oo_version_gte_3_6' + version = '3.6' + + returns oo_version_gte_3_6, a function which based on the + version will return true if the provided version is greater + than or equal to the function's version + """ + def _gte_function(version): + """ + Dynamic function created by gte_function_builder. + + Ex: version = '3.1' + returns True/False + """ + version_gte = False + if str(version) >= LooseVersion(gte_version): + version_gte = True + return version_gte + _gte_function.__name__ = name + return _gte_function + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + """ + Filters for version checking. + """ + # Each element of versions is composed of (major, minor_start, minor_end) + # Origin began versioning 3.x with 3.6, so begin 3.x with 3.6. + versions = [(3, 6, 10)] + + def __init__(self): + """ + Creates a new FilterModule for ose version checking. + """ + self._filters = {} + + # For each set of (major, minor, minor_iterations) + for major, minor_start, minor_end in self.versions: + # For each minor version in the range + for minor in range(minor_start, minor_end): + # Create the function name + func_name = 'oo_version_gte_{}_{}'.format(major, minor) + # Create the function with the builder + func = gte_function_builder(func_name, "{}.{}.0".format(major, minor)) + # Add the function to the mapping + self._filters[func_name] = func + + # Create filters with special versioning requirements. + # Treat all Origin 1.x as special case. 
+ legacy_filters = [{'name': 'oo_version_gte_3_1_or_1_1', + 'versions': {'enterprise': '3.0.2.905', + 'origin': '1.1.0'}}, + {'name': 'oo_version_gte_3_1_1_or_1_1_1', + 'versions': {'enterprise': '3.1.1', + 'origin': '1.1.1'}}, + {'name': 'oo_version_gte_3_2_or_1_2', + 'versions': {'enterprise': '3.1.1.901', + 'origin': '1.2.0'}}, + {'name': 'oo_version_gte_3_3_or_1_3', + 'versions': {'enterprise': '3.3.0', + 'origin': '1.3.0'}}, + {'name': 'oo_version_gte_3_4_or_1_4', + 'versions': {'enterprise': '3.4.0', + 'origin': '1.4.0'}}, + {'name': 'oo_version_gte_3_5_or_1_5', + 'versions': {'enterprise': '3.5.0', + 'origin': '1.5.0'}}] + for legacy_filter in legacy_filters: + self._filters[legacy_filter['name']] = legacy_gte_function_builder(legacy_filter['name'], + legacy_filter['versions']) + + def filters(self): + """ + Return the filters mapping. + """ + return self._filters
```
repo: openshift/openshift-ansible
base_commit: a23bf82b8f58b8e4d0ee57b16415b0a380d64d19
test_patch:
```diff
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py new file mode 100644 index 000000000..52e9a9888 --- /dev/null +++ b/test/openshift_version_tests.py @@ -0,0 +1,72 @@ +""" Tests for the openshift_version Ansible filter module. """ +# pylint: disable=missing-docstring,invalid-name + +import os +import sys +import unittest + +sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../filter_plugins/")] + sys.path + +# pylint: disable=import-error +import openshift_version # noqa: E402 + + +class OpenShiftVersionTests(unittest.TestCase): + + openshift_version_filters = openshift_version.FilterModule() + + # Static tests for legacy filters. + legacy_gte_tests = [{'name': 'oo_version_gte_3_1_or_1_1', + 'positive_enterprise_version': '3.2.0', + 'negative_enterprise_version': '3.0.0', + 'positive_origin_version': '1.2.0', + 'negative_origin_version': '1.0.0'}, + {'name': 'oo_version_gte_3_1_1_or_1_1_1', + 'positive_enterprise_version': '3.2.0', + 'negative_enterprise_version': '3.1.0', + 'positive_origin_version': '1.2.0', + 'negative_origin_version': '1.1.0'}, + {'name': 'oo_version_gte_3_2_or_1_2', + 'positive_enterprise_version': '3.3.0', + 'negative_enterprise_version': '3.1.0', + 'positive_origin_version': '1.3.0', + 'negative_origin_version': '1.1.0'}, + {'name': 'oo_version_gte_3_3_or_1_3', + 'positive_enterprise_version': '3.4.0', + 'negative_enterprise_version': '3.2.0', + 'positive_origin_version': '1.4.0', + 'negative_origin_version': '1.2.0'}, + {'name': 'oo_version_gte_3_4_or_1_4', + 'positive_enterprise_version': '3.5.0', + 'negative_enterprise_version': '3.3.0', + 'positive_origin_version': '1.5.0', + 'negative_origin_version': '1.3.0'}, + {'name': 'oo_version_gte_3_5_or_1_5', + 'positive_enterprise_version': '3.6.0', + 'negative_enterprise_version': '3.4.0', + 'positive_origin_version': '1.6.0', + 'negative_origin_version': '1.4.0'}] + + def test_legacy_gte_filters(self): + for test in self.legacy_gte_tests: + for deployment_type in ['enterprise', 'origin']: + # Test negative case per deployment_type + self.assertFalse( + self.openshift_version_filters._filters[test['name']]( + test["negative_{}_version".format(deployment_type)], deployment_type)) + # Test positive case per deployment_type + self.assertTrue( + self.openshift_version_filters._filters[test['name']]( + test["positive_{}_version".format(deployment_type)], deployment_type)) + + def test_gte_filters(self): + for major, minor_start, minor_end in self.openshift_version_filters.versions: + for minor in range(minor_start, minor_end): + # Test positive case + self.assertTrue( + self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)]( + "{}.{}".format(major, minor + 1))) + # Test negative case + self.assertFalse( + self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)]( + "{}.{}".format(major, minor)))
"template error while templating string: no filter named 'oo_version_gte_3_5_or_1_5'" when installing using release-1.5 When attempting to install using the `release-1.5` branch, I'm getting an error: ~~~ TASK [openshift_ca : Generate the loopback master client config] ********************************************************************************************** fatal: [openshift-master-2]: FAILED! => {"failed": true, "msg": "template error while templating string: no filter named 'oo_version_gte_3_5_or_1_5'. String: {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config\n {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}\n --certificate-authority {{ named_ca_certificate }}\n {% endfor %}\n --certificate-authority={{ openshift_ca_cert }}\n --client-dir={{ openshift_ca_config_dir }}\n --groups=system:masters,system:openshift-master\n --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}\n --public-master={{ hostvars[openshift_ca_h ost].openshift.master.loopback_api_url }}\n --signer-cert={{ openshift_ca_cert }}\n --signer-key={{ openshift_ca_key }}\n --signer-serial={{ openshift_ca_serial }}\n --user=system:openshift-master\n --basename=openshift-master\n {% if open shift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}\n --expire-days={{ openshift_master_cert_expire_days }}\n {% endif %}"} ~~~ This seems to be because #4467 backported a change that used code from [filter_plugins/openshift_version.py](https://github.com/openshift/openshift-ansible/blob/402e8caa5bcbc22050bb4a4f189a802262ca725f/filter_plugins/openshift_version.py), which isn't in `release-1.5`.
version: 0.0
FAIL_TO_PASS:
[ "test/openshift_version_tests.py::OpenShiftVersionTests::test_gte_filters", "test/openshift_version_tests.py::OpenShiftVersionTests::test_legacy_gte_filters" ]
PASS_TO_PASS:
[]
created_at: 2017-06-22 17:01:50+00:00
__index_level_0__: 4,419
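The filter plugin in this row builds one comparison closure per version. A minimal sketch of that closure-builder pattern, assuming `distutils.version.LooseVersion` semantics rather than reproducing the upstream file verbatim:

```python
from distutils.version import LooseVersion

def gte_function_builder(name, gte_version):
    """Build a filter reporting whether `version` >= `gte_version`."""
    def _gte_function(version):
        return LooseVersion(str(version)) >= LooseVersion(gte_version)
    _gte_function.__name__ = name
    return _gte_function

oo_version_gte_3_6 = gte_function_builder('oo_version_gte_3_6', '3.6.0')
print(oo_version_gte_3_6('3.7'))  # True
print(oo_version_gte_3_6('3.5'))  # False
```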
instance_id: passaH2O__dorado-25
patch:
```diff
diff --git a/docs/source/userguide/index.rst b/docs/source/userguide/index.rst index 7427ad5..8efb28d 100644 --- a/docs/source/userguide/index.rst +++ b/docs/source/userguide/index.rst @@ -53,14 +53,6 @@ Defining the `Particles` Defining a :obj:`dorado.particle_track.Particles` class is a key step in using `dorado` to perform particle routing. To define a set of particles, the model parameters must first be defined as described above. The `Particles` class is initialized using an instance of the model parameters. From there, particles can be generated and routed. -.. Note:: - When :obj:`dorado.particle_track.Particles` is initialized, all of the - routing weights are automatically calculated. This may take some time for - larger model domains, but allows for faster particle routing when particles - are actually moved through the domain. There is a progress bar associated - with this process so you don't feel like Python has gotten stuck in the - object initialization. - Particle Generation and Routing ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/dorado/__init__.py b/dorado/__init__.py index 2c2564f..5f2a210 100644 --- a/dorado/__init__.py +++ b/dorado/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.4.0" +__version__ = "2.4.1" from . import lagrangian_walker diff --git a/dorado/lagrangian_walker.py b/dorado/lagrangian_walker.py index 225ca49..63f3901 100644 --- a/dorado/lagrangian_walker.py +++ b/dorado/lagrangian_walker.py @@ -9,7 +9,7 @@ from builtins import range, map from math import cos import numpy as np from numpy.random import random -from tqdm import tqdm +from numpy import maximum, nansum def random_pick_seed(choices, probs=None): @@ -39,76 +39,63 @@ def random_pick_seed(choices, probs=None): return choices[idx] -def make_weight(Particles): - """Make an array with the routing weights.""" - # local namespace function imports - from numpy import maximum - from numpy import nansum - # init the weight array - L, W = np.shape(Particles.stage) - Particles.weight = np.zeros((L, W, 9)) - # do weighting calculation for each cell - print('Calculating routing weights ...') - for i in tqdm(list(range(1, L-1)), ascii=True): - for j in list(range(1, W-1)): - # weights for each location in domain - # get stage values for neighboring cells - stage_ind = Particles.stage[i-1:i+2, j-1:j+2] +def make_weight(Particles, ind): + """Update weighting array with weights at this index""" + # get stage values for neighboring cells + stage_ind = Particles.stage[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2] - # calculate surface slope weights - weight_sfc = maximum(0, - (Particles.stage[i, j]-stage_ind) / - Particles.distances) + # calculate surface slope weights + weight_sfc = maximum(0, + (Particles.stage[ind] - stage_ind) / + Particles.distances) - # calculate inertial component weights - weight_int = maximum(0, ((Particles.qx[i, j] * Particles.jvec + - Particles.qy[i, j] * Particles.ivec) / - Particles.distances)) + # calculate inertial component weights + weight_int = maximum(0, ((Particles.qx[ind] * Particles.jvec + + Particles.qy[ind] * Particles.ivec) / + Particles.distances)) - # get depth and cell types for neighboring cells - depth_ind = Particles.depth[i-1:i+2, j-1:j+2] - ct_ind = Particles.cell_type[i-1:i+2, j-1:j+2] + # get depth and cell types for neighboring cells + depth_ind = Particles.depth[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2] + ct_ind = Particles.cell_type[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2] - # set weights for cells that are too shallow, or invalid 0 - weight_sfc[(depth_ind <= 
Particles.dry_depth) | (ct_ind == 2)] = 0 - weight_int[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0 + # set weights for cells that are too shallow, or invalid 0 + weight_sfc[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0 + weight_int[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0 - # if sum of weights is above 0 normalize by sum of weights - if nansum(weight_sfc) > 0: - weight_sfc = weight_sfc / nansum(weight_sfc) + # if sum of weights is above 0 normalize by sum of weights + if nansum(weight_sfc) > 0: + weight_sfc = weight_sfc / nansum(weight_sfc) - # if sum of weight is above 0 normalize by sum of weights - if nansum(weight_int) > 0: - weight_int = weight_int / nansum(weight_int) + # if sum of weight is above 0 normalize by sum of weights + if nansum(weight_int) > 0: + weight_int = weight_int / nansum(weight_int) - # define actual weight by using gamma, and weight components - weight = Particles.gamma * weight_sfc + \ - (1 - Particles.gamma) * weight_int + # define actual weight by using gamma, and weight components + weight = Particles.gamma * weight_sfc + \ + (1 - Particles.gamma) * weight_int - # modify the weight by the depth and theta weighting parameter - weight = depth_ind ** Particles.theta * weight + # modify the weight by the depth and theta weighting parameter + weight = depth_ind ** Particles.theta * weight - # if the depth is below the minimum depth then location is not - # considered therefore set the associated weight to nan - weight[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] \ - = np.nan + # if the depth is below the minimum depth then location is not + # considered therefore set the associated weight to nan + weight[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] \ + = np.nan - # if it's a dead end with only nans and 0's, choose deepest cell - if nansum(weight) <= 0: - weight = np.zeros_like(weight) - weight[depth_ind == np.max(depth_ind)] = 1.0 + # if it's a dead end with only nans and 0's, choose deepest cell + if nansum(weight) <= 0: + weight = np.zeros_like(weight) + weight[depth_ind == np.max(depth_ind)] = 1.0 - # set weight in the true weight array - Particles.weight[i, j, :] = weight.ravel() - - print('Finished routing weight calculation.') + # set weight in the true weight array + Particles.weight[ind[0], ind[1], :] = weight.ravel() def get_weight(Particles, ind): """Choose new cell location given an initial location. Function to randomly choose 1 of the surrounding 8 cells around the - current index using the pre-calculated routing weights. + current index using the routing weights from make_weight. 
**Inputs** : @@ -124,6 +111,9 @@ def get_weight(Particles, ind): New location given as a value between 1 and 8 (inclusive) """ + # Check if weights have been computed for this location: + if nansum(Particles.weight[ind[0], ind[1], :]) <= 0: + make_weight(Particles, ind) # randomly pick the new cell for the particle to move to using the # random_pick function and the set of weights if Particles.steepest_descent is not True: diff --git a/dorado/particle_track.py b/dorado/particle_track.py index 3a6886e..75d5de0 100644 --- a/dorado/particle_track.py +++ b/dorado/particle_track.py @@ -377,8 +377,8 @@ class Particles(): # initialize the walk_data self.walk_data = None - # create weights - this might take a bit of time for large domains - lw.make_weight(self) + # initialize routing weights array + self.weight = np.zeros((self.stage.shape[0], self.stage.shape[1], 9)) # function to clear walk data if you've made a mistake while generating it def clear_walk_data(self):
```
repo: passaH2O/dorado
base_commit: 4ab8cc77496d52940d14c84a25a5f1acfd06d556
test_patch:
```diff
diff --git a/tests/test_lagrangian_walker.py b/tests/test_lagrangian_walker.py index b43cb9e..a1d52d2 100644 --- a/tests/test_lagrangian_walker.py +++ b/tests/test_lagrangian_walker.py @@ -286,6 +286,11 @@ def test_make_weight_shallow(): ind = (1, 1) # set seed np.random.seed(0) + # do weighting calculation for each cell + L, W = np.shape(particles.stage) + for i in list(range(1, L-1)): + for j in list(range(1, W-1)): + lw.make_weight(particles, (i, j)) # make assertions about weights # at index, index[4] (self) will be 1 while neighbors will be 0 assert particles.weight[1, 1, 4] == 1.0 @@ -328,6 +333,11 @@ def test_make_weight_equal_opportunity(): ind = (1, 1) # set seed np.random.seed(0) + # do weighting calculation for each cell + L, W = np.shape(particles.stage) + for i in list(range(1, L-1)): + for j in list(range(1, W-1)): + lw.make_weight(particles, (i, j)) # make assertions about weights # at index, 3 neighbors will be equiprobable assert np.sum(particles.weight[1, 1, :]) == 3.0 @@ -372,6 +382,11 @@ def test_make_weight_unequal_opportunity(): ind = (1, 1) # set seed np.random.seed(0) + # do weighting calculation for each cell + L, W = np.shape(particles.stage) + for i in list(range(1, L-1)): + for j in list(range(1, W-1)): + lw.make_weight(particles, (i, j)) # make assertions about weights # at index, staying put index[4] higher probability than neighbors assert particles.weight[1, 1, 4] > particles.weight[1, 1, 5] @@ -411,6 +426,11 @@ def test_wet_boundary_no_weight(): particles = pt.Particles(tools) # set seed np.random.seed(0) + # do weighting calculation for each cell + L, W = np.shape(particles.stage) + for i in list(range(1, L-1)): + for j in list(range(1, W-1)): + lw.make_weight(particles, (i, j)) # assert weights at boundary cells should be 0 assert np.all(np.sum(particles.weight[0, :, 4]) == 0.0) assert np.all(np.sum(particles.weight[-1, :, 4]) == 0.0)
```
problem_statement:
Further Speeding Up Routing Weight Calculations

From #16, @wrightky said:

> So, looking at this code, I see that the main structure of the weight computation hasn't changed. We're still constructing small sub-arrays at each index, doing a few quick operations (max, cleaning, multiplications, summing), and saving the 9 resulting weights in a weight array. Surprised we hadn't tried this sooner given how much better it performs.

> It looks like the key reason the runtime scales better in this model isn't anything about how this computation works, it's how many times we do it. Originally, we performed these operations locally once for every particle at every iteration. So, the runtime scaled as Np_tracer * iterations_per_particle. Now, we perform this once for every cell, so it scales with domain size (L-2) * (W-2). I bet if you checked the example cases you benchmarked, the ratio of these values should give roughly the speedup you observed.

> One thing I wonder, though, is whether we could obtain even faster runtimes by modifying the structure of this computation itself, by switching from local array operations to global. Whatever is causing this function to take so long must be due to the fact that we're repeatedly constructing many sub-arrays in a loop and operating on them, instead of performing a few big global array operations (inside which we'd be taking advantage of all the fast numpy broadcasting stuff). In principle, if we broke up these operations (max, cleaning, multiplication, summing) into a loop over each of the D8 directions, with each being a global matrix operation, instead of a loop over each cell, we could reduce the amount of overhead from repeatedly calling these functions. I don't know exactly how that scales, but it'd be the difference between time(np.max(small array)) * (L-2) * (W-2) and time(np.max(big array)) * 9. Does that scale better? Not sure.

So that is a potential route for further speeding up the routing weight calculation.
version: 0.0
FAIL_TO_PASS:
[ "tests/test_lagrangian_walker.py::test_make_weight_shallow", "tests/test_lagrangian_walker.py::test_make_weight_equal_opportunity", "tests/test_lagrangian_walker.py::test_make_weight_unequal_opportunity", "tests/test_lagrangian_walker.py::test_wet_boundary_no_weight" ]
PASS_TO_PASS:
[ "tests/test_lagrangian_walker.py::test_random_pick_seed", "tests/test_lagrangian_walker.py::test_get_weight", "tests/test_lagrangian_walker.py::test_calculate_new_ind", "tests/test_lagrangian_walker.py::test_step_update_straight", "tests/test_lagrangian_walker.py::test_step_update_diagonal", "tests/test_lagrangian_walker.py::test_calc_travel_times", "tests/test_lagrangian_walker.py::test_check_for_boundary", "tests/test_lagrangian_walker.py::test_random_pick", "tests/test_lagrangian_walker.py::test_get_weight_norm", "tests/test_lagrangian_walker.py::test_get_weight_deep", "tests/test_lagrangian_walker.py::test_make_weight_deep" ]
created_at: 2021-01-22 22:27:27+00:00
__index_level_0__: 4,461
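The patch in this row replaces the up-front sweep over every cell with on-demand weighting: `get_weight` computes a cell's weights the first time a particle lands there. A toy illustration of that lazy-evaluation pattern (uniform stand-in weights, not dorado's real D8 calculation):

```python
import numpy as np

weights = np.zeros((10, 10, 9))  # per-cell weights start empty

def make_weight(ind):
    # stand-in for the real D8 weighting; here just a uniform distribution
    weights[ind[0], ind[1], :] = 1.0 / 9.0

def get_weight(ind):
    # compute weights only on first visit, as in the patched get_weight
    if np.nansum(weights[ind[0], ind[1], :]) <= 0:
        make_weight(ind)
    return weights[ind[0], ind[1], :]

print(get_weight((3, 4)))
```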
instance_id: pawelzny__dotty_dict-44
patch:
```diff
diff --git a/dotty_dict/dotty_dict.py b/dotty_dict/dotty_dict.py
index 96d47cf..b806aa9 100644
--- a/dotty_dict/dotty_dict.py
+++ b/dotty_dict/dotty_dict.py
@@ -127,7 +127,10 @@ class Dotty:
             except ValueError:
                 raise KeyError("List index must be an integer, got {}".format(it))
             if idx < len(data):
-                return get_from(items, data[idx])
+                if len(items) > 0:
+                    return get_from(items, data[idx])
+                else:
+                    return data[idx]
             else:
                 raise IndexError("List index out of range")
             # /end Handle embedded lists
```
repo: pawelzny/dotty_dict
base_commit: 96d795ba81eb2d785c18c971ae53a195716c084a
test_patch:
```diff
diff --git a/tests/test_list_in_dotty.py b/tests/test_list_in_dotty.py
index a3feb5c..583ac25 100644
--- a/tests/test_list_in_dotty.py
+++ b/tests/test_list_in_dotty.py
@@ -25,9 +25,13 @@ class TestListInDotty(unittest.TestCase):
             {
                 'subfield1': [{'subsubfield': 'Value of sub subfield (item 0)'}]
             }
-        ]
+        ],
+        'field6': ['a', 'b']
     })
 
+    def test_root_level_list_element(self):
+        self.assertEqual(self.dot['field6.0'], 'a')
+
     def test_access_subfield1_of_field3(self):
         self.assertEqual(self.dot['field3.0.subfield1'], 'Value of subfield1 (item 0)')
         self.assertEqual(self.dot['field3.1.subfield1'], 'Value of subfield1 (item 1)')
```
problem_statement:
Accessing root level list items by index fails

Thanks for the library! Using latest version (v1.1.1), this test fails

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest

from dotty_dict import dotty_l as dotty


class TestListInDotty(unittest.TestCase):

    def setUp(self):
        self.dot = dotty({
            'field1': 'Value of F1',
            'field2': 'Value of F2',
            'field3': [
                {
                    'subfield1': 'Value of subfield1 (item 0)',
                    'subfield2': 'Value of subfield2 (item 0)'
                },
                {
                    'subfield1': 'Value of subfield1 (item 1)',
                    'subfield2': 'Value of subfield2 (item 1)'
                },
            ],
            'field4': 'Not wanted',
            'field5': [
                {
                    'subfield1': [{'subsubfield': 'Value of sub subfield (item 0)'}]
                }
            ],
            'field6': ['a', 'b']
        })

    def test_root_level_list_element(self):
        self.assertEqual(self.dot['field6.0'], 'a')
```

This fails on `dotty_dict.py", line 122, in get_from` since no subfield is specified

```
it = items.pop(0)
IndexError: pop from empty list
```
version: 0.0
FAIL_TO_PASS:
[ "tests/test_list_in_dotty.py::TestListInDotty::test_root_level_list_element" ]
PASS_TO_PASS:
[ "tests/test_list_in_dotty.py::TestListInDotty::test_access_multidimensional_lists", "tests/test_list_in_dotty.py::TestListInDotty::test_access_sub_sub_field", "tests/test_list_in_dotty.py::TestListInDotty::test_access_subfield1_of_field3", "tests/test_list_in_dotty.py::TestListInDotty::test_assert_index_error_if_index_is_out_of_range", "tests/test_list_in_dotty.py::TestListInDotty::test_assert_key_error_if_index_is_not_integer", "tests/test_list_in_dotty.py::TestListInDotty::test_delete_subfield", "tests/test_list_in_dotty.py::TestListInDotty::test_dotty_contains_subfield_of_field", "tests/test_list_in_dotty.py::TestListInDotty::test_dotty_not_contains_out_of_range_subfield", "tests/test_list_in_dotty.py::TestListInDotty::test_set_subfield_in_list", "tests/test_list_in_dotty.py::TestListInDotty::test_update_subfield_in_list" ]
created_at: 2019-07-17 08:56:03+00:00
__index_level_0__: 4,467
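With the fix in this row's patch applied, the reproducer from the issue passes; the key change is that a trailing integer key returns the list element directly instead of recursing with an empty key list:

```python
# Assumes a dotty_dict release containing the fix above.
from dotty_dict import dotty_l as dotty

dot = dotty({'field6': ['a', 'b']})
print(dot['field6.0'])  # 'a'  (previously: IndexError: pop from empty list)
```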
instance_id: pydantic__pydantic-1291
patch:
```diff
diff --git a/docs/examples/exporting_models_json.py b/docs/examples/exporting_models_json.py --- a/docs/examples/exporting_models_json.py +++ b/docs/examples/exporting_models_json.py @@ -1,6 +1,5 @@ -from datetime import datetime, timedelta +from datetime import datetime from pydantic import BaseModel -from pydantic.json import timedelta_isoformat class BarModel(BaseModel): whatever: int @@ -11,16 +10,3 @@ class FooBarModel(BaseModel): m = FooBarModel(foo=datetime(2032, 6, 1, 12, 13, 14), bar={'whatever': 123}) print(m.json()) -# (returns a str) -class WithCustomEncoders(BaseModel): - dt: datetime - diff: timedelta - - class Config: - json_encoders = { - datetime: lambda v: v.timestamp(), - timedelta: timedelta_isoformat, - } - -m = WithCustomEncoders(dt=datetime(2032, 6, 1), diff=timedelta(hours=100)) -print(m.json()) diff --git a/docs/examples/exporting_models_json_encoders.py b/docs/examples/exporting_models_json_encoders.py new file mode 100644 --- /dev/null +++ b/docs/examples/exporting_models_json_encoders.py @@ -0,0 +1,16 @@ +from datetime import datetime, timedelta +from pydantic import BaseModel +from pydantic.json import timedelta_isoformat + +class WithCustomEncoders(BaseModel): + dt: datetime + diff: timedelta + + class Config: + json_encoders = { + datetime: lambda v: v.timestamp(), + timedelta: timedelta_isoformat, + } + +m = WithCustomEncoders(dt=datetime(2032, 6, 1), diff=timedelta(hours=100)) +print(m.json()) diff --git a/docs/examples/exporting_models_json_subclass.py b/docs/examples/exporting_models_json_subclass.py new file mode 100644 --- /dev/null +++ b/docs/examples/exporting_models_json_subclass.py @@ -0,0 +1,23 @@ +from datetime import date, timedelta +from pydantic import BaseModel +from pydantic.validators import int_validator + +class DayThisYear(date): + """ + Contrived example of a special type of date that + takes an int and interprets it as a day in the current year + """ + @classmethod + def __get_validators__(cls): + yield int_validator + yield cls.validate + + @classmethod + def validate(cls, v: int): + return date.today().replace(month=1, day=1) + timedelta(days=v) + +class FooModel(BaseModel): + date: DayThisYear + +m = FooModel(date=300) +print(m.json()) diff --git a/pydantic/json.py b/pydantic/json.py --- a/pydantic/json.py +++ b/pydantic/json.py @@ -18,25 +18,27 @@ def isoformat(o: Union[datetime.date, datetime.time]) -> str: ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = { + bytes: lambda o: o.decode(), Color: str, + datetime.date: isoformat, + datetime.datetime: isoformat, + datetime.time: isoformat, + datetime.timedelta: lambda td: td.total_seconds(), + Decimal: float, + Enum: lambda o: o.value, + frozenset: list, + GeneratorType: list, IPv4Address: str, - IPv6Address: str, IPv4Interface: str, - IPv6Interface: str, IPv4Network: str, + IPv6Address: str, + IPv6Interface: str, IPv6Network: str, - SecretStr: str, + Path: str, SecretBytes: str, - UUID: str, - datetime.datetime: isoformat, - datetime.date: isoformat, - datetime.time: isoformat, - datetime.timedelta: lambda td: td.total_seconds(), + SecretStr: str, set: list, - frozenset: list, - GeneratorType: list, - bytes: lambda o: o.decode(), - Decimal: float, + UUID: str, } @@ -46,26 +48,29 @@ def pydantic_encoder(obj: Any) -> Any: if isinstance(obj, BaseModel): return obj.dict() - elif isinstance(obj, Enum): - return obj.value - elif isinstance(obj, Path): - return str(obj) elif is_dataclass(obj): return asdict(obj) - try: - encoder = ENCODERS_BY_TYPE[obj.__class__] - except KeyError: - 
raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable") - else: + # Check the class type and its superclasses for a matching encoder + for base in obj.__class__.__mro__[:-1]: + try: + encoder = ENCODERS_BY_TYPE[base] + except KeyError: + continue return encoder(obj) + else: # We have exited the for loop without finding a suitable encoder + raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable") def custom_pydantic_encoder(type_encoders: Dict[Any, Callable[[Type[Any]], Any]], obj: Any) -> Any: - encoder = type_encoders.get(obj.__class__) - if encoder: + # Check the class type and its superclasses for a matching encoder + for base in obj.__class__.__mro__[:-1]: + try: + encoder = type_encoders[base] + except KeyError: + continue return encoder(obj) - else: + else: # We have exited the for loop without finding a suitable encoder return pydantic_encoder(obj)
```
repo: pydantic/pydantic
base_commit: 5a705a202fd6d10f895145bb625e4c8c9a54e4e3
test_patch:
```diff
diff --git a/tests/test_json.py b/tests/test_json.py --- a/tests/test_json.py +++ b/tests/test_json.py @@ -91,6 +91,41 @@ class Model(BaseModel): assert m.json(exclude={'b'}) == '{"a": 10.2, "c": 10.2, "d": {"x": 123, "y": "123"}}' +def test_subclass_encoding(): + class SubDate(datetime.datetime): + pass + + class Model(BaseModel): + a: datetime.datetime + b: SubDate + + m = Model(a=datetime.datetime(2032, 1, 1, 1, 1), b=SubDate(2020, 2, 29, 12, 30)) + assert m.dict() == {'a': datetime.datetime(2032, 1, 1, 1, 1), 'b': SubDate(2020, 2, 29, 12, 30)} + assert m.json() == '{"a": "2032-01-01T01:01:00", "b": "2020-02-29T12:30:00"}' + + +def test_subclass_custom_encoding(): + class SubDate(datetime.datetime): + pass + + class SubDelta(datetime.timedelta): + pass + + class Model(BaseModel): + a: SubDate + b: SubDelta + + class Config: + json_encoders = { + datetime.datetime: lambda v: v.strftime('%a, %d %b %C %H:%M:%S'), + datetime.timedelta: timedelta_isoformat, + } + + m = Model(a=SubDate(2032, 1, 1, 1, 1), b=SubDelta(hours=100)) + assert m.dict() == {'a': SubDate(2032, 1, 1, 1, 1), 'b': SubDelta(days=4, seconds=14400)} + assert m.json() == '{"a": "Thu, 01 Jan 20 01:01:00", "b": "P4DT4H0M0.000000S"}' + + def test_invalid_model(): class Foo: pass
```
problem_statement:
Subclasses of known types are not JSON serializable

# Feature Request/(Bug?)

Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.4
pydantic compiled: False
install path: /Users/step7212/.pyenv/versions/3.8.1/envs/fastdate/lib/python3.8/site-packages/pydantic
python version: 3.8.1 (default, Jan 10 2020, 09:36:37) [Clang 11.0.0 (clang-1100.0.33.16)]
platform: macOS-10.14.6-x86_64-i386-64bit
optional deps. installed: []
```

I would like to use a package that subclasses a standard python library class, adding convenience features but maintaining the interface of its inheritance. Currently, pydantic will error when attempting to serialize the subclass, as it is not known in `ENCODERS_BY_TYPE`:
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/json.py#L20-L40

Example:

```py
>>> import pendulum
>>> import pydantic
>>> class MyModel(pydantic.BaseModel):
...     date_field: pendulum.DateTime
...
>>> m = MyModel(date_field=pendulum.now('UTC'))
>>> m
MyModel(date_field=DateTime(2020, 3, 2, 16, 44, 42, 977836, tzinfo=Timezone('UTC')))
>>> m.dict()
{'date_field': DateTime(2020, 3, 2, 16, 44, 42, 977836, tzinfo=Timezone('UTC'))}
>>> m.json()
Traceback (most recent call last):
  File "/Users/step7212/git/hub/pydantic/pydantic/json.py", line 57, in pydantic_encoder
    encoder = ENCODERS_BY_TYPE[type(obj)]
KeyError: <class 'pendulum.datetime.DateTime'>

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/step7212/git/hub/pydantic/pydantic/main.py", line 419, in json
    return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)
  File "/Users/step7212/.pyenv/versions/3.8.1/lib/python3.8/json/__init__.py", line 234, in dumps
    return cls(
  File "/Users/step7212/.pyenv/versions/3.8.1/lib/python3.8/json/encoder.py", line 199, in encode
    chunks = self.iterencode(o, _one_shot=True)
  File "/Users/step7212/.pyenv/versions/3.8.1/lib/python3.8/json/encoder.py", line 257, in iterencode
    return _iterencode(o, 0)
  File "/Users/step7212/git/hub/pydantic/pydantic/json.py", line 59, in pydantic_encoder
    raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
TypeError: Object of type 'DateTime' is not JSON serializable
```

Since all pendulum types are children of the std `datetime` types, they should be serializable in the same way:

```py
>>> import pendulum
>>> import pydantic
>>>
>>> class MyModel(pydantic.BaseModel):
...     date_field: pendulum.DateTime
...
>>> m = MyModel(date_field=pendulum.now('UTC'))
>>> m
MyModel(date_field=DateTime(2020, 3, 2, 16, 42, 22, 403965, tzinfo=Timezone('UTC')))
>>> m.dict()
{'date_field': DateTime(2020, 3, 2, 16, 42, 22, 403965, tzinfo=Timezone('UTC'))}
>>> m.json()
'{"date_field": "2020-03-02T16:42:22.403965+00:00"}'
```

This is doable by iterating over the [method resolution order](https://docs.python.org/3/library/stdtypes.html#class.__mro__) attribute, and I've created a branch to demonstrate, which was used for the desired output above:
https://github.com/samuelcolvin/pydantic/compare/master...StephenBrown2:encode_known_subclasses

<details>
<summary>Benchmarks for funsies, this actually turns out to be a little faster than before:</summary>

```
❯ git checkout master
Switched to branch 'master'
Your branch is up to date with 'origin/master'.
❯ make benchmark-pydantic
python benchmarks/run.py pydantic-only
generating test cases...
pydantic time=1.043s, success=50.10%
pydantic time=0.982s, success=50.10%
pydantic time=0.788s, success=50.10%
pydantic time=0.732s, success=50.10%
pydantic time=0.741s, success=50.10%
pydantic best=0.732s, avg=0.857s, stdev=0.145s
pydantic best=121.947μs/iter avg=142.857μs/iter stdev=24.164μs/iter version=1.4a1
❯ git checkout encode_known_subclasses
Switched to branch 'encode_known_subclasses'
Your branch is up to date with 'mine/encode_known_subclasses'.
❯ make benchmark-pydantic
python benchmarks/run.py pydantic-only
pydantic time=0.973s, success=50.10%
pydantic time=0.742s, success=50.10%
pydantic time=0.731s, success=50.10%
pydantic time=0.764s, success=50.10%
pydantic time=0.734s, success=50.10%
pydantic best=0.731s, avg=0.789s, stdev=0.104s
pydantic best=121.800μs/iter avg=131.485μs/iter stdev=17.306μs/iter version=1.4a1
```

Perhaps this could also encourage the merge of using the `__class__` attribute instead of `type()` as well:
https://github.com/samuelcolvin/pydantic/compare/samuelcolvin:master...use-class-attribute

EDITED Rebased on current master:
https://github.com/samuelcolvin/pydantic/compare/master...StephenBrown2:use-class-attribute
</details>

<details>
<summary>Aside on usage of __mro__:</summary>

I noticed that [`__mro__`](https://docs.python.org/3/library/stdtypes.html?highlight=__mro__#class.__mro__) is not used in many places, though it is referenced in a few places, mostly in mypy.py:
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/class_validators.py#L331-L332
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/main.py#L153
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/mypy.py#L84
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/mypy.py#L174
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/mypy.py#L204
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/mypy.py#L275

Rather, [typing's `__supertype__`](https://github.com/python/cpython/blob/ab6423fe2de0ed5f8a0dc86a9c7070229326b0f0/Lib/typing.py#L1929) is used, which I assumed was custom only because I couldn't find it in the [Python docs](https://docs.python.org/3/search.html?q=__supertype__) or anywhere outside of
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/typing.py#L180-L187

Would it be prudent/performant to replace the usage of `__supertype__` with a fast-breaking iteration of `__mro__` or perhaps more common usage of `lenient_subclass` in `typing.py`? and subsequently:
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/fields.py#L361-L362
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/schema.py#L641-L642

In particular, other checks for inheritance:
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/validators.py#L554-L565
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/utils.py#L103-L104

and of course, the tests:
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/tests/test_utils.py#L141-L153
</details>
version: 0.0
FAIL_TO_PASS:
[ "tests/test_json.py::test_subclass_encoding", "tests/test_json.py::test_subclass_custom_encoding" ]
PASS_TO_PASS:
[ "tests/test_json.py::test_encoding[input0-\"ebcdab58-6eb8-46fb-a190-d07a33e9eac8\"]", "tests/test_json.py::test_encoding[input1-\"192.168.0.1\"]", "tests/test_json.py::test_encoding[input2-\"black\"]", "tests/test_json.py::test_encoding[input3-\"#010c7b\"]", "tests/test_json.py::test_encoding[input4-\"**********\"]", "tests/test_json.py::test_encoding[input5-\"\"]", "tests/test_json.py::test_encoding[input6-\"**********\"]", "tests/test_json.py::test_encoding[input7-\"\"]", "tests/test_json.py::test_encoding[input8-\"::1:0:1\"]", "tests/test_json.py::test_encoding[input9-\"192.168.0.0/24\"]", "tests/test_json.py::test_encoding[input10-\"2001:db00::/120\"]", "tests/test_json.py::test_encoding[input11-\"192.168.0.0/24\"]", "tests/test_json.py::test_encoding[input12-\"2001:db00::/120\"]", "tests/test_json.py::test_encoding[input13-\"2032-01-01T01:01:00\"]", "tests/test_json.py::test_encoding[input14-\"2032-01-01T01:01:00+00:00\"]", "tests/test_json.py::test_encoding[input15-\"2032-01-01T00:00:00\"]", "tests/test_json.py::test_encoding[input16-\"12:34:56\"]", "tests/test_json.py::test_encoding[input17-1036834.000056]", "tests/test_json.py::test_encoding[input18-[1,", "tests/test_json.py::test_encoding[input19-[1,", "tests/test_json.py::test_encoding[<genexpr>-[0,", "tests/test_json.py::test_encoding[this", "tests/test_json.py::test_encoding[input22-12.34]", "tests/test_json.py::test_encoding[input23-{\"a\":", "tests/test_json.py::test_encoding[MyEnum.foo-\"bar\"]", "tests/test_json.py::test_path_encoding", "tests/test_json.py::test_model_encoding", "tests/test_json.py::test_invalid_model", "tests/test_json.py::test_iso_timedelta[input0-P12DT0H0M34.000056S]", "tests/test_json.py::test_iso_timedelta[input1-P1001DT1H2M3.654321S]", "tests/test_json.py::test_custom_encoder", "tests/test_json.py::test_custom_iso_timedelta", "tests/test_json.py::test_custom_encoder_arg", "tests/test_json.py::test_encode_dataclass", "tests/test_json.py::test_encode_pydantic_dataclass", "tests/test_json.py::test_encode_custom_root", "tests/test_json.py::test_custom_decode_encode" ]
2020-03-04 19:27:19+00:00
4,736
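A hedged aside on the pydantic row above: the benchmark discussion rests on two CPython facts, namely that `obj.__class__` is a plain attribute lookup while `type(obj)` is a builtin call, and that `cls.__mro__` already encodes the chain that `issubclass` walks. The sketch below is illustrative only; `issubclass_via_mro` is a made-up helper name, not code from pydantic itself.

```python
import timeit

class Base: pass
class Child(Base): pass

obj = Child()

# For ordinary instances the two spellings return the same object...
assert type(obj) is obj.__class__ is Child

# ...but the attribute lookup skips a builtin call, which is what the
# linked use-class-attribute branch exploits. Absolute numbers vary.
print("type(obj):     ", timeit.timeit(lambda: type(obj), number=10**6))
print("obj.__class__: ", timeit.timeit(lambda: obj.__class__, number=10**6))

# A "fast-breaking iteration of __mro__" is just an early-exit scan of the
# method resolution order (hypothetical helper, not pydantic's code):
def issubclass_via_mro(cls: type, target: type) -> bool:
    for base in cls.__mro__:
        if base is target:
            return True
    return False

assert issubclass_via_mro(Child, Base)
assert not issubclass_via_mro(Base, Child)
```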
pymor__pymor-577
diff --git a/src/pymor/algorithms/gram_schmidt.py b/src/pymor/algorithms/gram_schmidt.py index 2e65b2e15..114a5e06b 100644 --- a/src/pymor/algorithms/gram_schmidt.py +++ b/src/pymor/algorithms/gram_schmidt.py @@ -10,10 +10,10 @@ from pymor.core.logger import getLogger @defaults('atol', 'rtol', 'reiterate', 'reiteration_threshold', 'check', 'check_tol') -def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, +def gram_schmidt(A, product=None, return_R=False, atol=1e-13, rtol=1e-13, offset=0, reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3, copy=True): - """Orthonormalize a |VectorArray| using the stabilized Gram-Schmidt algorithm. + """Orthonormalize a |VectorArray| using the modified Gram-Schmidt algorithm. Parameters ---------- @@ -22,6 +22,8 @@ def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, product The inner product |Operator| w.r.t. which to orthonormalize. If `None`, the Euclidean product is used. + return_R + If `True`, the R matrix from QR decomposition is returned. atol Vectors of norm smaller than `atol` are removed from the array. rtol @@ -45,7 +47,10 @@ def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, Returns ------- - The orthonormalized |VectorArray|. + Q + The orthonormalized |VectorArray|. + R + The upper-triangular/trapezoidal matrix (if `compute_R` is `True`). """ logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt') @@ -54,7 +59,8 @@ def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, A = A.copy() # main loop - remove = [] + R = np.eye(len(A)) + remove = [] # indices of to be removed vectors for i in range(offset, len(A)): # first calculate norm initial_norm = A[i].norm(product)[0] @@ -65,41 +71,41 @@ def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, continue if i == 0: - A[0].scal(1/initial_norm) - + A[0].scal(1 / initial_norm) + R[i, i] = initial_norm else: - first_iteration = True norm = initial_norm # If reiterate is True, reiterate as long as the norm of the vector changes - # strongly during orthonormalization (due to Andreas Buhr). - while first_iteration or reiterate and norm/old_norm < reiteration_threshold: - - if first_iteration: - first_iteration = False - else: - logger.info(f'Orthonormalizing vector {i} again') - + # strongly during orthogonalization (due to Andreas Buhr). 
+ while True: # orthogonalize to all vectors left for j in range(i): if j in remove: continue - p = A[i].pairwise_inner(A[j], product)[0] + p = A[j].pairwise_inner(A[i], product)[0] A[i].axpy(-p, A[j]) + R[j, i] += p # calculate new norm old_norm, norm = norm, A[i].norm(product)[0] - # remove vector if it got too small: - if norm / initial_norm < rtol: - logger.info(f"Removing linear dependent vector {i}") + # remove vector if it got too small + if norm < rtol * initial_norm: + logger.info(f"Removing linearly dependent vector {i}") remove.append(i) break - if norm > 0: - A[i].scal(1 / norm) + # check if reorthogonalization should be done + if reiterate and norm < reiteration_threshold * old_norm: + logger.info(f"Orthonormalizing vector {i} again") + else: + A[i].scal(1 / norm) + R[i, i] = norm + break if remove: del A[remove] + R = np.delete(R, remove, axis=0) if check: error_matrix = A[offset:len(A)].inner(A, product) @@ -107,12 +113,16 @@ def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, if error_matrix.size > 0: err = np.max(np.abs(error_matrix)) if err >= check_tol: - raise AccuracyError(f'result not orthogonal (max err={err})') + raise AccuracyError(f"result not orthogonal (max err={err})") - return A + if return_R: + return A, R + else: + return A -def gram_schmidt_biorth(V, W, product=None, reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3, +def gram_schmidt_biorth(V, W, product=None, + reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3, copy=True): """Biorthonormalize a pair of |VectorArrays| using the biorthonormal Gram-Schmidt process. @@ -161,16 +171,10 @@ def gram_schmidt_biorth(V, W, product=None, reiterate=True, reiteration_threshol if i == 0: V[0].scal(1 / initial_norm) else: - first_iteration = True norm = initial_norm # If reiterate is True, reiterate as long as the norm of the vector changes # strongly during projection. - while first_iteration or reiterate and norm / old_norm < reiteration_threshold: - if first_iteration: - first_iteration = False - else: - logger.info(f'Projecting vector V[{i}] again') - + while True: for j in range(i): # project by (I - V[j] * W[j]^T * E) p = W[j].pairwise_inner(V[i], product)[0] @@ -179,8 +183,12 @@ def gram_schmidt_biorth(V, W, product=None, reiterate=True, reiteration_threshol # calculate new norm old_norm, norm = norm, V[i].norm(product)[0] - if norm > 0: - V[i].scal(1 / norm) + # check if reorthogonalization should be done + if reiterate and norm < reiteration_threshold * old_norm: + logger.info(f"Projecting vector V[{i}] again") + else: + V[i].scal(1 / norm) + break # calculate norm of W[i] initial_norm = W[i].norm(product)[0] @@ -189,16 +197,10 @@ def gram_schmidt_biorth(V, W, product=None, reiterate=True, reiteration_threshol if i == 0: W[0].scal(1 / initial_norm) else: - first_iteration = True norm = initial_norm # If reiterate is True, reiterate as long as the norm of the vector changes # strongly during projection. 
- while first_iteration or reiterate and norm / old_norm < reiteration_threshold: - if first_iteration: - first_iteration = False - else: - logger.info(f'Projecting vector W[{i}] again') - + while True: for j in range(i): # project by (I - W[j] * V[j]^T * E) p = V[j].pairwise_inner(W[i], product)[0] @@ -207,8 +209,12 @@ def gram_schmidt_biorth(V, W, product=None, reiterate=True, reiteration_threshol # calculate new norm old_norm, norm = norm, W[i].norm(product)[0] - if norm > 0: - W[i].scal(1 / norm) + # check if reorthogonalization should be done + if reiterate and norm < reiteration_threshold * old_norm: + logger.info(f"Projecting vector W[{i}] again") + else: + W[i].scal(1 / norm) + break # rescale V[i] p = W[i].pairwise_inner(V[i], product)[0] @@ -220,6 +226,6 @@ def gram_schmidt_biorth(V, W, product=None, reiterate=True, reiteration_threshol if error_matrix.size > 0: err = np.max(np.abs(error_matrix)) if err >= check_tol: - raise AccuracyError(f'Result not biorthogonal (max err={err})') + raise AccuracyError(f"result not biorthogonal (max err={err})") return V, W
pymor/pymor
f441a7917f3c30219abdbed99143a4bf31abc613
diff --git a/src/pymortests/algorithms/gram_schmidt.py b/src/pymortests/algorithms/gram_schmidt.py index 82debb6aa..285081e30 100644 --- a/src/pymortests/algorithms/gram_schmidt.py +++ b/src/pymortests/algorithms/gram_schmidt.py @@ -24,6 +24,22 @@ def test_gram_schmidt(vector_array): assert np.all(almost_equal(onb, U)) +def test_gram_schmidt_with_R(vector_array): + U = vector_array + + V = U.copy() + onb, R = gram_schmidt(U, return_R=True, copy=True) + assert np.all(almost_equal(U, V)) + assert np.allclose(onb.dot(onb), np.eye(len(onb))) + assert np.all(almost_equal(U, onb.lincomb(U.dot(onb)), rtol=1e-13)) + assert np.all(almost_equal(V, onb.lincomb(R.T))) + + onb2, R2 = gram_schmidt(U, return_R=True, copy=False) + assert np.all(almost_equal(onb, onb2)) + assert np.all(R == R2) + assert np.all(almost_equal(onb, U)) + + def test_gram_schmidt_with_product(operator_with_arrays_and_products): _, _, U, _, p, _ = operator_with_arrays_and_products @@ -38,6 +54,22 @@ def test_gram_schmidt_with_product(operator_with_arrays_and_products): assert np.all(almost_equal(onb, U)) +def test_gram_schmidt_with_product_and_R(operator_with_arrays_and_products): + _, _, U, _, p, _ = operator_with_arrays_and_products + + V = U.copy() + onb, R = gram_schmidt(U, product=p, return_R=True, copy=True) + assert np.all(almost_equal(U, V)) + assert np.allclose(p.apply2(onb, onb), np.eye(len(onb))) + assert np.all(almost_equal(U, onb.lincomb(p.apply2(U, onb)), rtol=1e-13)) + assert np.all(almost_equal(U, onb.lincomb(R.T))) + + onb2, R2 = gram_schmidt(U, product=p, return_R=True, copy=False) + assert np.all(almost_equal(onb, onb2)) + assert np.all(R == R2) + assert np.all(almost_equal(onb, U)) + + def test_gram_schmidt_biorth(vector_array): U = vector_array if U.dim < 2:
gram_schmidt doesn't return the R matrix Currently, `gram_schmidt` only returns the Q matrix. The R matrix is used in the [shift selection for the LRADI method](https://github.com/pymor/pymor/blob/1aeccb428700fa140cb6faa7490bdaf87a9754a3/src/pymor/algorithms/lyapunov.py#L285-L287) and in the [rational Krylov subspace method for Lyapunov equations](http://www.dm.unibo.it/~simoncin/rksm.m) (related to #387). It might also be useful for computing an SVD of a VectorArray (Q * svd(R)). (A NumPy sketch of Gram-Schmidt with an R accumulator follows this row.)
0.0
[ "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[0-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[1-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>7]", 
"src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_R[2-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>24]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>25]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>26]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>27]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>28]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>29]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>30]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>31]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>32]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>33]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>34]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>35]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product_and_R[<lambda>36]" ]
[ "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[0-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[1-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>10]", 
"src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt[2-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>24]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>25]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>26]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>27]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>28]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>29]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>30]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>31]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>32]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>33]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>34]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>35]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_with_product[<lambda>36]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[0-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>4]", 
"src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[1-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>0]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>1]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>2]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>3]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>4]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>5]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>6]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>7]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>8]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>9]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>10]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>11]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>12]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>13]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>14]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>15]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth[2-<lambda>16]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>24]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>25]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>26]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>27]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>28]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>29]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>30]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>31]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>32]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>33]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>34]", 
"src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>35]", "src/pymortests/algorithms/gram_schmidt.py::test_gram_schmidt_biorth_with_product[<lambda>36]" ]
2019-01-31 19:07:41+00:00
4,948
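The pymor patch above threads an R accumulator through modified Gram-Schmidt so callers get the full QR factorization. As a plain-NumPy analogue (a sketch, not pymor's VectorArray-based implementation, and without the removal/reiteration logic), the same idea and the SVD-via-R trick mentioned in the issue look like this:

```python
import numpy as np

def modified_gram_schmidt(A: np.ndarray):
    """QR of the columns of A via modified Gram-Schmidt (no rank checks)."""
    A = A.astype(float).copy()
    n = A.shape[1]
    R = np.zeros((n, n))
    for i in range(n):
        for j in range(i):
            # Project column i onto the already-orthonormal column j.
            R[j, i] = A[:, j] @ A[:, i]
            A[:, i] -= R[j, i] * A[:, j]
        R[i, i] = np.linalg.norm(A[:, i])
        A[:, i] /= R[i, i]
    return A, R  # Q, R

rng = np.random.default_rng(0)
M = rng.standard_normal((50, 4))
Q, R = modified_gram_schmidt(M)
assert np.allclose(Q @ R, M)             # reconstruction, M = Q R
assert np.allclose(Q.T @ Q, np.eye(4))   # orthonormality of Q

# SVD via Q and svd(R), as suggested in the issue: M = (Q U) diag(s) Vt.
U, s, Vt = np.linalg.svd(R)
assert np.allclose((Q @ U) * s @ Vt, M)
```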
pypa__auditwheel-343
diff --git a/README.rst b/README.rst index 8b6047d..02bbaae 100644 --- a/README.rst +++ b/README.rst @@ -127,7 +127,7 @@ Limitations Testing ------- -The tests can be run with ``tox``, which will automatically install +The tests can be run with ``nox``, which will automatically install test dependencies. Some of the integration tests also require a running and accessible Docker diff --git a/src/auditwheel/main_repair.py b/src/auditwheel/main_repair.py index c4fafa7..1e0ae58 100644 --- a/src/auditwheel/main_repair.py +++ b/src/auditwheel/main_repair.py @@ -31,7 +31,10 @@ below. epilog += f" (aliased by {', '.join(p['aliases'])})" epilog += "\n" highest_policy = get_policy_name(POLICY_PRIORITY_HIGHEST) - help = "Vendor in external shared library dependencies of a wheel." + help = """Vendor in external shared library dependencies of a wheel. +If multiple wheels are specified, an error processing one +wheel will abort processing of subsequent wheels. +""" p = sub_parsers.add_parser( "repair", help=help, @@ -39,7 +42,7 @@ below. epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter, ) - p.add_argument("WHEEL_FILE", help="Path to wheel file.") + p.add_argument("WHEEL_FILE", help="Path to wheel file.", nargs="+") p.add_argument( "--plat", action=EnvironmentDefault, @@ -101,72 +104,73 @@ def execute(args, p): from .repair import repair_wheel from .wheel_abi import NonPlatformWheel, analyze_wheel_abi - if not isfile(args.WHEEL_FILE): - p.error("cannot access %s. No such file" % args.WHEEL_FILE) + for wheel_file in args.WHEEL_FILE: + if not isfile(wheel_file): + p.error("cannot access %s. No such file" % wheel_file) - logger.info("Repairing %s", basename(args.WHEEL_FILE)) + logger.info("Repairing %s", basename(wheel_file)) - if not exists(args.WHEEL_DIR): - os.makedirs(args.WHEEL_DIR) + if not exists(args.WHEEL_DIR): + os.makedirs(args.WHEEL_DIR) - try: - wheel_abi = analyze_wheel_abi(args.WHEEL_FILE) - except NonPlatformWheel: - logger.info("This does not look like a platform wheel") - return 1 + try: + wheel_abi = analyze_wheel_abi(wheel_file) + except NonPlatformWheel: + logger.info("This does not look like a platform wheel") + return 1 - policy = get_policy_by_name(args.PLAT) - reqd_tag = policy["priority"] + policy = get_policy_by_name(args.PLAT) + reqd_tag = policy["priority"] - if reqd_tag > get_priority_by_name(wheel_abi.sym_tag): - msg = ( - 'cannot repair "%s" to "%s" ABI because of the presence ' - "of too-recent versioned symbols. You'll need to compile " - "the wheel on an older toolchain." % (args.WHEEL_FILE, args.PLAT) - ) - p.error(msg) - - if reqd_tag > get_priority_by_name(wheel_abi.ucs_tag): - msg = ( - 'cannot repair "%s" to "%s" ABI because it was compiled ' - "against a UCS2 build of Python. You'll need to compile " - "the wheel against a wide-unicode build of Python." - % (args.WHEEL_FILE, args.PLAT) - ) - p.error(msg) + if reqd_tag > get_priority_by_name(wheel_abi.sym_tag): + msg = ( + 'cannot repair "%s" to "%s" ABI because of the presence ' + "of too-recent versioned symbols. You'll need to compile " + "the wheel on an older toolchain." % (wheel_file, args.PLAT) + ) + p.error(msg) + + if reqd_tag > get_priority_by_name(wheel_abi.ucs_tag): + msg = ( + 'cannot repair "%s" to "%s" ABI because it was compiled ' + "against a UCS2 build of Python. You'll need to compile " + "the wheel against a wide-unicode build of Python." 
+ % (wheel_file, args.PLAT) + ) + p.error(msg) - if reqd_tag > get_priority_by_name(wheel_abi.blacklist_tag): - msg = ( - 'cannot repair "%s" to "%s" ABI because it depends on ' - "black-listed symbols." % (args.WHEEL_FILE, args.PLAT) - ) - p.error(msg) - - abis = [policy["name"]] + policy["aliases"] - if not args.ONLY_PLAT: - if reqd_tag < get_priority_by_name(wheel_abi.overall_tag): - logger.info( - ( - "Wheel is eligible for a higher priority tag. " - "You requested %s but I have found this wheel is " - "eligible for %s." - ), - args.PLAT, - wheel_abi.overall_tag, + if reqd_tag > get_priority_by_name(wheel_abi.blacklist_tag): + msg = ( + 'cannot repair "%s" to "%s" ABI because it depends on ' + "black-listed symbols." % (wheel_file, args.PLAT) ) - higher_policy = get_policy_by_name(wheel_abi.overall_tag) - abis = [higher_policy["name"]] + higher_policy["aliases"] + abis - - patcher = Patchelf() - out_wheel = repair_wheel( - args.WHEEL_FILE, - abis=abis, - lib_sdir=args.LIB_SDIR, - out_dir=args.WHEEL_DIR, - update_tags=args.UPDATE_TAGS, - patcher=patcher, - strip=args.STRIP, - ) + p.error(msg) + + abis = [policy["name"]] + policy["aliases"] + if not args.ONLY_PLAT: + if reqd_tag < get_priority_by_name(wheel_abi.overall_tag): + logger.info( + ( + "Wheel is eligible for a higher priority tag. " + "You requested %s but I have found this wheel is " + "eligible for %s." + ), + args.PLAT, + wheel_abi.overall_tag, + ) + higher_policy = get_policy_by_name(wheel_abi.overall_tag) + abis = [higher_policy["name"]] + higher_policy["aliases"] + abis + + patcher = Patchelf() + out_wheel = repair_wheel( + wheel_file, + abis=abis, + lib_sdir=args.LIB_SDIR, + out_dir=args.WHEEL_DIR, + update_tags=args.UPDATE_TAGS, + patcher=patcher, + strip=args.STRIP, + ) - if out_wheel is not None: - logger.info("\nFixed-up wheel written to %s", out_wheel) + if out_wheel is not None: + logger.info("\nFixed-up wheel written to %s", out_wheel)
pypa/auditwheel
f3c7691f258de76c11f679ab85826b730c658c14
diff --git a/tests/integration/test_bundled_wheels.py b/tests/integration/test_bundled_wheels.py index fa51f63..f3f5bc8 100644 --- a/tests/integration/test_bundled_wheels.py +++ b/tests/integration/test_bundled_wheels.py @@ -66,7 +66,7 @@ def test_wheel_source_date_epoch(tmp_path, monkeypatch): STRIP=False, UPDATE_TAGS=True, WHEEL_DIR=str(wheel_output_path), - WHEEL_FILE=str(wheel_path), + WHEEL_FILE=[str(wheel_path)], cmd="repair", func=Mock(), prog="auditwheel",
Support multiple wheel files in repair command Currently the ``auditwheel repair`` command supports only one ``WHEEL_FILE`` argument. I thought it would be nice if it were possible to give multiple wheels to repair, what do you think? It would be useful in my use case, where I build a set of wheels, some of which need repairing. And if you consider it a useful addition, I'll submit a patch. (A minimal argparse sketch of the resulting interface follows this row.)
0.0
[ "[100%]", "tests/integration/test_bundled_wheels.py::test_wheel_source_date_epoch" ]
[ "tests/integration/test_bundled_wheels.py::test_analyze_wheel_abi[cffi-1.5.0-cp27-none-linux_x86_64.whl-external_libs0]", "tests/integration/test_bundled_wheels.py::test_analyze_wheel_abi[python_snappy-0.5.2-pp260-pypy_41-linux_x86_64.whl-external_libs1]", "tests/integration/test_bundled_wheels.py::test_analyze_wheel_abi_pyfpe" ]
2021-10-21 19:09:20+00:00
4,960
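The essence of the auditwheel patch above is `nargs="+"` on the positional argument plus a loop in `execute()`. A minimal argparse sketch, independent of auditwheel internals, showing the resulting interface:

```python
import argparse

parser = argparse.ArgumentParser(prog="repair")
# Before: parser.add_argument("WHEEL_FILE")  accepted exactly one path.
# After:  nargs="+" accepts one or more paths and yields a list.
parser.add_argument("WHEEL_FILE", nargs="+", help="Path to wheel file.")

args = parser.parse_args(["a-1.0-py3-none-any.whl", "b-2.0-py3-none-any.whl"])
assert args.WHEEL_FILE == ["a-1.0-py3-none-any.whl", "b-2.0-py3-none-any.whl"]

for wheel_file in args.WHEEL_FILE:  # mirrors the patched execute() loop
    print("would repair:", wheel_file)
```

Note that the accompanying test change (`WHEEL_FILE=[str(wheel_path)]`) reflects exactly this switch from a single string to a list.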
pypa__auditwheel-367
diff --git a/src/auditwheel/tools.py b/src/auditwheel/tools.py index ed8b514..bce72f2 100644 --- a/src/auditwheel/tools.py +++ b/src/auditwheel/tools.py @@ -70,19 +70,21 @@ def dir2zip(in_dir: str, zip_fname: str, date_time: Optional[datetime] = None) - st = os.stat(in_dir) date_time = datetime.fromtimestamp(st.st_mtime, tz=timezone.utc) date_time_args = date_time.timetuple()[:6] - with zipfile.ZipFile(zip_fname, "w", compression=zipfile.ZIP_DEFLATED) as z: + compression = zipfile.ZIP_DEFLATED + with zipfile.ZipFile(zip_fname, "w", compression=compression) as z: for root, dirs, files in os.walk(in_dir): for dir in dirs: dname = os.path.join(root, dir) out_dname = os.path.relpath(dname, in_dir) + "/" - zinfo = zipfile.ZipInfo(out_dname, date_time=date_time_args) - zinfo.external_attr = os.stat(dname).st_mode << 16 - z.writestr(zinfo, "") + zinfo = zipfile.ZipInfo.from_file(dname, out_dname) + zinfo.date_time = date_time_args + z.writestr(zinfo, b"") for file in files: fname = os.path.join(root, file) out_fname = os.path.relpath(fname, in_dir) - zinfo = zipfile.ZipInfo(out_fname, date_time=date_time_args) - zinfo.external_attr = os.stat(fname).st_mode << 16 + zinfo = zipfile.ZipInfo.from_file(fname, out_fname) + zinfo.date_time = date_time_args + zinfo.compress_type = compression with open(fname, "rb") as fp: z.writestr(zinfo, fp.read())
pypa/auditwheel
68aad23937225a629d70cb4b3e224abd07f16f44
diff --git a/tests/unit/test_tools.py b/tests/unit/test_tools.py index dd2a086..ee79467 100644 --- a/tests/unit/test_tools.py +++ b/tests/unit/test_tools.py @@ -87,3 +87,14 @@ def test_zip2dir_round_trip_permissions(tmp_path): dir2zip(str(tmp_path / "unzip1"), str(tmp_path / "tmp.zip")) zip2dir(str(tmp_path / "tmp.zip"), str(extract_path)) _check_permissions(extract_path) + + +def test_dir2zip_deflate(tmp_path): + buffer = b"\0" * 1024 * 1024 + input_dir = tmp_path / "input_dir" + input_dir.mkdir() + input_file = input_dir / "zeros.bin" + input_file.write_bytes(buffer) + output_file = tmp_path / "ouput.zip" + dir2zip(str(input_dir), str(output_file)) + assert output_file.stat().st_size < len(buffer) / 4
auditwheel repair: whls not getting compressed for auditwheel 5.1.0 and above? After upgrading to 5.1.0, we noticed that our wheels are not getting compressed, resulting in very large whls after running `auditwheel repair foo.whl`: ``` -rw-r--r-- 1 root root 104093514 Jan 7 01:54 accera-1.2.1.dev17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.5.1.0.whl ``` auditwheel 5.0.0 was able to compress the whls by roughly 70%: ``` -rw-r--r-- 1 root root 35413658 Jan 7 01:44 accera-1.2.1.dev17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.5.0.0.whl ``` The contents of both files are the same after unzipping, so the binaries themselves are unchanged but the whl does not seem to be compressed when using auditwheel 5.1.0 and above. ```shell [root@9a0260dd545d wheelhouse]# ls -al accera-5.0.0/ total 101324 drwxr-xr-x 8 root root 4096 Jan 7 01:44 . drwxr-xr-x 6 root root 4096 Jan 7 01:55 .. -rw-r--r-- 1 root root 476 Jan 7 01:44 Constants.py -rw-r--r-- 1 root root 3219 Jan 7 01:44 Debug.py drwxr-xr-x 2 root root 4096 Jan 7 01:44 hat -rw-r--r-- 1 root root 1035 Jan 7 01:44 __init__.py drwxr-xr-x 2 root root 4096 Jan 7 01:44 lang -rwxr-xr-x 1 root root 103664768 Jan 7 01:44 _lang_python.cpython-38-x86_64-linux-gnu.so -rw-r--r-- 1 root root 20883 Jan 7 01:44 Package.py -rw-r--r-- 1 root root 1563 Jan 7 01:44 Parameter.py drwxr-xr-x 2 root root 4096 Jan 7 01:44 samples -rw-r--r-- 1 root root 9045 Jan 7 01:44 Targets.py drwxr-xr-x 2 root root 4096 Jan 7 01:44 test drwxr-xr-x 3 root root 4096 Jan 7 01:44 tools drwxr-xr-x 2 root root 4096 Jan 7 01:44 tuning -rw-r--r-- 1 root root 36 Jan 7 01:44 _version.py [root@9a0260dd545d wheelhouse]# ls -al accera-5.1.0/ total 101324 drwxr-xr-x 8 root root 4096 Jan 7 01:54 . drwxr-xr-x 6 root root 4096 Jan 7 01:55 .. -rw-r--r-- 1 root root 476 Jan 7 01:54 Constants.py -rw-r--r-- 1 root root 3219 Jan 7 01:54 Debug.py drwxr-xr-x 2 root root 4096 Jan 7 01:54 hat -rw-r--r-- 1 root root 1035 Jan 7 01:54 __init__.py drwxr-xr-x 2 root root 4096 Jan 7 01:54 lang -rwxr-xr-x 1 root root 103664768 Jan 7 01:54 _lang_python.cpython-38-x86_64-linux-gnu.so -rw-r--r-- 1 root root 20883 Jan 7 01:54 Package.py -rw-r--r-- 1 root root 1563 Jan 7 01:54 Parameter.py drwxr-xr-x 2 root root 4096 Jan 7 01:54 samples -rw-r--r-- 1 root root 9045 Jan 7 01:54 Targets.py drwxr-xr-x 2 root root 4096 Jan 7 01:54 test drwxr-xr-x 3 root root 4096 Jan 7 01:54 tools drwxr-xr-x 2 root root 4096 Jan 7 01:54 tuning -rw-r--r-- 1 root root 36 Jan 7 01:54 _version.py ```
0.0
[ "tests/unit/test_tools.py::test_dir2zip_deflate" ]
[ "tests/unit/test_tools.py::test_environment_action[None-None-manylinux1]", "tests/unit/test_tools.py::test_environment_action[None-manylinux2010-manylinux2010]", "tests/unit/test_tools.py::test_environment_action[manylinux2010-None-manylinux2010]", "tests/unit/test_tools.py::test_environment_action[manylinux2010-linux-linux]", "tests/unit/test_tools.py::test_environment_action_invalid_env", "tests/unit/test_tools.py::test_zip2dir_permissions", "tests/unit/test_tools.py::test_zip2dir_round_trip_permissions" ]
2022-01-07 22:20:14+00:00
4,961
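As the patch above suggests, the root cause in this row is a stdlib subtlety: when `ZipFile.writestr` is handed an explicit `ZipInfo`, the entry's compression comes from `ZipInfo.compress_type`, which defaults to `ZIP_STORED`, not from the compression passed to the `ZipFile` constructor. That is why the fix sets `zinfo.compress_type = compression` for files. A standalone demonstration:

```python
import io
import zipfile

payload = b"\0" * (1024 * 1024)  # a megabyte of zeros, highly compressible

def zipped_size(set_compress_type: bool) -> int:
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w", compression=zipfile.ZIP_DEFLATED) as z:
        zinfo = zipfile.ZipInfo("zeros.bin")
        if set_compress_type:
            zinfo.compress_type = zipfile.ZIP_DEFLATED
        # With an explicit ZipInfo, the entry-level compress_type wins;
        # it defaults to ZIP_STORED, silently disabling compression.
        z.writestr(zinfo, payload)
    return buf.getbuffer().nbytes

print("stored:  ", zipped_size(False))  # roughly the full payload size
print("deflated:", zipped_size(True))   # a few kilobytes
```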
pypa__auditwheel-424
diff --git a/src/auditwheel/policy/external_references.py b/src/auditwheel/policy/external_references.py index 1c05b10..23afde8 100644 --- a/src/auditwheel/policy/external_references.py +++ b/src/auditwheel/policy/external_references.py @@ -8,7 +8,7 @@ from ..elfutils import filter_undefined_symbols, is_subdir from . import load_policies log = logging.getLogger(__name__) -LIBPYTHON_RE = re.compile(r"^libpython\d\.\dm?.so(.\d)*$") +LIBPYTHON_RE = re.compile(r"^libpython\d+\.\d+m?.so(.\d)*$") def lddtree_external_references(lddtree: dict, wheel_path: str) -> dict:
pypa/auditwheel
d270db8188386891fd1a653a6737ce5553de568c
diff --git a/tests/unit/test_policy.py b/tests/unit/test_policy.py index 9fa21c9..adbdad8 100644 --- a/tests/unit/test_policy.py +++ b/tests/unit/test_policy.py @@ -10,6 +10,7 @@ from auditwheel.policy import ( get_policy_name, get_priority_by_name, get_replace_platforms, + lddtree_external_references, ) @@ -202,3 +203,32 @@ class TestPolicyAccess: def test_get_by_name_duplicate(self): with pytest.raises(RuntimeError): get_priority_by_name("duplicate") + + +class TestLddTreeExternalReferences: + """Tests for lddtree_external_references.""" + + def test_filter_libs(self): + """Test the nested filter_libs function.""" + filtered_libs = [ + "ld-linux-x86_64.so.1", + "ld64.so.1", + "ld64.so.2", + "libpython3.7m.so.1.0", + "libpython3.9.so.1.0", + "libpython3.10.so.1.0", + "libpython999.999.so.1.0", + ] + unfiltered_libs = ["libfoo.so.1.0", "libbar.so.999.999.999"] + libs = filtered_libs + unfiltered_libs + + lddtree = { + "realpath": "/path/to/lib", + "needed": libs, + "libs": {lib: {"needed": [], "realpath": "/path/to/lib"} for lib in libs}, + } + full_external_refs = lddtree_external_references(lddtree, "/path/to/wheel") + + # Assert that each policy only has the unfiltered libs. + for policy in full_external_refs: + assert set(full_external_refs[policy]["libs"]) == set(unfiltered_libs)
libpython3.x is skipped but libpython3.xx is included Howdy, I noticed that when running auditwheel repair on a wheel with a dependency on libpython3.8.so or libpython3.9.so, those libraries are not added to the wheel's libs directory, but libpython3.10.so and libpython3.11.so are added. I'm not very familiar with the project, but I suspect [the regex detecting libpython](https://github.com/pypa/auditwheel/blob/63c30761e6857491af50fbb1922ecfd4c034ef76/src/auditwheel/policy/external_references.py#L11) could be the culprit. (A standalone check of the old and new patterns follows this row.)
0.0
[ "tests/unit/test_policy.py::TestLddTreeExternalReferences::test_filter_libs" ]
[ "tests/unit/test_policy.py::test_32bits_arch_name[armv6l-armv6l]", "tests/unit/test_policy.py::test_32bits_arch_name[armv7l-armv7l]", "tests/unit/test_policy.py::test_32bits_arch_name[armv8l-armv7l]", "tests/unit/test_policy.py::test_32bits_arch_name[aarch64-armv7l]", "tests/unit/test_policy.py::test_32bits_arch_name[i686-i686]", "tests/unit/test_policy.py::test_32bits_arch_name[x86_64-i686]", "tests/unit/test_policy.py::test_64bits_arch_name[armv8l-aarch64]", "tests/unit/test_policy.py::test_64bits_arch_name[aarch64-aarch64]", "tests/unit/test_policy.py::test_64bits_arch_name[ppc64le-ppc64le]", "tests/unit/test_policy.py::test_64bits_arch_name[i686-x86_64]", "tests/unit/test_policy.py::test_64bits_arch_name[x86_64-x86_64]", "tests/unit/test_policy.py::test_replacement_platform[linux_aarch64-expected0]", "tests/unit/test_policy.py::test_replacement_platform[manylinux1_ppc64le-expected1]", "tests/unit/test_policy.py::test_replacement_platform[manylinux2014_x86_64-expected2]", "tests/unit/test_policy.py::test_replacement_platform[manylinux_2_24_x86_64-expected3]", "tests/unit/test_policy.py::test_pep600_compliance", "tests/unit/test_policy.py::TestPolicyAccess::test_get_by_priority", "tests/unit/test_policy.py::TestPolicyAccess::test_get_by_priority_missing", "tests/unit/test_policy.py::TestPolicyAccess::test_get_by_priority_duplicate", "tests/unit/test_policy.py::TestPolicyAccess::test_get_by_name", "tests/unit/test_policy.py::TestPolicyAccess::test_get_by_name_missing", "tests/unit/test_policy.py::TestPolicyAccess::test_get_by_name_duplicate" ]
2023-04-25 17:30:15+00:00
4,962
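The one-character-class fix in the row above is easy to verify in isolation: the old pattern's single `\d` after the dot cannot match the two-digit minor version in "3.10". A sketch using only the stdlib `re` module:

```python
import re

OLD = re.compile(r"^libpython\d\.\dm?.so(.\d)*$")
NEW = re.compile(r"^libpython\d+\.\d+m?.so(.\d)*$")

libs = [
    "libpython3.7m.so.1.0",
    "libpython3.9.so.1.0",
    "libpython3.10.so.1.0",  # two-digit minor: the old pattern misses it
    "libpython3.11.so.1.0",
]

for lib in libs:
    print(f"{lib:24} old={bool(OLD.match(lib))}  new={bool(NEW.match(lib))}")

# The regex filters libpython out of vendoring, so a non-match means the
# library was wrongly copied into the wheel: exactly the reported bug.
assert not OLD.match("libpython3.10.so.1.0")
assert NEW.match("libpython3.10.so.1.0")
```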
pysal__esda-235
diff --git a/esda/adbscan.py b/esda/adbscan.py index dbb69e2..6d8444b 100644 --- a/esda/adbscan.py +++ b/esda/adbscan.py @@ -247,9 +247,12 @@ def _one_draw(pars): if sample_weight is not None: thin_sample_weight = sample_weight.iloc[rids] + min_samples = min_samples * pct_exact + min_samples = 1 if min_samples < 1 else int(np.floor(min_samples)) + dbs = DBSCAN( eps=eps, - min_samples=int(np.round(min_samples * pct_exact)), + min_samples=min_samples, algorithm=algorithm, n_jobs=n_jobs, ).fit(X_thin[xy], sample_weight=thin_sample_weight)
pysal/esda
ec12837871b20b7b79c2c7bef813025459f22188
diff --git a/esda/tests/test_adbscan.py b/esda/tests/test_adbscan.py index 8440012..ea2434a 100644 --- a/esda/tests/test_adbscan.py +++ b/esda/tests/test_adbscan.py @@ -2,7 +2,6 @@ import unittest import numpy as np import pandas -import pytest from .. import adbscan @@ -73,9 +72,6 @@ class ADBSCAN_Tester(unittest.TestCase): ] ) - @pytest.mark.xfail( - raises=ValueError, reason="**NEEDS ATTENTION**. Change in scikit-learn>=1.1." - ) def test_adbscan(self): # ------------------------# # # Single Core # @@ -264,9 +260,6 @@ class Get_Cluster_Boundary_Tester(unittest.TestCase): _ = ads.fit(self.db, xy=["x", "y"]) self.labels = pandas.Series(ads.labels_, index=self.db.index) - @pytest.mark.xfail( - raises=ValueError, reason="**NEEDS ATTENTION**. Change in scikit-learn>=1.1." - ) def test_get_cluster_boundary(self): # ------------------------# # # Single Core #
Bug: ADBSCAN.fit is broken https://github.com/pysal/esda/actions/runs/3071277058/jobs/4961813274#step:4:210 It appears that `min_samples` is no longer getting set properly: ``` E ValueError: min_samples == 0, must be >= 1. ```
0.0
[ "esda/tests/test_adbscan.py::ADBSCAN_Tester::test_adbscan", "esda/tests/test_adbscan.py::Get_Cluster_Boundary_Tester::test_get_cluster_boundary" ]
[ "esda/tests/test_adbscan.py::Remap_lbls_Tester::test_remap_lbls", "esda/tests/test_adbscan.py::Ensemble_Tester::test_ensemble", "esda/tests/test_adbscan.py::i::test_ensemble" ]
2022-11-23 18:22:00+00:00
5,029
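The arithmetic behind the esda fix above is easy to check: `int(np.round(min_samples * pct_exact))` collapses to 0 whenever the product rounds below 0.5, and scikit-learn >= 1.1 raises for `min_samples=0`. A sketch of the old and new computations (the sample values are illustrative, not taken from the test suite):

```python
import numpy as np

def old_min_samples(min_samples: int, pct_exact: float) -> int:
    # Pre-patch behavior: can round all the way down to zero.
    return int(np.round(min_samples * pct_exact))

def new_min_samples(min_samples: int, pct_exact: float) -> int:
    # Patched behavior: floor the scaled value, clamped to at least 1.
    scaled = min_samples * pct_exact
    return 1 if scaled < 1 else int(np.floor(scaled))

for ms, pct in [(4, 0.1), (10, 0.04), (25, 0.1)]:
    print(f"min_samples={ms}, pct_exact={pct}: "
          f"old={old_min_samples(ms, pct)}, new={new_min_samples(ms, pct)}")
# old gives 0 for the first two cases, which DBSCAN now rejects with
# "min_samples == 0, must be >= 1"; new clamps the result to 1.
```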
python-beaver__python-conf_d-7
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..21fef2c --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +# build files +*.pyc +__pycache__ +*.egg-info +.eggs +dist +build +.tox diff --git a/README.rst b/README.rst index 2599913..a2a0ac8 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ read configuration files, conf.d style Requirements ============ -* Python 2.6+ +* Python 2.6+ or Python 3.4+ Installation ============ @@ -20,7 +20,7 @@ From Github:: From PyPI:: - pip install conf_d==0.0.3 + pip install conf_d==0.0.5 Usage ===== diff --git a/conf_d/__init__.py b/conf_d/__init__.py index a18e3a0..bd55dcc 100644 --- a/conf_d/__init__.py +++ b/conf_d/__init__.py @@ -1,13 +1,14 @@ # -*- coding: utf-8 -*- -import ConfigParser import os -__version__ = '0.0.4' +from conf_d.compat import ConfigParser + +__version__ = '0.0.5' class Configuration(): - def __init__(self, name, path, parse=True, confd_path=None, conf_ext=None, main_defaults={}, section_defaults={}, main_parser=None, section_parser=None, path_from_main=None, config_parser=ConfigParser.ConfigParser): + def __init__(self, name, path, parse=True, confd_path=None, conf_ext=None, main_defaults={}, section_defaults={}, main_parser=None, section_parser=None, path_from_main=None, config_parser=ConfigParser): self._conf_ext = conf_ext self._config_sections = {} self._confd_path = confd_path diff --git a/conf_d/compat.py b/conf_d/compat.py new file mode 100644 index 0000000..7d0e1fe --- /dev/null +++ b/conf_d/compat.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +from sys import version_info + +if version_info[0] < 3: + from ConfigParser import ConfigParser +else: + from configparser import ConfigParser diff --git a/setup.py b/setup.py index fd655b7..2b142ee 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,10 @@ setup( 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', ], description='read configuration files, conf.d style', long_description=open('README.rst').read() + '\n\n' + diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..607ed8e --- /dev/null +++ b/tox.ini @@ -0,0 +1,6 @@ +[tox] +envlist = py26,py27,py34,py35,py36,py37 + +[testenv] +commands= + python setup.py test
python-beaver/python-conf_d
a032f0131c873a7f5199adb4ab819cd7a07bc693
diff --git a/conf_d/tests/test_configuration.py b/conf_d/tests/test_configuration.py index f04143c..d8b64d0 100644 --- a/conf_d/tests/test_configuration.py +++ b/conf_d/tests/test_configuration.py @@ -1,10 +1,11 @@ # -*- coding: utf-8 -*- -import ConfigParser import unittest from conf_d import Configuration +from conf_d.compat import ConfigParser -class TestConfigParser(ConfigParser.ConfigParser): + +class TestConfigParser(ConfigParser): def read(self, path): raise NotImplementedError('Catch this')
ConfigParser not found ConfigParser has been renamed to configparser in Python 3; when installing Beaver from pip, this missing import causes the installation to fail. (A compatibility-import sketch follows this row.)
0.0
[ "conf_d/tests/test_configuration.py::ConfigurationTests::test_confd", "conf_d/tests/test_configuration.py::ConfigurationTests::test_custom_config_parser", "conf_d/tests/test_configuration.py::ConfigurationTests::test_defaults", "conf_d/tests/test_configuration.py::ConfigurationTests::test_get", "conf_d/tests/test_configuration.py::ConfigurationTests::test_has_section", "conf_d/tests/test_configuration.py::ConfigurationTests::test_invalid_path", "conf_d/tests/test_configuration.py::ConfigurationTests::test_parse", "conf_d/tests/test_configuration.py::ConfigurationTests::test_parser", "conf_d/tests/test_configuration.py::ConfigurationTests::test_raw", "conf_d/tests/test_configuration.py::ConfigurationTests::test_readme" ]
[]
2018-12-09 12:41:32+00:00
5,048
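The conf_d patch above gates the import on `sys.version_info`; a try/except import is a common equivalent. Both styles in one runnable sketch:

```python
# Style used by the patch: branch on the interpreter version.
from sys import version_info

if version_info[0] < 3:
    from ConfigParser import ConfigParser  # Python 2 module name
else:
    from configparser import ConfigParser  # renamed in Python 3

# Equivalent duck-typed style seen in many compat layers:
try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import ConfigParser  # Python 2 fallback

# add_section/set/get exist under both names, so this runs on 2.6+ and 3.x.
parser = ConfigParser()
parser.add_section("main")
parser.set("main", "key", "value")
print(parser.get("main", "key"))
```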
python-bonobo__bonobo-335
diff --git a/bin/update_apidoc.py b/bin/update_apidoc.py index efb0563..b93cddf 100644 --- a/bin/update_apidoc.py +++ b/bin/update_apidoc.py @@ -1,3 +1,4 @@ +import inspect import os from jinja2 import DictLoader, Environment @@ -30,8 +31,6 @@ class Module: return os.path.join(__path__, apidoc_root, *self.name.split(".")) + ".rst" -import inspect - bonobo = __import__("bonobo") assert bonobo.__version__ diff --git a/bonobo/structs/graphs.py b/bonobo/structs/graphs.py index aaf3fd6..ce43ef6 100644 --- a/bonobo/structs/graphs.py +++ b/bonobo/structs/graphs.py @@ -59,7 +59,11 @@ class PartialGraph: class Graph: """ - Represents a directed graph of nodes. + Core structure representing a directed graph of nodes. It will be used to create data streaming queues between your + objects during the job execution. + + This is how the data flows are defined. + """ name = "" @@ -75,7 +79,9 @@ class Graph: yield from self.nodes def __len__(self): - """Node count. + """ + The graph length is defined as its node count. + """ return len(self.nodes) @@ -92,8 +98,19 @@ class Graph: return self.get_cursor().__rshift__(other) def get_cursor(self, ref=BEGIN): + """ + Create a `GraphCursor` to use the operator-based syntax to build graph, starting at `ref`. + + """ return GraphCursor(self, last=self.index_of(ref)) + def orphan(self): + """ + Create a `GraphCursor` attached to nothing. + + """ + return self.get_cursor(None) + def index_of(self, mixed): """ Find the index based on various strategies for a node, probably an input or output of chain. Supported @@ -115,10 +132,16 @@ class Graph: raise ValueError("Cannot find node matching {!r}.".format(mixed)) def indexes_of(self, *things): + """ + Returns the set of indexes of the things passed as arguments. + + """ return set(map(self.index_of, things)) def outputs_of(self, idx_or_node, create=False): - """Get a set of the outputs for a given node, node index or name. + """ + Get a set of the outputs for a given node, node index or name. + """ idx_or_node = self.index_of(idx_or_node) @@ -127,8 +150,10 @@ class Graph: return self.edges[idx_or_node] def add_node(self, new_node, *, _name=None): - """Add a node without connections in this graph and returns its index. + """ + Add a node without connections in this graph and returns its index. If _name is specified, name this node (string reference for further usage). + """ idx = len(self.nodes) self.edges[idx] = set() @@ -149,7 +174,8 @@ class Graph: return self.add_node(new_node, _name=_name) def add_chain(self, *nodes, _input=BEGIN, _output=None, _name=None, use_existing_nodes=False): - """Add `nodes` as a chain in this graph. + """ + Add `nodes` as a chain in this graph. **Input rules** @@ -222,7 +248,9 @@ class Graph: @property def topologically_sorted_indexes(self): - """Iterate in topological order, based on networkx's topological_sort() function. + """ + Iterate in topological order, based on networkx's topological_sort() function. + """ try: return self._topologcally_sorted_indexes_cache diff --git a/docs/guide/graphs.rst b/docs/guide/graphs.rst index 67f8ce9..e6a83ba 100644 --- a/docs/guide/graphs.rst +++ b/docs/guide/graphs.rst @@ -201,8 +201,11 @@ positional parameters as you want. .. note:: As of |bonobo| 0.7, a new syntax is available that we believe is more powerfull and more readable than the legacy - `add_chain` method. The former API is here to stay and it's perfectly safe to use it, but if it is an option, you - should consider the new syntax. During the transition period, we'll document both. 
+ `add_chain` method. The former API is here to stay and it's perfectly safe to use it (in fact, the new syntax uses + `add_chain` under the hood). + + If it is an option for you, we suggest you consider the new syntax. During the transition period, we'll document + both but the new syntax will eventually become default. .. code-block:: python @@ -393,6 +396,33 @@ You can also create single nodes, and the api provide the same capability on sin graph.add_chain(..., _output="foo") +Orphan nodes / chains +::::::::::::::::::::: + +The default behaviour of `add_chain` (or `get_cursor`) is to connect the first node to the special `BEGIN` token, which +instruct |bonobo| to call the connected node once without parameter to kickstart the data stream. + +This is normally what you want, but there are ways to override it, as you may want to add "orphan" nodes or chains to your graph. + +.. code-block:: python + + import bonobo + + graph = bonobo.Graph() + + # using add_node will naturally add a node as "orphan" + graph.add_node(a) + + # using add_chain with "None" as the input will create an orphan chain + graph.add_chain(a, b, c, _input=None) + + # using the new syntax, you can use either get_cursor(None) or the orphan() shortcut + graph.get_cursor(None) >> a >> b >> c + + # ... using the shortcut ... + graph.orphan() >> a >> b >> c + + Connecting two nodes ::::::::::::::::::::
python-bonobo/bonobo
e5b115e5df400ceba9d76a5fb68c2b22cff0da6e
diff --git a/bonobo/util/testing.py b/bonobo/util/testing.py index 13554f8..0c80e92 100644 --- a/bonobo/util/testing.py +++ b/bonobo/util/testing.py @@ -5,7 +5,7 @@ import os import runpy import sys from contextlib import contextmanager, redirect_stderr, redirect_stdout -from unittest.mock import patch +from unittest.mock import patch, sentinel import pytest @@ -14,6 +14,7 @@ from bonobo.commands import entrypoint from bonobo.execution.contexts.graph import GraphExecutionContext from bonobo.execution.contexts.node import NodeExecutionContext from bonobo.structs.tokens import Token +from bonobo.util import tuplize @contextmanager @@ -26,6 +27,11 @@ def optional_contextmanager(cm, *, ignore=False): class FilesystemTester: + """ + Helper that create temporary filesystem service to be used in unit tests. + + """ + def __init__(self, extension="txt", mode="w", *, input_data=""): self.extension = extension self.input_data = input_data @@ -43,6 +49,12 @@ class FilesystemTester: class QueueList(list): + """ + A list that behave like a queue (or is it the oposite?). + + The datastructure is not smart at all, but it's quite useful for testing. + """ + def append(self, item): if not isinstance(item, Token): super(QueueList, self).append(item) @@ -51,6 +63,11 @@ class QueueList(list): class BufferingContext: + """ + Base class to add a buffer to a context. + + """ + def __init__(self, buffer=None): if buffer is None: buffer = QueueList() @@ -64,12 +81,22 @@ class BufferingContext: class BufferingNodeExecutionContext(BufferingContext, NodeExecutionContext): + """ + Node execution context that actually stores the node outputs in a buffer, so one can test it afterward. + + """ + def __init__(self, *args, buffer=None, **kwargs): BufferingContext.__init__(self, buffer) NodeExecutionContext.__init__(self, *args, **kwargs, _outputs=[self.buffer]) class BufferingGraphExecutionContext(BufferingContext, GraphExecutionContext): + """ + Graph execution context that uses buffering node execution contexts, all nodes buffering to the same buffer. + + """ + NodeExecutionContextType = BufferingNodeExecutionContext def __init__(self, *args, buffer=None, **kwargs): @@ -99,13 +126,13 @@ def runner(f): @runner def runner_entrypoint(args): - """ Run bonobo using the python command entrypoint directly (bonobo.commands.entrypoint). """ + """Run bonobo using the python command entrypoint directly (bonobo.commands.entrypoint). """ return entrypoint(args) @runner def runner_module(args): - """ Run bonobo using the bonobo.__main__ file, which is equivalent as doing "python -m bonobo ...".""" + """Run bonobo using the bonobo.__main__ file, which is equivalent as doing "python -m bonobo ...".""" with patch.object(sys, "argv", ["bonobo", *args]): return runpy.run_path(__main__.__file__, run_name="__main__") @@ -192,7 +219,10 @@ class ConfigurableNodeTest: class ReaderTest(ConfigurableNodeTest): - """ Helper class to test reader transformations. """ + """ + Helper class to test reader transformations. + + """ ReaderNodeType = None @@ -232,7 +262,10 @@ class ReaderTest(ConfigurableNodeTest): class WriterTest(ConfigurableNodeTest): - """ Helper class to test writer transformations. """ + """ + Helper class to test writer transformations. 
+ + """ WriterNodeType = None @@ -255,3 +288,15 @@ class WriterTest(ConfigurableNodeTest): def readlines(self): with self.fs.open(self.filename) as fp: return tuple(map(str.strip, fp.readlines())) + + +@tuplize +def get_pseudo_nodes(*names): + """ + Generates a serie of named sentinels to test graph APIs. + + >>> a, b, c = get_pseudo_nodes(*"abc") + + """ + for name in names: + yield getattr(sentinel, name) diff --git a/tests/structs/test_graphs.py b/tests/structs/test_graphs.py index 725ba61..5dcb10f 100644 --- a/tests/structs/test_graphs.py +++ b/tests/structs/test_graphs.py @@ -4,6 +4,7 @@ import pytest from bonobo.constants import BEGIN from bonobo.structs.graphs import Graph +from bonobo.util.testing import get_pseudo_nodes identity = lambda x: x @@ -26,19 +27,21 @@ def test_graph_outputs_of(): def test_graph_index_of(): g = Graph() - g.add_node(sentinel.foo) - g.add_node(sentinel.bar) + foo, bar, not_there = get_pseudo_nodes("foo", "bar", "not_there") + + g.add_node(foo) + g.add_node(bar) # sequential, can resolve objects - assert g.index_of(sentinel.foo) == 0 - assert g.index_of(sentinel.bar) == 1 + assert g.index_of(foo) == 0 + assert g.index_of(bar) == 1 # calling on an index should return the index - assert g.index_of(sentinel.bar) == g.index_of(g.index_of(sentinel.bar)) + assert g.index_of(bar) == g.index_of(g.index_of(bar)) # not existing should raise value error with pytest.raises(ValueError): - g.index_of(sentinel.not_there) + g.index_of(not_there) # tokens resolve to themselves assert g.index_of(BEGIN) == BEGIN @@ -58,15 +61,16 @@ def test_graph_add_component(): def test_invalid_graph_usage(): g = Graph() + foo, bar = get_pseudo_nodes("foo", "bar") with pytest.raises(ValueError): g.add_chain() - g.add_node(sentinel.foo) - g.add_node(sentinel.bar) + g.add_node(foo) + g.add_node(bar) with pytest.raises(RuntimeError): - g.add_chain(_input=sentinel.bar, _output=sentinel.foo, _name="this_is_not_possible") + g.add_chain(_input=bar, _output=foo, _name="this_is_not_possible") def test_graph_add_chain(): @@ -81,48 +85,51 @@ def test_graph_add_chain(): def test_graph_topological_sort(): g = Graph() + a1, a2, a3, b1, b2 = get_pseudo_nodes("a1", "a2", "a3", "b1", "b2") - g.add_chain(sentinel.a1, sentinel.a2, sentinel.a3, _input=None, _output=None) + g.add_chain(a1, a2, a3, _input=None, _output=None) assert g.topologically_sorted_indexes == (0, 1, 2) - assert g[0] == sentinel.a1 - assert g[1] == sentinel.a2 - assert g[2] == sentinel.a3 + assert g[0] == a1 + assert g[1] == a2 + assert g[2] == a3 - g.add_chain(sentinel.b1, sentinel.b2, _output=sentinel.a2) + g.add_chain(b1, b2, _output=a2) assert g.topologically_sorted_indexes[-2:] == (1, 2) assert g.topologically_sorted_indexes.index(3) < g.topologically_sorted_indexes.index(4) - assert g[3] == sentinel.b1 - assert g[4] == sentinel.b2 + assert g[3] == b1 + assert g[4] == b2 def test_connect_two_chains(): g = Graph() + a1, a2, b1, b2 = get_pseudo_nodes("a1", "a2", "b1", "b2") - g.add_chain(sentinel.a1, sentinel.a2, _input=None, _output=None) - g.add_chain(sentinel.b1, sentinel.b2, _input=None, _output=None) - assert len(g.outputs_of(sentinel.a2)) == 0 + g.add_chain(a1, a2, _input=None, _output=None) + g.add_chain(b1, b2, _input=None, _output=None) + assert len(g.outputs_of(a2)) == 0 - g.add_chain(_input=sentinel.a2, _output=sentinel.b1) - assert g.outputs_of(sentinel.a2) == {g.index_of(sentinel.b1)} + g.add_chain(_input=a2, _output=b1) + assert g.outputs_of(a2) == g.indexes_of(b1) def test_connect_two_anonymous_nodes(): g = Graph() + a, b = get_pseudo_nodes(*"ab") # Create two "anonymous" nodes - g.add_node(sentinel.a) - g.add_node(sentinel.b) + g.add_node(a) + g.add_node(b) # Connect them - g.add_chain(_input=sentinel.a, _output=sentinel.b) + g.add_chain(_input=a, _output=b) def test_named_nodes(): g = Graph() - a, b, c, d, e, f = sentinel.a, sentinel.b, sentinel.c, sentinel.d, sentinel.e, sentinel.f + a, b, c, d, e, f = get_pseudo_nodes(*"abcdef") # Here we mark _input to None, so normalize won't get the "begin" impulsion. g.add_chain(e, f, _input=None, _name="load") diff --git a/tests/structs/test_graphs_new_syntax.py b/tests/structs/test_graphs_new_syntax.py index 570fa47..68f0e74 100644 --- a/tests/structs/test_graphs_new_syntax.py +++ b/tests/structs/test_graphs_new_syntax.py @@ -1,17 +1,10 @@ from operator import attrgetter -from unittest.mock import sentinel import pytest from bonobo.constants import BEGIN from bonobo.structs.graphs import Graph, GraphCursor -from bonobo.util import tuplize - - -@tuplize -def get_pseudo_nodes(*names): - for name in names: - yield getattr(sentinel, name) +from bonobo.util.testing import get_pseudo_nodes def test_get_cursor(): @@ -127,3 +120,23 @@ def test_cursor_merge(): assert g.outputs_of(c) == set() assert c1 == c2 + + +def test_cursor_merge_orphan_in_between(): + a, b, c, v, w, x, y = get_pseudo_nodes(*"abcdefg") + g = Graph() + g >> a >> b >> c + assert len(g) == 3 + g.orphan() >> v >> w >> b + assert len(g) == 5 + g.orphan() >> x >> y >> b + assert len(g) == 7 + + assert g.outputs_of(BEGIN) == g.indexes_of(a) + assert g.outputs_of(a) == g.indexes_of(b) + assert g.outputs_of(b) == g.indexes_of(c) + assert g.outputs_of(c) == set() + assert g.outputs_of(v) == g.indexes_of(w) + assert g.outputs_of(w) == g.indexes_of(b) + assert g.outputs_of(x) == g.indexes_of(y) + assert g.outputs_of(y) == g.indexes_of(b)
New Syntax: Forks As a dev, I should be able to create a graph using new syntax that contains "forks" (one input, more than one output)
0.0
[ "tests/structs/test_graphs_new_syntax.py::test_cursor_merge_orphan_in_between" ]
[ "tests/structs/test_graphs.py::test_graph_outputs_of", "tests/structs/test_graphs.py::test_graph_index_of", "tests/structs/test_graphs.py::test_graph_add_component", "tests/structs/test_graphs.py::test_invalid_graph_usage", "tests/structs/test_graphs.py::test_graph_add_chain", "tests/structs/test_graphs.py::test_graph_topological_sort", "tests/structs/test_graphs.py::test_connect_two_chains", "tests/structs/test_graphs.py::test_connect_two_anonymous_nodes", "tests/structs/test_graphs.py::test_named_nodes", "tests/structs/test_graphs.py::test_copy", "tests/structs/test_graphs_new_syntax.py::test_get_cursor", "tests/structs/test_graphs_new_syntax.py::test_get_cursor_in_a_vacuum", "tests/structs/test_graphs_new_syntax.py::test_cursor_usage_to_add_a_chain", "tests/structs/test_graphs_new_syntax.py::test_cursor_usage_to_add_a_chain_in_a_context_manager", "tests/structs/test_graphs_new_syntax.py::test_implicit_cursor_usage", "tests/structs/test_graphs_new_syntax.py::test_cursor_to_fork_a_graph", "tests/structs/test_graphs_new_syntax.py::test_cursor_to_fork_at_the_end", "tests/structs/test_graphs_new_syntax.py::test_cursor_merge" ]
2019-06-02 06:55:06+00:00
5,049
python__bedevere-617
diff --git a/bedevere/prtype.py b/bedevere/prtype.py index 0105e50..bccd879 100644 --- a/bedevere/prtype.py +++ b/bedevere/prtype.py @@ -43,7 +43,7 @@ async def classify_by_filepaths(gh, pull_request, filenames): if util.is_news_dir(filename): news = True filepath = pathlib.PurePath(filename) - if filepath.suffix == ".rst": + if filepath.suffix == ".rst" or filepath.name == ".nitignore": docs = True elif filepath.name.startswith("test_"): tests = True
python/bedevere
b5bcd24e79ad72b47582f89f7e7053f5b3157fa4
diff --git a/tests/test_prtype.py b/tests/test_prtype.py index 4fcaf0c..c9b0777 100644 --- a/tests/test_prtype.py +++ b/tests/test_prtype.py @@ -85,6 +85,26 @@ async def test_docs_no_news(): assert gh.post_data[0] == [Labels.docs.value, Labels.skip_news.value] +async def test_docs_no_news_with_dotnitignore(): + filenames = {"path/to/docs1.rst", "path/to/.nitignore"} + issue = {"labels": [], "labels_url": "https://api.github.com/some/label"} + gh = FakeGH(getitem=issue) + event_data = { + "action": "opened", + "number": 1234, + "pull_request": { + "url": "https://api.github.com/repos/cpython/python/pulls/1234", + "statuses_url": "https://api.github.com/some/status", + "issue_url": "https://api.github.com/repos/cpython/python/issue/1234", + }, + } + await prtype.classify_by_filepaths(gh, event_data["pull_request"], filenames) + assert gh.getitem_url == "https://api.github.com/repos/cpython/python/issue/1234" + assert len(gh.post_url) == 1 + assert gh.post_url[0] == "https://api.github.com/some/label" + assert gh.post_data[0] == [Labels.docs.value, Labels.skip_news.value] + + async def test_docs_and_news(): filenames = {"/path/to/docs1.rst", f"Misc/NEWS.d/next/Lib/{GOOD_BASENAME}"} issue = {"labels": [], "labels_url": "https://api.github.com/some/label"}
Add docs label for PRs that touch Doc/tools/.nitignore See e.g. https://github.com/python/cpython/pull/114280 or https://github.com/python/cpython/pull/114194
0.0
[ "tests/test_prtype.py::test_docs_no_news_with_dotnitignore" ]
[ "tests/test_prtype.py::test_no_files", "tests/test_prtype.py::test_news_only", "tests/test_prtype.py::test_docs_no_news", "tests/test_prtype.py::test_docs_and_news", "tests/test_prtype.py::test_tests_only", "tests/test_prtype.py::test_docs_and_tests", "tests/test_prtype.py::test_leave_existing_type_labels", "tests/test_prtype.py::test_do_not_post_if_nothing_to_apply", "tests/test_prtype.py::test_news_and_tests", "tests/test_prtype.py::test_other_files" ]
2024-01-19 08:13:31+00:00
5,111
pyupio__changelogs-232
diff --git a/changelogs/finder.py b/changelogs/finder.py index c34bbd2..8bba1e5 100644 --- a/changelogs/finder.py +++ b/changelogs/finder.py @@ -68,7 +68,8 @@ def find_repo_urls(session, name, candidates): :return: str, URL to a repo """ for _url in candidates: - if validate_url(_url): + _url = validate_url(_url) + if _url: try: resp = session.get(_url) if resp.status_code == 200:
pyupio/changelogs
734763fa320d5fbf71016f74f188c5a51e60c45f
diff --git a/tests/test_finder.py b/tests/test_finder.py index a127632..0631ad4 100644 --- a/tests/test_finder.py +++ b/tests/test_finder.py @@ -1,4 +1,6 @@ -from changelogs.finder import contains_project_name +from unittest.mock import Mock + +from changelogs.finder import contains_project_name, find_repo_urls def test_contains_project_name(): @@ -19,5 +21,19 @@ def test_not_contains_project_name(): assert not call('dj-dashboard', 'https://github.com/pydanny/cookiecutter-djangopackage') -def test_find_repo_urls(): - pass +def test_find_repo_urls_invalid_candidate(): + session = Mock() + list(find_repo_urls(session, 'foobar', ['invalid-link'])) + assert not session.get.called + + +def test_find_repo_urls_valid_candidate(): + session = Mock() + list(find_repo_urls(session, 'foobar', ['http://example.com/link'])) + session.get.assert_called_with('http://example.com/link') + + +def test_find_repo_urls_domain_candidate(): + session = Mock() + list(find_repo_urls(session, 'foobar', ['example.com'])) + session.get.assert_called_with('http://example.com')
Url validation ignored ```In [12]: changelogs.get('1') --------------------------------------------------------------------------- MissingSchema Traceback (most recent call last) <ipython-input-12-47f2b1bcf4e3> in <module> ----> 1 changelogs.get('1') ~/src/cve-search/env/lib/python3.6/site-packages/changelogs/changelogs.py in get(name, vendor, functions, _depth) 166 data=data, 167 releases=releases, --> 168 find_changelogs_fn=fns["find_changelogs"] 169 ) 170 ~/src/cve-search/env/lib/python3.6/site-packages/changelogs/pypi.py in get_urls(session, name, data, find_changelogs_fn, **kwargs) 91 if data['info']['description']: 92 candidates.extend(changelogs.url_re.findall(data["info"]["description"])) ---> 93 return find_changelogs_fn(session=session, name=name, candidates=candidates) 94 return set(), set() ~/src/cve-search/env/lib/python3.6/site-packages/changelogs/finder.py in find_changelogs(session, name, candidates) 217 if not repos: 218 logger.info("No repo found, trying to find one on related sites {}".format(candidates)) --> 219 repos = set(find_repo_urls(session, name, candidates)) 220 221 urls = [] ~/src/cve-search/env/lib/python3.6/site-packages/changelogs/finder.py in find_repo_urls(session, name, candidates) 71 if validate_url(_url): 72 try: ---> 73 resp = session.get(_url) 74 if resp.status_code == 200: 75 tree = etree.HTML(resp.content) ~/src/cve-search/env/lib/python3.6/site-packages/requests/sessions.py in get(self, url, **kwargs) 523 524 kwargs.setdefault('allow_redirects', True) --> 525 return self.request('GET', url, **kwargs) 526 527 def options(self, url, **kwargs): ~/src/cve-search/env/lib/python3.6/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json) 496 hooks=hooks, 497 ) --> 498 prep = self.prepare_request(req) 499 500 proxies = proxies or {} ~/src/cve-search/env/lib/python3.6/site-packages/requests/sessions.py in prepare_request(self, request) 439 auth=merge_setting(auth, self.auth), 440 cookies=merged_cookies, --> 441 hooks=merge_hooks(request.hooks, self.hooks), 442 ) 443 return p ~/src/cve-search/env/lib/python3.6/site-packages/requests/models.py in prepare(self, method, url, headers, files, data, params, auth, cookies, hooks, json) 307 308 self.prepare_method(method) --> 309 self.prepare_url(url, params) 310 self.prepare_headers(headers) 311 self.prepare_cookies(cookies) ~/src/cve-search/env/lib/python3.6/site-packages/requests/models.py in prepare_url(self, url, params) 381 error = error.format(to_native_string(url, 'utf8')) 382 --> 383 raise MissingSchema(error) 384 385 if not host: MissingSchema: Invalid URL 'gxc.online': No schema supplied. Perhaps you meant http://gxc.online? ``` Basically, [this line](https://github.com/pyupio/changelogs/blob/0cdb929ac4546c766cd7eef9ae4eb4baaa08f452/changelogs/finder.py#L71) should be like this: ``` _url = validate_url(_url) if _url: ``` I'm gonna send a PR soon unless you fix it before that.
0.0
[ "tests/test_finder.py::test_find_repo_urls_domain_candidate" ]
[ "tests/test_finder.py::test_contains_project_name", "tests/test_finder.py::test_not_contains_project_name", "tests/test_finder.py::test_find_repo_urls_invalid_candidate", "tests/test_finder.py::test_find_repo_urls_valid_candidate" ]
2020-11-04 15:29:02+00:00
5,124
qiboteam__qibo-642
diff --git a/src/qibo/gates/abstract.py b/src/qibo/gates/abstract.py index 00c79562b..831f7a379 100644 --- a/src/qibo/gates/abstract.py +++ b/src/qibo/gates/abstract.py @@ -340,6 +340,11 @@ class ParametrizedGate(Gate): for gate in self.device_gates: # pragma: no cover gate.parameters = x + def on_qubits(self, qubit_map): + gate = super().on_qubits(qubit_map) + gate.parameters = self.parameters + return gate + def substitute_symbols(self): params = list(self._parameters) for i, param in self.symbolic_parameters.items(): diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index e08b185c6..0fb66e0e3 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -1264,6 +1264,7 @@ class Unitary(ParametrizedGate): if self.is_controlled_by: controls = (qubit_map.get(i) for i in self.control_qubits) gate = gate.controlled_by(*controls) + gate.parameters = self.parameters return gate def _dagger(self):
qiboteam/qibo
dce74ac77755ab0a0b88fd93c91d0b6d604a00e9
diff --git a/src/qibo/tests/test_models_circuit_parametrized.py b/src/qibo/tests/test_models_circuit_parametrized.py index 6fb28f60f..3314c49c7 100644 --- a/src/qibo/tests/test_models_circuit_parametrized.py +++ b/src/qibo/tests/test_models_circuit_parametrized.py @@ -190,6 +190,27 @@ def test_set_parameters_with_gate_fusion(backend, trainable): backend.assert_circuitclose(fused_c, c) [email protected]("trainable", [True, False]) +def test_set_parameters_with_light_cone(backend, trainable): + """Check updating parameters of light cone circuit.""" + params = np.random.random(4) + c = Circuit(4) + c.add(gates.RX(0, theta=params[0], trainable=trainable)) + c.add(gates.RY(1, theta=params[1])) + c.add(gates.CZ(0, 1)) + c.add(gates.RX(2, theta=params[2])) + c.add(gates.RY(3, theta=params[3], trainable=trainable)) + c.add(gates.CZ(2, 3)) + if trainable: + c.set_parameters(np.random.random(4)) + else: + c.set_parameters(np.random.random(2)) + target_state = backend.execute_circuit(c) + lc, _ = c.light_cone(1, 2) + final_state = backend.execute_circuit(lc) + backend.assert_allclose(final_state, target_state) + + def test_variable_theta(): """Check that parametrized gates accept `tf.Variable` parameters.""" try:
Circuit.light_cone() resets phases of parametrized gates Example: ```python3 from qibo import gates as gt from qibo.models import Circuit from numpy import asarray, pi from numpy.random import rand qubits = 7 depth = 2 circuit = Circuit(qubits) for _ in range(depth): for qubit in range(qubits): circuit.add(gt.U3(qubit, 0, 0, 0)) cnots_even = asarray([[k, k + 1] for k in range(0, qubits - 1, 2)]) cnots_odd = asarray([[k, k + 1] for k in range(1, qubits - 1, 2)]) for row in cnots_even: circuit.add(gt.CNOT(*row)) for row in cnots_odd: circuit.add(gt.CNOT(*row)) phases = list() for _ in range(depth): p = rand(qubits, 3) p[:, 0] = p[:, 0] * pi p[:, 1] = p[:, 1] * 2*pi p[:, 1] = p[:, 2] * 2*pi phases.append(p) phases = asarray(phases).flatten().reshape(qubits * depth, 3) circuit.set_parameters(phases) print(circuit.get_parameters()) cone = circuit.copy(True) cone.add((gt.Y(k) for k in [1, 2])) cone = cone.light_cone(1, 2)[0] print(cone.get_parameters()) ``` I have tried `cone.set_parameters(phases)` after the deep copy, but it doesn't solve the issue. No matter what, phases are set to 0 after `light_cone()`
0.0
[ "src/qibo/tests/test_models_circuit_parametrized.py::test_set_parameters_with_light_cone[numpy-True]", "src/qibo/tests/test_models_circuit_parametrized.py::test_set_parameters_with_light_cone[numpy-False]" ]
[ "src/qibo/tests/test_models_circuit_parametrized.py::test_rx_parameter_setter[numpy]", "src/qibo/tests/test_models_circuit_parametrized.py::test_set_parameters_with_list[numpy-True]", "src/qibo/tests/test_models_circuit_parametrized.py::test_set_parameters_with_list[numpy-False]", "src/qibo/tests/test_models_circuit_parametrized.py::test_circuit_set_parameters_ungates[numpy-None-True]", "src/qibo/tests/test_models_circuit_parametrized.py::test_circuit_set_parameters_ungates[numpy-None-False]", "src/qibo/tests/test_models_circuit_parametrized.py::test_set_parameters_with_gate_fusion[numpy-True]", "src/qibo/tests/test_models_circuit_parametrized.py::test_set_parameters_with_gate_fusion[numpy-False]" ]
2022-09-13 08:46:27+00:00
5,135
qiboteam__qibolab-659
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9a58abee..75df67bc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,7 +10,7 @@ repos: - id: check-toml - id: debug-statements - repo: https://github.com/psf/black - rev: 23.10.1 + rev: 23.11.0 hooks: - id: black args: @@ -25,7 +25,7 @@ hooks: - id: pyupgrade - repo: https://github.com/hadialqattan/pycln - rev: v2.3.0 + rev: v2.4.0 hooks: - id: pycln args: diff --git a/src/qibolab/instruments/qblox/controller.py b/src/qibolab/instruments/qblox/controller.py index 245bf018..d2e5fc3e 100644 --- a/src/qibolab/instruments/qblox/controller.py +++ b/src/qibolab/instruments/qblox/controller.py @@ -216,6 +216,7 @@ class QbloxController(Controller): for ro_pulse in sequence.ro_pulses: if options.acquisition_type is AcquisitionType.DISCRIMINATION: _res = acquisition_results[ro_pulse.serial][2] + _res = _res.reshape(nshots, -1) if options.averaging_mode == AveragingMode.SINGLESHOT else _res if average: _res = np.mean(_res, axis=0) else: diff --git a/src/qibolab/instruments/zhinst.py b/src/qibolab/instruments/zhinst.py index 7a1df9ac..e8b5c8db 100644 --- a/src/qibolab/instruments/zhinst.py +++ b/src/qibolab/instruments/zhinst.py @@ -503,8 +503,13 @@ class Zurich(Controller): ) def run_exp(self): - """Compilation settings, compilation step, execution step and data retrival""" - # self.experiment.save("saved_exp") + """ + Compilation settings, compilation step, execution step and data retrival + - Save a experiment Python object: + self.experiment.save("saved_exp") + - Save a experiment compiled experiment (): + self.exp.save("saved_exp") # saving compiled experiment + """ self.exp = self.session.compile(self.experiment, compiler_settings=COMPILER_SETTINGS) # self.exp.save_compiled_experiment("saved_exp") self.results = self.session.run(self.exp) @@ -890,38 +895,18 @@ class Zurich(Controller): """qubit readout pulse, data acquisition and qubit relaxation""" play_after = None - if len(self.sequence_qibo.qf_pulses) != 0 and len(self.sequence_qibo.qd_pulses) != 0: - play_after = ( - self.play_after_set(self.sequence_qibo.qf_pulses, "bias") - if self.sequence_qibo.qf_pulses.finish > self.sequence_qibo.qd_pulses.finish - else self.play_after_set(self.sequence_qibo.qd_pulses, "drive") - ) - if len(self.sequence_qibo.cf_pulses) != 0 and len(self.sequence_qibo.qd_pulses) != 0: - play_after = ( - self.play_after_set(self.sequence_qibo.cf_pulses, "bias_coupler") - if self.sequence_qibo.cf_pulses.finish > self.sequence_qibo.qd_pulses.finish - else self.play_after_set(self.sequence_qibo.qd_pulses, "drive") - ) + # TODO: if we use duration sweepers, the code might not behave as expected + # i.e.: self.sequence_qibo will contain the a pulse or sweeper with a static duration that may screw the comparison + qf_finish = self.sequence_qibo.qf_pulses.finish + qd_finish = self.sequence_qibo.qd_pulses.finish + cf_finish = self.sequence_qibo.cf_pulses.finish - elif len(self.sequence_qibo.qf_pulses) != 0: + if qf_finish > qd_finish and qf_finish > cf_finish: play_after = self.play_after_set(self.sequence_qibo.qf_pulses, "bias") - elif len(self.sequence_qibo.qd_pulses) != 0: + elif qd_finish > qf_finish and qd_finish > cf_finish: play_after = self.play_after_set(self.sequence_qibo.qd_pulses, "drive") - elif ( - len(self.sequence_qibo.qf_pulses) != 0 - and len(self.sequence_qibo.qd_pulses) != 0 - and len(self.sequence_qibo.cf_pulses) != 0 - ): - seq_qf = self.sequence_qibo.qf_pulses.finish - seq_qd = self.sequence_qibo.qd_pulses.finish - seq_cf = self.sequence_qibo.cf_pulses.finish - # add here for flux coupler pulses - if seq_qf > seq_qd and seq_qf > seq_cf: - play_after = self.play_after_set(self.sequence_qibo.qf_pulses, "bias") - elif seq_qd > seq_qf and seq_qd > seq_cf: - play_after = self.play_after_set(self.sequence_qibo.qd_pulses, "drive") - elif seq_cf > seq_qf and seq_cf > seq_qd: - play_after = self.play_after_set(self.sequence_qibo.cf_pulse, "bias_coupler") + elif cf_finish > qf_finish and cf_finish > qd_finish: + play_after = self.play_after_set(self.sequence_qibo.cf_pulses, "bias_coupler") readout_schedule = defaultdict(list) qubit_readout_schedule = defaultdict(list) @@ -946,6 +931,9 @@ class Zurich(Controller): for pulse, q, iq_angle in zip(pulses, qubits, iq_angles): pulse.zhpulse.uid += str(i) + # TODO: if the measure sequence starts after the last pulse, add a delay + # keep in mind that the signal might start before the last pulse + # if sweepers are involved if play_after is None: exp.delay( signal=f"measure{q}", diff --git a/src/qibolab/pulses.py b/src/qibolab/pulses.py index 90c6c1d6..100bf498 100644 --- a/src/qibolab/pulses.py +++ b/src/qibolab/pulses.py @@ -733,7 +733,7 @@ class Pulse: value (se_int | int | np.integer): the time in ns. """ - if not isinstance(value, (se_int, int, np.integer)): + if not isinstance(value, (se_int, int, np.integer, float)): raise TypeError(f"start argument type should be intSymbolicExpression or int, got {type(value).__name__}") if not value >= 0: raise ValueError(f"start argument must be >= 0, got {value}") @@ -749,7 +749,7 @@ else: if isinstance(value, np.integer): self._start = int(value) - elif isinstance(value, int): + else: self._start = value if not self._duration is None: @@ -794,7 +794,7 @@ else: if isinstance(value, np.integer): self._duration = int(value) - elif isinstance(value, int): + else: self._duration = value if not self._start is None:
qiboteam/qibolab
139914889c7dc5e2dc0010abe3a4f4ac7852f225
diff --git a/tests/test_pulses.py b/tests/test_pulses.py index 9c0edb98..b4970f39 100644 --- a/tests/test_pulses.py +++ b/tests/test_pulses.py @@ -158,6 +158,23 @@ def test_pulses_pulse_init(): p11 = FluxPulse(0, 40, 0.9, SNZ(t_half_flux_pulse=17, b_amplitude=0.8), 0, 200) p11 = Pulse(0, 40, 0.9, 400e6, 0, eCap(alpha=2), 0, PulseType.DRIVE) + # initialisation with float duration and start + p12 = Pulse( + start=5.5, + duration=34.33, + amplitude=0.9, + frequency=20_000_000, + relative_phase=1, + shape=Rectangular(), + channel=0, + type=PulseType.READOUT, + qubit=0, + ) + assert repr(p12) == "Pulse(5.5, 34.33, 0.9, 20_000_000, 1, Rectangular(), 0, PulseType.READOUT, 0)" + assert isinstance(p12.start, float) + assert isinstance(p12.duration, float) + assert p12.finish == 5.5 + 34.33 + def test_pulses_pulse_attributes(): channel = 0 @@ -1152,9 +1169,9 @@ def test_pulse_properties(start, duration): check_properties(p0) [email protected]("faulty_start", [10.0, "hello"]) [email protected]("faulty_duration", [100.0, "hello"]) -def test_pulse_setter_errors(faulty_start, faulty_duration): +def test_pulse_setter_errors(): + faulty_duration = "hello" + faulty_start = "hello" with pytest.raises(TypeError): p0 = Pulse(faulty_start, 100, 0.9, 0, 0, Rectangular(), 0) with pytest.raises(TypeError):
Bugs when sweeping drive pulses parameters in qblox driver with `AveragingMode.SINGLESHOT` I found two bugs while running new protocols developed in https://github.com/qiboteam/qibocal/pull/567: 1. `rabi_amplitude` The error triggered is the following ```sh RecursionError: maximum recursion depth exceeded in comparison ``` 2. `rabi_length` Here is an example of a runcard: ```yml platform: qw5q_gold_qblox qubits: [0,1,2,3,4] actions: - id: rabi priority: 0 operation: rabi_length parameters: pulse_duration_start: 10 pulse_duration_end: 200 pulse_duration_step: 4 pulse_amplitude: 0.5 relaxation_time: 100_000 nshots: 1024 ``` In this case I see a single value in the plot: ![image](https://github.com/qiboteam/qibolab/assets/49183315/3123ece7-245e-41ec-b391-83fa327c4f22)
0.0
[ "tests/test_pulses.py::test_pulses_pulse_init" ]
[ "tests/test_pulses.py::test_pulses_pulse_attributes", "tests/test_pulses.py::test_pulses_is_equal_ignoring_start", "tests/test_pulses.py::test_pulses_pulse_serial", "tests/test_pulses.py::test_pulses_pulseshape_sampling_rate", "tests/test_pulses.py::test_raise_shapeiniterror", "tests/test_pulses.py::test_pulses_pulseshape_drag_shape", "tests/test_pulses.py::test_pulses_pulse_hash", "tests/test_pulses.py::test_pulses_pulse_aliases", "tests/test_pulses.py::test_pulses_pulse_split_pulse", "tests/test_pulses.py::test_pulses_pulsesequence_init", "tests/test_pulses.py::test_pulses_pulsesequence_operators", "tests/test_pulses.py::test_pulses_pulsesequence_add", "tests/test_pulses.py::test_pulses_pulsesequence_clear", "tests/test_pulses.py::test_pulses_pulsesequence_start_finish", "tests/test_pulses.py::test_pulses_pulsesequence_get_channel_pulses", "tests/test_pulses.py::test_pulses_pulsesequence_get_qubit_pulses", "tests/test_pulses.py::test_pulses_pulsesequence_pulses_overlap", "tests/test_pulses.py::test_pulses_pulsesequence_separate_overlapping_pulses", "tests/test_pulses.py::test_pulses_pulse_symbolic_expressions", "tests/test_pulses.py::test_pulses_pulse_pulse_order", "tests/test_pulses.py::test_pulses_waveform", "tests/test_pulses.py::test_pulses_pulseshape_rectangular", "tests/test_pulses.py::test_pulses_pulseshape_gaussian", "tests/test_pulses.py::test_pulses_pulseshape_drag", "tests/test_pulses.py::test_pulses_pulseshape_eq", "tests/test_pulses.py::test_pulse", "tests/test_pulses.py::test_readout_pulse", "tests/test_pulses.py::test_pulse_sequence_add", "tests/test_pulses.py::test_pulse_sequence__add__", "tests/test_pulses.py::test_pulse_sequence__mul__", "tests/test_pulses.py::test_pulse_sequence_add_readout", "tests/test_pulses.py::test_envelope_waveform_i_q", "tests/test_pulses.py::test_pulse_properties[100-0]", "tests/test_pulses.py::test_pulse_properties[100-10]", "tests/test_pulses.py::test_pulse_properties[100-start2]", "tests/test_pulses.py::test_pulse_properties[100-start3]", "tests/test_pulses.py::test_pulse_properties[500-0]", "tests/test_pulses.py::test_pulse_properties[500-10]", "tests/test_pulses.py::test_pulse_properties[500-start2]", "tests/test_pulses.py::test_pulse_properties[500-start3]", "tests/test_pulses.py::test_pulse_properties[duration2-0]", "tests/test_pulses.py::test_pulse_properties[duration2-10]", "tests/test_pulses.py::test_pulse_properties[duration2-start2]", "tests/test_pulses.py::test_pulse_properties[duration2-start3]", "tests/test_pulses.py::test_pulse_properties[duration3-0]", "tests/test_pulses.py::test_pulse_properties[duration3-10]", "tests/test_pulses.py::test_pulse_properties[duration3-start2]", "tests/test_pulses.py::test_pulse_properties[duration3-start3]", "tests/test_pulses.py::test_pulse_setter_errors" ]
2023-11-14 12:33:49+00:00
5,142
qiboteam__qibolab-660
diff --git a/src/qibolab/pulses.py b/src/qibolab/pulses.py index 90c6c1d6..100bf498 100644 --- a/src/qibolab/pulses.py +++ b/src/qibolab/pulses.py @@ -733,7 +733,7 @@ class Pulse: value (se_int | int | np.integer): the time in ns. """ - if not isinstance(value, (se_int, int, np.integer)): + if not isinstance(value, (se_int, int, np.integer, float)): raise TypeError(f"start argument type should be intSymbolicExpression or int, got {type(value).__name__}") if not value >= 0: raise ValueError(f"start argument must be >= 0, got {value}") @@ -749,7 +749,7 @@ class Pulse: else: if isinstance(value, np.integer): self._start = int(value) - elif isinstance(value, int): + else: self._start = value if not self._duration is None: @@ -794,7 +794,7 @@ class Pulse: else: if isinstance(value, np.integer): self._duration = int(value) - elif isinstance(value, int): + else: self._duration = value if not self._start is None:
qiboteam/qibolab
139914889c7dc5e2dc0010abe3a4f4ac7852f225
diff --git a/tests/test_pulses.py b/tests/test_pulses.py index 9c0edb98..b4970f39 100644 --- a/tests/test_pulses.py +++ b/tests/test_pulses.py @@ -158,6 +158,23 @@ def test_pulses_pulse_init(): p11 = FluxPulse(0, 40, 0.9, SNZ(t_half_flux_pulse=17, b_amplitude=0.8), 0, 200) p11 = Pulse(0, 40, 0.9, 400e6, 0, eCap(alpha=2), 0, PulseType.DRIVE) + # initialisation with float duration and start + p12 = Pulse( + start=5.5, + duration=34.33, + amplitude=0.9, + frequency=20_000_000, + relative_phase=1, + shape=Rectangular(), + channel=0, + type=PulseType.READOUT, + qubit=0, + ) + assert repr(p12) == "Pulse(5.5, 34.33, 0.9, 20_000_000, 1, Rectangular(), 0, PulseType.READOUT, 0)" + assert isinstance(p12.start, float) + assert isinstance(p12.duration, float) + assert p12.finish == 5.5 + 34.33 + def test_pulses_pulse_attributes(): channel = 0 @@ -1152,9 +1169,9 @@ def test_pulse_properties(start, duration): check_properties(p0) [email protected]("faulty_start", [10.0, "hello"]) [email protected]("faulty_duration", [100.0, "hello"]) -def test_pulse_setter_errors(faulty_start, faulty_duration): +def test_pulse_setter_errors(): + faulty_duration = "hello" + faulty_start = "hello" with pytest.raises(TypeError): p0 = Pulse(faulty_start, 100, 0.9, 0, 0, Rectangular(), 0) with pytest.raises(TypeError):
Pulse duration validation Currently the pulse duration and start parameters are in integers of nanoseconds. However, this is an issue for instruments with sampling rates that are not 1GSps. For the IcarusQ RFSoC, we are using a [bisection](https://github.com/qiboteam/qibolab/blob/icarusq_multiqubit/src/qibolab/instruments/icarusqfpga.py#L263-L268) to retrieve the sample indices associated with the pulse. Perhaps this parameter can be sanitized on the driver side instead?
0.0
[ "tests/test_pulses.py::test_pulses_pulse_init" ]
[ "tests/test_pulses.py::test_pulses_pulse_attributes", "tests/test_pulses.py::test_pulses_is_equal_ignoring_start", "tests/test_pulses.py::test_pulses_pulse_serial", "tests/test_pulses.py::test_pulses_pulseshape_sampling_rate", "tests/test_pulses.py::test_raise_shapeiniterror", "tests/test_pulses.py::test_pulses_pulseshape_drag_shape", "tests/test_pulses.py::test_pulses_pulse_hash", "tests/test_pulses.py::test_pulses_pulse_aliases", "tests/test_pulses.py::test_pulses_pulse_split_pulse", "tests/test_pulses.py::test_pulses_pulsesequence_init", "tests/test_pulses.py::test_pulses_pulsesequence_operators", "tests/test_pulses.py::test_pulses_pulsesequence_add", "tests/test_pulses.py::test_pulses_pulsesequence_clear", "tests/test_pulses.py::test_pulses_pulsesequence_start_finish", "tests/test_pulses.py::test_pulses_pulsesequence_get_channel_pulses", "tests/test_pulses.py::test_pulses_pulsesequence_get_qubit_pulses", "tests/test_pulses.py::test_pulses_pulsesequence_pulses_overlap", "tests/test_pulses.py::test_pulses_pulsesequence_separate_overlapping_pulses", "tests/test_pulses.py::test_pulses_pulse_symbolic_expressions", "tests/test_pulses.py::test_pulses_pulse_pulse_order", "tests/test_pulses.py::test_pulses_waveform", "tests/test_pulses.py::test_pulses_pulseshape_rectangular", "tests/test_pulses.py::test_pulses_pulseshape_gaussian", "tests/test_pulses.py::test_pulses_pulseshape_drag", "tests/test_pulses.py::test_pulses_pulseshape_eq", "tests/test_pulses.py::test_pulse", "tests/test_pulses.py::test_readout_pulse", "tests/test_pulses.py::test_pulse_sequence_add", "tests/test_pulses.py::test_pulse_sequence__add__", "tests/test_pulses.py::test_pulse_sequence__mul__", "tests/test_pulses.py::test_pulse_sequence_add_readout", "tests/test_pulses.py::test_envelope_waveform_i_q", "tests/test_pulses.py::test_pulse_properties[100-0]", "tests/test_pulses.py::test_pulse_properties[100-10]", "tests/test_pulses.py::test_pulse_properties[100-start2]", "tests/test_pulses.py::test_pulse_properties[100-start3]", "tests/test_pulses.py::test_pulse_properties[500-0]", "tests/test_pulses.py::test_pulse_properties[500-10]", "tests/test_pulses.py::test_pulse_properties[500-start2]", "tests/test_pulses.py::test_pulse_properties[500-start3]", "tests/test_pulses.py::test_pulse_properties[duration2-0]", "tests/test_pulses.py::test_pulse_properties[duration2-10]", "tests/test_pulses.py::test_pulse_properties[duration2-start2]", "tests/test_pulses.py::test_pulse_properties[duration2-start3]", "tests/test_pulses.py::test_pulse_properties[duration3-0]", "tests/test_pulses.py::test_pulse_properties[duration3-10]", "tests/test_pulses.py::test_pulse_properties[duration3-start2]", "tests/test_pulses.py::test_pulse_properties[duration3-start3]", "tests/test_pulses.py::test_pulse_setter_errors" ]
2023-11-14 12:54:01+00:00
5,143
radiasoft__pykern-126
diff --git a/pykern/pkio.py b/pykern/pkio.py index 4eea5f8..4d878dc 100644 --- a/pykern/pkio.py +++ b/pykern/pkio.py @@ -285,19 +285,22 @@ def walk_tree(dirname, file_re=None): Yields: py.path.local: paths in sorted order """ - fr = file_re - if fr and not hasattr(fr, 'search'): - fr = re.compile(fr) - dirname = py_path(dirname).realpath() - dn = str(dirname) + def _walk(dir_path): + for r, _, files in os.walk(str(dir_path), topdown=True, onerror=None, followlinks=False): + r = py_path(r) + for f in files: + yield r.join(f) + res = [] - for r, d, files in os.walk(dn, topdown=True, onerror=None, followlinks=False): - for f in files: - p = py_path(r).join(f) - if fr and not fr.search(dirname.bestrelpath(p)): - continue - res.append(p) - # Not an iterator, but works as one. Don't assume always will return list + d = py_path(dirname) + if not file_re: + res = list(_walk(d)) + else: + if not hasattr(file_re, 'search'): + file_re = re.compile(file_re) + for p in _walk(d): + if file_re.search(d.bestrelpath(p)): + res.append(p) return sorted(res)
radiasoft/pykern
753082c2a08a5776b30ddab5b28533cdb85b7f3c
diff --git a/tests/pkio_test.py b/tests/pkio_test.py index d17b4fb..2b7e78f 100644 --- a/tests/pkio_test.py +++ b/tests/pkio_test.py @@ -102,6 +102,7 @@ def test_walk_tree_and_sorted_glob(): """Looks in work_dir""" from pykern import pkunit from pykern import pkio + import re with pkunit.save_chdir_work() as pwd: for f in ('d1/d7', 'd2/d3', 'd4/d5/d6'): @@ -116,6 +117,8 @@ def test_walk_tree_and_sorted_glob(): 'When walking tree with file_re, should only return matching files' assert [expect[0]] == list(pkio.walk_tree('.', '^d1')), \ 'When walking tree with file_re, file to match does not include dir being searched' + assert [expect[0]] == list(pkio.walk_tree('.', re.compile('^d1'))), \ + 'When walking tree with file_re, file to match does not include dir being searched' assert pkio.sorted_glob('*/*/f*', key='basename') == expect
refactor pkio.walk_tree This is an example refactoring involving local variables.
0.0
[ "tests/pkio_test.py::test_unchecked_remove", "tests/pkio_test.py::test_has_file_extension", "tests/pkio_test.py::test_py_path", "tests/pkio_test.py::test_save_chdir", "tests/pkio_test.py::test_write_binary", "tests/pkio_test.py::test_walk_tree_and_sorted_glob", "tests/pkio_test.py::test_write_text", "tests/pkio_test.py::test_atomic_write" ]
[]
2022-02-22 20:34:30+00:00
5,152
radiasoft__pykern-158
diff --git a/pykern/pkunit.py b/pykern/pkunit.py index c76ac2a..fc72b1b 100644 --- a/pykern/pkunit.py +++ b/pykern/pkunit.py @@ -239,12 +239,17 @@ def file_eq(expect_path, *args, **kwargs): actual_path = b if not isinstance(actual_path, pykern.pkconst.PY_PATH_LOCAL_TYPE): actual_path = work_dir().join(actual_path) - actual = kwargs['actual'] if a else pkio.read_text(actual_path) + if a: + actual = kwargs['actual'] + if actual_path.exists(): + pkfail('actual={} and actual_path={} both exist', actual, actual_path) + else: + actual = pkio.read_text(actual_path) if expect_path.ext == '.json' and not actual_path.exists(): - e = pykern.pkjson.load_any(expect_path) + e = pkio.read_text(expect_path) if a: pkio.mkdir_parent_only(actual_path) - pykern.pkjson.dump_pretty(actual, filename=actual_path) + actual = pykern.pkjson.dump_pretty(actual, filename=actual_path) else: if j: import pykern.pkjinja
radiasoft/pykern
253ff7fa844d592cd544e2961036d27f51f05faa
diff --git a/tests/pkunit_data/file_eq1.json b/tests/pkunit_data/file_eq1.json new file mode 100644 index 0000000..7326da5 --- /dev/null +++ b/tests/pkunit_data/file_eq1.json @@ -0,0 +1,1 @@ +"array('d', [1.0])" diff --git a/tests/pkunit_data/file_eq2.txt b/tests/pkunit_data/file_eq2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/pkunit_data/file_eq3.txt b/tests/pkunit_data/file_eq3.txt new file mode 100644 index 0000000..339f0be --- /dev/null +++ b/tests/pkunit_data/file_eq3.txt @@ -0,0 +1,1 @@ +something else \ No newline at end of file diff --git a/tests/pkunit_test.py b/tests/pkunit_test.py index 31acebb..40b0b6e 100644 --- a/tests/pkunit_test.py +++ b/tests/pkunit_test.py @@ -5,6 +5,9 @@ u"""PyTest for :mod:`pykern.pkunit` :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function +import pkgutil + +import py import pytest def test_assert_object_with_json(): @@ -55,6 +58,22 @@ def test_empty_work_dir(): 'Ensure directory was created' +def test_file_eq(): + import array + import pykern.pkunit + import pykern.pkio + + a = array.ArrayType('d', [1]) + pykern.pkunit.file_eq('file_eq1.json', actual=a) + + with pykern.pkunit.pkexcept(TypeError): + pykern.pkunit.file_eq('file_eq2.txt', actual=dict()) + d = pykern.pkunit.empty_work_dir() + pykern.pkio.write_text(d.join('file_eq3.txt'), 'something') + with pykern.pkunit.pkexcept('both exist'): + pykern.pkunit.file_eq('file_eq3.txt', actual='something else') + + def test_import_module_from_data_dir(monkeypatch): from pykern import pkunit
pkunit.file_eq should always compare files Almost always compares objects instead
0.0
[ "tests/pkunit_test.py::test_file_eq" ]
[ "tests/pkunit_test.py::test_assert_object_with_json", "tests/pkunit_test.py::test_data_dir", "tests/pkunit_test.py::test_data_yaml", "tests/pkunit_test.py::test_empty_work_dir", "tests/pkunit_test.py::test_import_module_from_data_dir", "tests/pkunit_test.py::test_pkexcept", "tests/pkunit_test.py::test_pkok", "tests/pkunit_test.py::test_pkre_convert" ]
2022-04-07 20:43:22+00:00
5,153
radiasoft__pykern-160
diff --git a/pykern/pkcli/__init__.py b/pykern/pkcli/__init__.py index cb8bd7e..27cbf90 100644 --- a/pykern/pkcli/__init__.py +++ b/pykern/pkcli/__init__.py @@ -66,6 +66,42 @@ def command_error(fmt, *args, **kwargs): raise CommandError(fmt.format(*args, **kwargs)) +class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, + argparse.RawDescriptionHelpFormatter): + def _expand_help(self, action): + return super()._expand_help(action).split('\n')[0] + +class CustomParser(argparse.ArgumentParser): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.program = kwargs.copy() + self.options = [] + + def format_help(self): + f = argh.PARSER_FORMATTER(prog=self.prog) + if not self.description: + f = CustomFormatter(prog=self.prog) + f.add_usage( + self.usage, + self._actions, + self._mutually_exclusive_groups + ) + f.add_text(self.description) + for a in self._action_groups: + f.start_section(a.title) + f.add_text(a.description) + f.add_arguments(a._group_actions) + f.end_section() + f.add_text(self.epilog) + if not self.description: + return f.format_help().replace('positional arguments', 'commands') + return f.format_help() + + def print_help(self): + print(self.format_help()) + + def main(root_pkg, argv=None): """Invokes module functions in :mod:`pykern.pkcli` @@ -90,8 +126,7 @@ def main(root_pkg, argv=None): if not cli: return 1 prog = prog + ' ' + module_name - parser = argparse.ArgumentParser( - prog=prog, formatter_class=argh.PARSER_FORMATTER) + parser = CustomParser(prog) cmds = _commands(cli) dc = _default_command(cmds, argv) if dc:
radiasoft/pykern
814541baf45bb9221a6c03aa766bd28fbd523ec5
diff --git a/tests/pkcli_data/p1/pkcli/conf1.py b/tests/pkcli_data/p1/pkcli/conf1.py index 3feccce..057bb50 100644 --- a/tests/pkcli_data/p1/pkcli/conf1.py +++ b/tests/pkcli_data/p1/pkcli/conf1.py @@ -5,11 +5,21 @@ last_cmd = None from pykern.pkdebug import pkdp def cmd1(arg1): + """Subject line for cmd1 + + Args: + arg1 + """ global last_cmd last_cmd = cmd1 return def cmd2(): + """Subject line for cmd2 + + Args: + - + """ global last_cmd last_cmd = cmd2 return diff --git a/tests/pkcli_test.py b/tests/pkcli_test.py index fcaeb56..09d6087 100644 --- a/tests/pkcli_test.py +++ b/tests/pkcli_test.py @@ -42,7 +42,7 @@ def test_main2(capsys): _dev(rp, [], None, all_modules, capsys) _dev(rp, ['--help'], None, all_modules, capsys) _dev(rp, ['conf1'], SystemExit, r'cmd1,cmd2.*too few', capsys) - _dev(rp, ['conf1', '-h'], SystemExit, r'\{cmd1,cmd2\}.*positional arguments', capsys) + _dev(rp, ['conf1', '-h'], SystemExit, r'\{cmd1,cmd2\}.*commands', capsys) if six.PY2: _dev(rp, ['not_found'], None, r'no module', capsys) else: @@ -91,8 +91,9 @@ def _dev(root_pkg, argv, exc, expect, capsys): out, err = capsys.readouterr() if not err: err = out - assert re.search(expect, err, flags=re.IGNORECASE+re.DOTALL) is not None, \ - 'Looking for {} in err={}'.format(expect, err) + assert re.search('Args.*arg1', err, flags=re.DOTALL) is None, \ + 'failure to ignore arguments and only print subject. out: {}'.format(err) + pkunit.pkre(expect, err) def _main(root_pkg, argv):
pkcli docstring printing too greedy When using the `pkcli` interaction with the middl project, I'm finding that the help printing seems a bit too greedy, printing the entire docstring instead of just the function summary. The command is below: ```bash (middl) [joshec@roentgen middl]$ middlsoft train -h usage: middlsoft train [-h] {regressor,vrae} ... positional arguments: {regressor,vrae} regressor Train and run regressor to map latent space back to data Args: configuration (str): path to configuration file in model directory no_cuda (bool): whether or not to use CUDA. Defaults to False. cuda_device (int): CUDA device index. Defaults to -1 (all). do_write (bool): write regressor output to file. Defaults to False. use_sklearn (bool): use scikit-learn MLPRegressor vs pytorch. Defaults to False. datadir (str): alternative directory to find data in. Defaults to None. profile (bool): perform simple profiling of pytorch model. Defaults to False. Returns: None vrae optional arguments: -h, --help show this help message and exit ``` This printing occurs on the disvae_losses branch of the middl project: [middl/middlsoft/pkcli/train.py](https://github.com/radiasoft/middl/blob/d8ebd67e8dff4ff563fc7be78220f785c1fb5ad3/middlsoft/pkcli/train.py#L253-L274)
0.0
[ "tests/pkcli_test.py::test_main2" ]
[ "tests/pkcli_test.py::test_command_error", "tests/pkcli_test.py::test_main1", "tests/pkcli_test.py::test_main3" ]
2022-04-14 23:01:09+00:00
5,154
raimon49__pip-licenses-150
diff --git a/README.md b/README.md index eeeb287..7517b50 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ Dump the software license list of Python packages installed with pip. * [Usage](#usage) * [Command\-Line Options](#command-line-options) * [Common options](#common-options) + * [Option: python](#option-python) * [Option: from](#option-from) * [Option: order](#option-order) * [Option: format](#option-format) @@ -97,6 +98,21 @@ Execute the command with your venv (or virtualenv) environment. ### Common options +#### Option: python + +By default, this tools finds the packages from the environment pip-licenses is launched from, by searching in current python's `sys.path` folders. In the case you want to search for packages in an other environment (e.g. if you want to run pip-licenses from its own isolated environment), you can specify a path to a python executable. The packages will be searched for in the given python's `sys.path`, free of pip-licenses dependencies. + +```bash +(venv) $ pip-licenses --with-system | grep pip + pip 22.3.1 MIT License + pip-licenses 4.1.0 MIT License +``` + +```bash +(venv) $ pip-licenses --python=</path/to/other/env>/bin/python --with-system | grep pip + pip 23.0.1 MIT License +``` + #### Option: from By default, this tool finds the license from [Trove Classifiers](https://pypi.org/classifiers/) or package Metadata. Some Python packages declare their license only in Trove Classifiers. diff --git a/piplicenses.py b/piplicenses.py index 3bac56e..488c338 100644 --- a/piplicenses.py +++ b/piplicenses.py @@ -30,7 +30,9 @@ from __future__ import annotations import argparse import codecs +import os import re +import subprocess import sys from collections import Counter from enum import Enum, auto @@ -194,7 +196,21 @@ def get_packages( return pkg_info - pkgs = importlib_metadata.distributions() + def get_python_sys_path(executable: str) -> list[str]: + script = "import sys; print(' '.join(filter(bool, sys.path)))" + output = subprocess.run( + [executable, "-c", script], + capture_output=True, + env={**os.environ, "PYTHONPATH": "", "VIRTUAL_ENV": ""}, + ) + return output.stdout.decode().strip().split() + + if args.python == sys.executable: + search_paths = sys.path + else: + search_paths = get_python_sys_path(args.python) + + pkgs = importlib_metadata.distributions(path=search_paths) ignore_pkgs_as_lower = [pkg.lower() for pkg in args.ignore_packages] pkgs_as_lower = [pkg.lower() for pkg in args.packages] @@ -785,6 +801,17 @@ def create_parser() -> CompatibleArgumentParser: "-v", "--version", action="version", version="%(prog)s " + __version__ ) + common_options.add_argument( + "--python", + type=str, + default=sys.executable, + metavar="PYTHON_EXEC", + help="R| path to python executable to search distributions from\n" + "Package will be searched in the selected python's sys.path\n" + "By default, will search packages for current env executable\n" + "(default: sys.executable)", + ) + common_options.add_argument( "--from", dest="from_",
raimon49/pip-licenses
bd213a57b824bd404436dfb261af1383824b5465
diff --git a/test_piplicenses.py b/test_piplicenses.py index f470377..426f31a 100644 --- a/test_piplicenses.py +++ b/test_piplicenses.py @@ -4,11 +4,14 @@ from __future__ import annotations import copy import email +import json import re import sys import unittest +import venv from enum import Enum, auto from importlib.metadata import Distribution +from types import SimpleNamespace from typing import TYPE_CHECKING, Any, List import docutils.frontend @@ -39,6 +42,7 @@ from piplicenses import ( factory_styled_table_with_args, find_license_from_classifier, get_output_fields, + get_packages, get_sortby, output_colored, save_if_needs, @@ -780,6 +784,26 @@ def test_allow_only(monkeypatch) -> None: ) +def test_different_python() -> None: + import tempfile + + class TempEnvBuild(venv.EnvBuilder): + def post_setup(self, context: SimpleNamespace) -> None: + self.context = context + + with tempfile.TemporaryDirectory() as target_dir_path: + venv_builder = TempEnvBuild(with_pip=True) + venv_builder.create(str(target_dir_path)) + python_exec = venv_builder.context.env_exe + python_arg = f"--python={python_exec}" + args = create_parser().parse_args([python_arg, "-s", "-f=json"]) + pkgs = get_packages(args) + package_names = sorted(p["name"] for p in pkgs) + print(package_names) + + assert package_names == ["pip", "setuptools"] + + def test_fail_on(monkeypatch) -> None: licenses = ("MIT license",) allow_only_args = ["--fail-on={}".format(";".join(licenses))]
Retrieve licenses from another environment Somehow related to #107 Currently, we can only get packages from the environment we are running pip-licenses in. This is a problem for two reasons - As said in the mentioned issue, we cannot use it as a pre-commit hook easily. There is the possibility to use a hook within the activated environment, but it's clearly not following its guidelines - Using this library implies installing it in your venv, and thus contaminating it by installing packages you wouldn't need otherwise. It's not great given the purpose of this lib is to identify licenses of installed packages. I think there is a simple fix to this. Indeed, the packages are discovered via `importlib`'s `distributions()` function. As can be seen here: https://github.com/raimon49/pip-licenses/blob/master/piplicenses.py#L193 By looking at the documentation and code of importlib, it seems to me that `distributions` should accept a `path` argument with a list of folders to search in. See https://importlib-metadata.readthedocs.io/en/latest/api.html#importlib_metadata.distributions and https://github.com/python/importlib_metadata/blob/700f2c7d74543e3695163d5487155b92e6f04d65/importlib_metadata/__init__.py#L809 `distributions(**kwargs)` calls `Distribution.discover(**kwargs)`, which expects either a `Context` object or its args for creating one (see https://github.com/python/importlib_metadata/blob/700f2c7d74543e3695163d5487155b92e6f04d65/importlib_metadata/__init__.py#L387) All in all, calling `distributions([folder_path])` will retrieve all package metadata in the given `folder_path`, which could be a CLI parameter. What do you think?
0.0
[ "test_piplicenses.py::test_different_python" ]
[ "test_piplicenses.py::PYCODESTYLE", "test_piplicenses.py::TestGetLicenses::test_case_insensitive_set_diff", "test_piplicenses.py::TestGetLicenses::test_case_insensitive_set_intersect", "test_piplicenses.py::TestGetLicenses::test_display_multiple_license_from_classifier", "test_piplicenses.py::TestGetLicenses::test_find_license_from_classifier", "test_piplicenses.py::TestGetLicenses::test_format_confluence", "test_piplicenses.py::TestGetLicenses::test_format_csv", "test_piplicenses.py::TestGetLicenses::test_format_json", "test_piplicenses.py::TestGetLicenses::test_format_json_license_manager", "test_piplicenses.py::TestGetLicenses::test_format_markdown", "test_piplicenses.py::TestGetLicenses::test_format_plain", "test_piplicenses.py::TestGetLicenses::test_format_plain_vertical", "test_piplicenses.py::TestGetLicenses::test_format_rst_default_filter", "test_piplicenses.py::TestGetLicenses::test_from_classifier", "test_piplicenses.py::TestGetLicenses::test_from_meta", "test_piplicenses.py::TestGetLicenses::test_from_mixed", "test_piplicenses.py::TestGetLicenses::test_if_no_classifiers_then_no_licences_found", "test_piplicenses.py::TestGetLicenses::test_ignore_packages", "test_piplicenses.py::TestGetLicenses::test_order_author", "test_piplicenses.py::TestGetLicenses::test_order_license", "test_piplicenses.py::TestGetLicenses::test_order_name", "test_piplicenses.py::TestGetLicenses::test_order_url", "test_piplicenses.py::TestGetLicenses::test_order_url_no_effect", "test_piplicenses.py::TestGetLicenses::test_output_colored_bold", "test_piplicenses.py::TestGetLicenses::test_output_colored_normal", "test_piplicenses.py::TestGetLicenses::test_select_license_by_source", "test_piplicenses.py::TestGetLicenses::test_summary", "test_piplicenses.py::TestGetLicenses::test_summary_sort_by_count", "test_piplicenses.py::TestGetLicenses::test_summary_sort_by_name", "test_piplicenses.py::TestGetLicenses::test_summary_warning", "test_piplicenses.py::TestGetLicenses::test_with_authors", "test_piplicenses.py::TestGetLicenses::test_with_default_filter", "test_piplicenses.py::TestGetLicenses::test_with_description", "test_piplicenses.py::TestGetLicenses::test_with_empty_args", "test_piplicenses.py::TestGetLicenses::test_with_license_file", "test_piplicenses.py::TestGetLicenses::test_with_license_file_no_path", "test_piplicenses.py::TestGetLicenses::test_with_license_file_warning", "test_piplicenses.py::TestGetLicenses::test_with_notice_file", "test_piplicenses.py::TestGetLicenses::test_with_packages", "test_piplicenses.py::TestGetLicenses::test_with_packages_with_system", "test_piplicenses.py::TestGetLicenses::test_with_specified_filter", "test_piplicenses.py::TestGetLicenses::test_with_system", "test_piplicenses.py::TestGetLicenses::test_with_urls", "test_piplicenses.py::TestGetLicenses::test_without_filter", "test_piplicenses.py::test_output_file_success", "test_piplicenses.py::test_output_file_error", "test_piplicenses.py::test_output_file_none", "test_piplicenses.py::test_allow_only", "test_piplicenses.py::test_fail_on", "test_piplicenses.py::test_enums", "test_piplicenses.py::test_verify_args" ]
2023-03-21 21:15:37+00:00
5,159